115 files changed, 8132 insertions(+), 566 deletions(-)
diff --git a/client/mysqltest.c b/client/mysqltest.c
index 14258a3669e..6d596ab793c 100644
--- a/client/mysqltest.c
+++ b/client/mysqltest.c
@@ -852,10 +852,15 @@ static VAR *var_obtain(const char *name, int len)
   return v;
 }
 
+/*
+  - if the variable starts with a $ it is regarded as a local test variable
+  - if not, it is treated as an environment variable, and the corresponding
+    environment variable will be updated
+*/
 int var_set(const char *var_name, const char *var_name_end,
             const char *var_val, const char *var_val_end)
 {
-  int digit;
+  int digit, result, env_var= 0;
   VAR* v;
   DBUG_ENTER("var_set");
   DBUG_PRINT("enter", ("var_name: '%.*s' = '%.*s' (length: %d)",
@@ -863,11 +868,11 @@ int var_set(const char *var_name, const char *var_name_end,
                        (int) (var_val_end - var_val), var_val,
                        (int) (var_val_end - var_val)));
 
-  if (*var_name++ != '$')
-  {
-    var_name--;
-    die("Variable name in %s does not start with '$'", var_name);
-  }
+  if (*var_name != '$')
+    env_var= 1;
+  else
+    var_name++;
+
   digit = *var_name - '0';
   if (!(digit < 10 && digit >= 0))
   {
@@ -875,7 +880,23 @@ int var_set(const char *var_name, const char *var_name_end,
   }
   else
     v = var_reg + digit;
-  DBUG_RETURN(eval_expr(v, var_val, (const char**)&var_val_end));
+
+  result= eval_expr(v, var_val, (const char**) &var_val_end);
+
+  if (env_var)
+  {
+    char buf[1024];
+    memcpy(buf, v->name, v->name_len);
+    buf[v->name_len]= 0;
+    if (v->int_dirty)
+    {
+      sprintf(v->str_val, "%d", v->int_val);
+      v->int_dirty= 0;
+      v->str_val_len= strlen(v->str_val);
+    }
+    setenv(buf, v->str_val, 1);
+  }
+  DBUG_RETURN(result);
 }
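Editor's note: to make the new var_set() semantics concrete, here is a minimal mysqltest sketch (the variable names are invented for illustration and are not part of the patch). A let target without the '$' prefix is now also pushed into the process environment via setenv(), so programs started with exec inherit it, while '$'-prefixed variables stay local to the test:

    # hypothetical test snippet illustrating the new let behaviour
    let MY_EXPORTED_VAR= 42;     # no '$': also exported to the environment
    let $my_local_var= 43;       # with '$': plain test variable, not exported
    --exec echo $MY_EXPORTED_VAR # the child process sees the exported value
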
@@ -1483,6 +1504,10 @@ int do_sync_with_master(struct st_query *query)
   return do_sync_with_master2(offset);
 }
 
+/*
+  When the ndb binlog is on, this call will wait until the last updated epoch
+  (locally in the mysqld) has been received into the binlog.
+*/
 int do_save_master_pos()
 {
   MYSQL_RES* res;
@@ -1494,6 +1519,89 @@ int do_save_master_pos()
   rpl_parse = mysql_rpl_parse_enabled(mysql);
   mysql_disable_rpl_parse(mysql);
 
+#ifdef HAVE_NDB_BINLOG
+  /*
+    Wait for ndb binlog to be up-to-date with all changes
+    done on the local mysql server
+  */
+  {
+    ulong have_ndbcluster;
+    if (mysql_query(mysql, query= "show variables like 'have_ndbcluster'"))
+      die("At line %u: failed in %s: %d: %s", start_lineno, query,
+          mysql_errno(mysql), mysql_error(mysql));
+    if (!(res= mysql_store_result(mysql)))
+      die("line %u: mysql_store_result() returned NULL for '%s'", start_lineno,
+          query);
+    if (!(row= mysql_fetch_row(res)))
+      die("line %u: empty result in %s", start_lineno, query);
+
+    have_ndbcluster= strcmp("YES", row[1]) == 0;
+    mysql_free_result(res);
+
+    if (have_ndbcluster)
+    {
+      ulonglong epoch, tmp_epoch= 0;
+      int count= 0;
+
+      do
+      {
+        const char binlog[]= "binlog";
+        const char latest_trans_epoch[]=
+          "latest_trans_epoch=";
+        const char latest_applied_binlog_epoch[]=
+          "latest_applied_binlog_epoch=";
+        if (count)
+          sleep(1);
+        if (mysql_query(mysql, query= "show engine ndb status"))
+          die("At line %u: failed in '%s': %d: %s", start_lineno, query,
+              mysql_errno(mysql), mysql_error(mysql));
+        if (!(res= mysql_store_result(mysql)))
+          die("line %u: mysql_store_result() returned NULL for '%s'",
+              start_lineno, query);
+        while ((row= mysql_fetch_row(res)))
+        {
+          if (strcmp(row[1], binlog) == 0)
+          {
+            const char *status= row[2];
+            /* latest_trans_epoch */
+            if (count == 0)
+            {
+              while (*status && strncmp(status, latest_trans_epoch,
+                                        sizeof(latest_trans_epoch)-1))
+                status++;
+              if (*status)
+              {
+                status+= sizeof(latest_trans_epoch)-1;
+                epoch= strtoull(status, (char**) 0, 10);
+              }
+              else
+                die("line %u: result does not contain '%s' in '%s'",
+                    start_lineno, latest_trans_epoch, query);
+            }
+            /* latest_applied_binlog_epoch */
+            while (*status && strncmp(status, latest_applied_binlog_epoch,
+                                      sizeof(latest_applied_binlog_epoch)-1))
+              status++;
+            if (*status)
+            {
+              status+= sizeof(latest_applied_binlog_epoch)-1;
+              tmp_epoch= strtoull(status, (char**) 0, 10);
+            }
+            else
+              die("line %u: result does not contain '%s' in '%s'",
+                  start_lineno, latest_applied_binlog_epoch, query);
+            break;
+          }
+        }
+        mysql_free_result(res);
+        if (!row)
+          die("line %u: result does not contain '%s' in '%s'",
+              start_lineno, binlog, query);
+        count++;
+      } while (tmp_epoch < epoch && count <= 3);
+    }
+  }
+#endif
   if (mysql_query(mysql, query= "show master status"))
     die("failed in show master status: %d: %s",
         mysql_errno(mysql), mysql_error(mysql));
@@ -1544,7 +1652,8 @@ int do_let(struct st_query *query)
   while (*p && (*p != '=') && !my_isspace(charset_info,*p))
     p++;
   var_name_end= p;
-  if (var_name+1 == var_name_end)
+  if (var_name == var_name_end ||
+      (var_name+1 == var_name_end && *var_name == '$'))
    die("Missing variable name in let");
   while (my_isspace(charset_info,*p))
    p++;
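Editor's note: the wait loop added to do_save_master_pos() is driven entirely by two fields of one status row; roughly, it amounts to the following client-side check (a sketch of what the C code inspects, not a literal test file):

    show variables like 'have_ndbcluster';  -- the wait only happens when this is YES
    show engine ndb status;
    -- In the row whose second column is 'binlog', the status text contains
    -- latest_trans_epoch=<n> and latest_applied_binlog_epoch=<m>; the loop
    -- re-runs the statement (sleeping up to 3 times) until m >= n, i.e. until
    -- the ndb binlog has caught up with the locally updated epoch.
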
diff --git a/config/ac-macros/ha_ndbcluster.m4 b/config/ac-macros/ha_ndbcluster.m4
index f1a76d14cc4..196c6b111df 100644
--- a/config/ac-macros/ha_ndbcluster.m4
+++ b/config/ac-macros/ha_ndbcluster.m4
@@ -87,6 +87,11 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [
                   [Extra CFLAGS for ndb compile]),
                   [ndb_ccflags=${withval}],
                   [ndb_ccflags=""])
+  AC_ARG_WITH([ndb-binlog],
+              [
+  --without-ndb-binlog    Disable ndb binlog],
+              [ndb_binlog="$withval"],
+              [ndb_binlog="default"])
 
   case "$ndb_ccflags" in
     "yes")
@@ -185,6 +190,7 @@ AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [
   ndbcluster_libs="\$(top_builddir)/storage/ndb/src/.libs/libndbclient.a"
   ndbcluster_system_libs=""
   ndb_mgmclient_libs="\$(top_builddir)/storage/ndb/src/mgmclient/libndbmgmclient.la"
+  mysql_se_objs="$mysql_se_objs ha_ndbcluster_binlog.o"
 
   MYSQL_CHECK_NDB_OPTIONS
   NDBCLUSTER_WORKAROUNDS
@@ -219,6 +225,25 @@ AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [
     ndb_port="1186"
   fi
 
+  have_ndb_binlog="no"
+  if test X"$ndb_binlog" = Xdefault ||
+     test X"$ndb_binlog" = Xyes
+  then
+    if test X"$have_row_based" = Xyes
+    then
+      have_ndb_binlog="yes"
+    fi
+  fi
+
+  if test X"$have_ndb_binlog" = Xyes
+  then
+    AC_DEFINE([HAVE_NDB_BINLOG], [1],
+              [Including Ndb Cluster Binlog])
+    AC_MSG_RESULT([Including Ndb Cluster Binlog])
+  else
+    AC_MSG_RESULT([Not including Ndb Cluster Binlog])
+  fi
+
   ndb_transporter_opt_objs=""
   if test "$ac_cv_func_shmget" = "yes" &&
      test "$ac_cv_func_shmat" = "yes" &&
diff --git a/include/my_base.h b/include/my_base.h
index 9b53ebffeb4..a6dc237d28a 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -154,7 +154,14 @@ enum ha_extra_function {
     to overwrite entire row.
   */
   HA_EXTRA_KEYREAD_PRESERVE_FIELDS,
-  HA_EXTRA_MMAP
+  HA_EXTRA_MMAP,
+  /*
+    Ignore if a tuple is not found, continue processing the
+    transaction and ignore that 'row'. Needed for idempotency
+    handling on the slave.
+  */
+  HA_EXTRA_IGNORE_NO_KEY,
+  HA_EXTRA_NO_IGNORE_NO_KEY
 };
 
 /* The following is parameter to ha_panic() */
diff --git a/include/mysql_com.h b/include/mysql_com.h
index cef63115ad3..75be3585659 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -49,7 +49,7 @@ enum enum_server_command
   COM_TIME, COM_DELAYED_INSERT, COM_CHANGE_USER, COM_BINLOG_DUMP,
   COM_TABLE_DUMP, COM_CONNECT_OUT, COM_REGISTER_SLAVE,
   COM_STMT_PREPARE, COM_STMT_EXECUTE, COM_STMT_SEND_LONG_DATA, COM_STMT_CLOSE,
-  COM_STMT_RESET, COM_SET_OPTION, COM_STMT_FETCH,
+  COM_STMT_RESET, COM_SET_OPTION, COM_STMT_FETCH, COM_DAEMON,
   /* don't forget to update const char *command_name[] in sql_parse.cc */
 
   /* Must be last */
diff --git a/libmysqld/Makefile.am b/libmysqld/Makefile.am
index 1aad198d7ae..ec6dfecb359 100644
--- a/libmysqld/Makefile.am
+++ b/libmysqld/Makefile.am
@@ -66,11 +66,13 @@ sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
 	parse_file.cc sql_view.cc sql_trigger.cc my_decimal.cc \
 	event_executor.cc event.cc event_timed.cc \
 	rpl_filter.cc sql_partition.cc handlerton.cc sql_plugin.cc \
-	sql_tablespace.cc
+	sql_tablespace.cc \
+	rpl_injector.cc
 
 libmysqld_int_a_SOURCES= $(libmysqld_sources) $(libmysqlsources) $(sqlsources)
 EXTRA_libmysqld_a_SOURCES = ha_innodb.cc ha_berkeley.cc ha_archive.cc \
 	ha_blackhole.cc ha_federated.cc ha_ndbcluster.cc \
+	ha_ndbcluster_binlog.cc \
 	ha_partition.cc
 libmysqld_a_DEPENDENCIES= @mysql_se_objs@
 libmysqld_a_SOURCES=
@@ -102,6 +104,9 @@ ha_berkeley.o: ha_berkeley.cc
 ha_ndbcluster.o: ha_ndbcluster.cc
 	$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
 
+ha_ndbcluster_binlog.o: ha_ndbcluster_binlog.cc
+	$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
+
 # Until we can remove dependency on ha_ndbcluster.h
 handler.o: handler.cc
 	$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
diff --git a/mysql-test/include/have_ndb_extra.inc b/mysql-test/include/have_ndb_extra.inc
new file mode 100644
index 00000000000..4837a7ad4ea
--- /dev/null
+++ b/mysql-test/include/have_ndb_extra.inc
@@ -0,0 +1,2 @@
+-- require r/have_ndb_extra.require
+eval select $NDB_EXTRA_TEST;
diff --git a/mysql-test/include/not_ndb.inc b/mysql-test/include/not_ndb.inc
new file mode 100644
index 00000000000..965538c20a7
--- /dev/null
+++ b/mysql-test/include/not_ndb.inc
@@ -0,0 +1,7 @@
+-- require r/not_ndb.require
+disable_query_log;
+# so that both DISABLED and NO are output as NO
+-- replace_result DISABLED NO
+show variables like "have_ndbcluster";
+enable_query_log;
+
diff --git a/mysql-test/lib/init_db.sql b/mysql-test/lib/init_db.sql
index 6235f91f849..ef6383f6680 100644
--- a/mysql-test/lib/init_db.sql
+++ b/mysql-test/lib/init_db.sql
@@ -594,3 +594,6 @@ CREATE TABLE event (
   comment varchar(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '',
   PRIMARY KEY (db,name)
 ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events';
+
+CREATE DATABASE IF NOT EXISTS cluster_replication;
+CREATE TABLE IF NOT EXISTS cluster_replication.binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM;
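Editor's note: the binlog_index table created above is the bridge between ndb epochs and binlog positions. The intended lookup, as exercised verbatim by the rpl_ndb_* result files further down, is roughly the following (the first statement runs against the slave's apply_status, the second against the master's binlog_index, substituting the epoch value when the servers differ):

    SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
    SELECT @the_pos:=Position, @the_file:=SUBSTRING_INDEX(File, '/', -1)
    FROM cluster_replication.binlog_index
    WHERE epoch > @the_epoch ORDER BY epoch ASC LIMIT 1;
    -- @the_file / @the_pos then feed CHANGE MASTER TO on the new slave.
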
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 541719427ed..efa5484ef73 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -140,6 +140,7 @@ our $glob_scriptname= undef;
 our $glob_timers= undef;
 our $glob_use_running_server= 0;
 our $glob_use_running_ndbcluster= 0;
+our $glob_use_running_ndbcluster_slave= 0;
 our $glob_use_embedded_server= 0;
 our @glob_test_mode;
 
@@ -233,6 +234,8 @@ our $instance_manager;
 
 our $opt_ndbcluster_port;
 our $opt_ndbconnectstring;
+our $opt_ndbcluster_port_slave;
+our $opt_ndbconnectstring_slave;
 
 our $opt_no_manager;  # Does nothing now, we never use manager
 our $opt_manager_port; # Does nothing now, we never use manager
@@ -300,12 +303,16 @@ our $opt_udiff;
 
 our $opt_skip_ndbcluster;
 our $opt_with_ndbcluster;
+our $opt_skip_ndbcluster_slave;
+our $opt_with_ndbcluster_slave;
+our $opt_ndb_extra_test;
 
 our $exe_ndb_mgm;
 our $path_ndb_tools_dir;
 our $path_ndb_backup_dir;
 our $file_ndb_testrun_log;
 our $flag_ndb_status_ok= 1;
+our $flag_ndb_slave_status_ok= 1;
 
 ######################################################################
 #
@@ -322,9 +329,13 @@ sub kill_running_server ();
 sub kill_and_cleanup ();
 sub check_ssl_support ();
 sub check_ndbcluster_support ();
+sub rm_ndbcluster_tables ($);
 sub ndbcluster_install ();
 sub ndbcluster_start ();
 sub ndbcluster_stop ();
+sub ndbcluster_install_slave ();
+sub ndbcluster_start_slave ();
+sub ndbcluster_stop_slave ();
 sub run_benchmarks ($);
 sub run_tests ();
 sub mysql_install_db ();
@@ -493,6 +504,7 @@ sub command_line_setup () {
   my $opt_master_myport= 9306;
   my $opt_slave_myport=  9308;
   $opt_ndbcluster_port=  9350;
+  $opt_ndbcluster_port_slave= 9358;
   my $im_port=           9310;
   my $im_mysqld1_port=   9312;
   my $im_mysqld2_port=   9314;
@@ -529,6 +541,10 @@ sub command_line_setup () {
              'force'                    => \$opt_force,
              'with-ndbcluster'          => \$opt_with_ndbcluster,
              'skip-ndbcluster|skip-ndb' => \$opt_skip_ndbcluster,
+             'with-ndbcluster-slave'    => \$opt_with_ndbcluster_slave,
+             'skip-ndbcluster-slave|skip-ndb-slave'
+                                        => \$opt_skip_ndbcluster_slave,
+             'ndb-extra-test'           => \$opt_ndb_extra_test,
              'do-test=s'                => \$opt_do_test,
              'suite=s'                  => \$opt_suite,
              'skip-rpl'                 => \$opt_skip_rpl,
@@ -539,6 +555,7 @@ sub command_line_setup () {
              'master_port=i'            => \$opt_master_myport,
              'slave_port=i'             => \$opt_slave_myport,
              'ndbcluster_port=i'        => \$opt_ndbcluster_port,
+             'ndbcluster_port_slave=i'  => \$opt_ndbcluster_port_slave,
              'manager-port=i'           => \$opt_manager_port, # Currently not used
              'im-port=i'                => \$im_port, # Instance Manager port.
             'im-mysqld1-port=i'        => \$im_mysqld1_port, # Port of mysqld, controlled by IM
@@ -553,6 +570,7 @@ sub command_line_setup () {
              # Run test on running server
              'extern'                   => \$opt_extern,
              'ndbconnectstring=s'       => \$opt_ndbconnectstring,
+             'ndbconnectstring-slave=s' => \$opt_ndbconnectstring_slave,
 
              # Debugging
              'gdb'                      => \$opt_gdb,
@@ -759,9 +777,25 @@ sub command_line_setup () {
     $opt_ndbconnectstring= "host=localhost:$opt_ndbcluster_port";
   }
 
+  if ( $opt_ndbconnectstring_slave )
+  {
+    $glob_use_running_ndbcluster_slave= 1;
+    $opt_with_ndbcluster_slave= 1;
+  }
+  else
+  {
+    $opt_ndbconnectstring_slave= "host=localhost:$opt_ndbcluster_port_slave";
+  }
+
   if ( $opt_skip_ndbcluster )
   {
     $opt_with_ndbcluster= 0;
+    $opt_skip_ndbcluster_slave= 1;
+  }
+
+  if ( $opt_skip_ndbcluster_slave )
+  {
+    $opt_with_ndbcluster_slave= 0;
   }
 
   # The ":s" in the argument spec, means we have three different cases
@@ -855,6 +889,8 @@ sub command_line_setup () {
      path_mysock   => "$sockdir/slave.sock",
      path_myport   => $opt_slave_myport,
      start_timeout => 400,
+
+     ndbcluster    => 1, # ndbcluster not started
    };
 
   $slave->[1]=
@@ -1188,6 +1224,8 @@ sub kill_running_server () {
 
     ndbcluster_stop();
     $master->[0]->{'ndbcluster'}= 1;
+    ndbcluster_stop_slave();
+    $slave->[0]->{'ndbcluster'}= 1;
   }
 }
 
@@ -1358,6 +1396,15 @@ sub ndbcluster_start () {
   return 0;
 }
 
+sub rm_ndbcluster_tables ($) {
+  my $dir= shift;
+  foreach my $bin ( glob("$dir/cluster_replication/apply_status*"),
+                    glob("$dir/cluster_replication/schema*") )
+  {
+    unlink($bin);
+  }
+}
+
 sub ndbcluster_stop () {
 
   if ( ! $opt_with_ndbcluster or $glob_use_running_ndbcluster )
@@ -1371,6 +1418,71 @@ sub ndbcluster_stop () {
            "--stop"],
           "", "/dev/null", "", "");
 
+  rm_ndbcluster_tables ($master->[0]->{'path_myddir'});
+  rm_ndbcluster_tables ($master->[1]->{'path_myddir'});
+  return;
+}
+
+sub ndbcluster_install_slave () {
+
+  if ( ! $opt_with_ndbcluster_slave or $glob_use_running_ndbcluster_slave )
+  {
+    return 0;
+  }
+  mtr_report("Install ndbcluster slave");
+  if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
+               ["--port=$opt_ndbcluster_port_slave",
+                "--data-dir=$opt_vardir",
+                "--small",
+                "--ndbd-nodes=1",
+                "--initial"],
+               "", "", "", "") )
+  {
+    mtr_error("Error ndbcluster_install_slave");
+    return 1;
+  }
+
+  ndbcluster_stop_slave();
+  $slave->[0]->{'ndbcluster'}= 1;
+
+  return 0;
+}
+
+sub ndbcluster_start_slave () {
+
+  if ( ! $opt_with_ndbcluster_slave or $glob_use_running_ndbcluster_slave )
+  {
+    return 0;
+  }
+  # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null
+  if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
+               ["--port=$opt_ndbcluster_port_slave",
+                "--data-dir=$opt_vardir",
+                "--ndbd-nodes=1"],
+               "", "/dev/null", "", "") )
+  {
+    mtr_error("Error ndbcluster_start_slave");
+    return 1;
+  }
+
+  return 0;
+}
+
+sub ndbcluster_stop_slave () {
+
+  if ( ! $opt_with_ndbcluster_slave or $glob_use_running_ndbcluster_slave )
+  {
+    return;
+  }
+  # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null
+  mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
+          ["--port=$opt_ndbcluster_port_slave",
+           "--data-dir=$opt_vardir",
+           "--stop"],
+          "", "/dev/null", "", "");
+
+  rm_ndbcluster_tables ($slave->[0]->{'path_myddir'});
+  return;
 }
 
@@ -1525,6 +1637,13 @@ sub mysql_install_db () {
     $flag_ndb_status_ok= 0;
   }
 
+  if ( ndbcluster_install_slave() )
+  {
+    # failed to install, disable usage but flag that it's not ok
+    $opt_with_ndbcluster_slave= 0;
+    $flag_ndb_slave_status_ok= 0;
+  }
+
   return 0;
 }
 
@@ -1848,6 +1967,18 @@ sub run_testcase ($) {
   {
     if ( ! $slave->[$idx]->{'pid'} )
     {
+      if ( $idx == 0)
+      {
+        if ( $slave->[0]->{'ndbcluster'} )
+        {
+          $slave->[0]->{'ndbcluster'}= ndbcluster_start_slave();
+          if ( $slave->[0]->{'ndbcluster'} )
+          {
+            report_failure_and_restart($tinfo);
+            return;
+          }
+        }
+      }
       $slave->[$idx]->{'pid'}=
         mysqld_start('slave',$idx,
                      $tinfo->{'slave_opt'}, $tinfo->{'slave_mi'});
@@ -2115,6 +2246,12 @@ sub mysqld_arguments ($$$$$) {
     {
       mtr_add_arg($args, "%s--skip-ndbcluster", $prefix);
     }
+    if ( $opt_with_ndbcluster )
+    {
+      mtr_add_arg($args, "%s--ndbcluster", $prefix);
+      mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
+                  $opt_ndbconnectstring);
+    }
   }
 
   if ( $type eq 'slave' )
@@ -2171,6 +2308,17 @@ sub mysqld_arguments ($$$$$) {
       mtr_add_arg($args, "%s--server-id=%d", $prefix, $slave_server_id);
       mtr_add_arg($args, "%s--rpl-recovery-rank=%d", $prefix, $slave_rpl_rank);
     }
+
+    if ( $opt_skip_ndbcluster_slave )
+    {
+      mtr_add_arg($args, "%s--skip-ndbcluster", $prefix);
+    }
+    if ( $idx == 0 and $opt_with_ndbcluster_slave )
+    {
+      mtr_add_arg($args, "%s--ndbcluster", $prefix);
+      mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
+                  $opt_ndbconnectstring_slave);
+    }
   } # end slave
 
   if ( $opt_debug )
@@ -2187,13 +2335,6 @@ sub mysqld_arguments ($$$$$) {
     }
   }
 
-  if ( $opt_with_ndbcluster )
-  {
-    mtr_add_arg($args, "%s--ndbcluster", $prefix);
-    mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
-                $opt_ndbconnectstring);
-  }
-
   # FIXME always set nowdays??? SMALL_SERVER
   mtr_add_arg($args, "%s--key_buffer_size=1M", $prefix);
   mtr_add_arg($args, "%s--sort_buffer=256K", $prefix);
@@ -2408,6 +2549,12 @@ sub stop_slaves () {
     }
   }
 
+  if ( ! $slave->[0]->{'ndbcluster'} )
+  {
+    ndbcluster_stop_slave();
+    $slave->[0]->{'ndbcluster'}= 1;
+  }
+
   mtr_stop_mysqld_servers(\@args);
 }
 
@@ -2617,6 +2764,8 @@ sub run_mysqltest ($) {
   $ENV{'MYSQL_MY_PRINT_DEFAULTS'}= $exe_my_print_defaults;
 
   $ENV{'NDB_STATUS_OK'}=    $flag_ndb_status_ok;
+  $ENV{'NDB_SLAVE_STATUS_OK'}= $flag_ndb_slave_status_ok;
+  $ENV{'NDB_EXTRA_TEST'}=   $opt_ndb_extra_test;
   $ENV{'NDB_MGM'}=          $exe_ndb_mgm;
   $ENV{'NDB_BACKUP_DIR'}=   $path_ndb_backup_dir;
   $ENV{'NDB_TOOLS_DIR'}=    $path_ndb_tools_dir;
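Editor's note: the NDB_EXTRA_TEST and NDB_SLAVE_STATUS_OK variables exported here are consumed on the mysqltest side. A test that must only run when the extra ndb setup is available simply sources the new include file, whose --require check skips the test otherwise (this mirrors have_ndb_extra.inc above; the test body is a placeholder):

    -- source include/have_ndb_extra.inc
    # ... test body that relies on the slave cluster / extra ndb setup ...
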
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index 0dd405e999e..e2641472ad2 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -222,11 +222,14 @@ export UMASK UMASK_DIR
 
 MASTER_RUNNING=0
 MASTER1_RUNNING=0
+MASTER_MYHOST=127.0.0.1
 MASTER_MYPORT=9306
 SLAVE_RUNNING=0
+SLAVE_MYHOST=127.0.0.1
 SLAVE_MYPORT=9308 # leave room for 2 masters for cluster tests
 MYSQL_MANAGER_PORT=9305 # needs to be out of the way of slaves
 NDBCLUSTER_PORT=9350
+NDBCLUSTER_PORT_SLAVE=9358
 MYSQL_MANAGER_PW_FILE=$MYSQL_TEST_DIR/var/tmp/manager.pwd
 MYSQL_MANAGER_LOG=$MYSQL_TEST_DIR/var/log/manager.log
 MYSQL_MANAGER_USER=root
@@ -262,7 +265,11 @@ EXTRA_MYSQLSHOW_OPT=""
 EXTRA_MYSQLBINLOG_OPT=""
 USE_RUNNING_SERVER=0
 USE_NDBCLUSTER=@USE_NDBCLUSTER@
+USE_NDBCLUSTER_SLAVE=@USE_NDBCLUSTER@
 USE_RUNNING_NDBCLUSTER=""
+USE_RUNNING_NDBCLUSTER_SLAVE=""
+NDB_EXTRA_TEST=0
+NDBCLUSTER_EXTRA_OPTS=""
 USE_PURIFY=""
 PURIFY_LOGS=""
 DO_GCOV=""
@@ -288,6 +295,7 @@ TEST_MODE=""
 NDB_MGM_EXTRA_OPTS=
 NDB_MGMD_EXTRA_OPTS=
 NDBD_EXTRA_OPTS=
+SLAVE_MYSQLDBINLOG=1
 
 DO_STRESS=""
 STRESS_SUITE="main"
@@ -325,9 +333,18 @@ while test $# -gt 0; do
     --extern) USE_RUNNING_SERVER=1 ;;
     --with-ndbcluster)
      USE_NDBCLUSTER="--ndbcluster" ;;
+    --with-ndbcluster-slave)
+     USE_NDBCLUSTER_SLAVE="--ndbcluster" ;;
     --ndb-connectstring=*)
      USE_NDBCLUSTER="--ndbcluster" ;
      USE_RUNNING_NDBCLUSTER=`$ECHO "$1" | $SED -e "s;--ndb-connectstring=;;"` ;;
+    --ndb-connectstring-slave=*)
+     USE_NDBCLUSTER_SLAVE="--ndbcluster" ;
+     USE_RUNNING_NDBCLUSTER_SLAVE=`$ECHO "$1" | $SED -e "s;--ndb-connectstring-slave=;;"` ;;
+    --ndb-extra-test)
+     NDBCLUSTER_EXTRA_OPTS=" "
+     NDB_EXTRA_TEST=1 ;
+     ;;
     --ndb_mgm-extra-opts=*)
      NDB_MGM_EXTRA_OPTS=`$ECHO "$1" | $SED -e "s;--ndb_mgm-extra-opts=;;"` ;;
     --ndb_mgmd-extra-opts=*)
@@ -344,6 +361,8 @@ while test $# -gt 0; do
     --slave_port=*) SLAVE_MYPORT=`$ECHO "$1" | $SED -e "s;--slave_port=;;"` ;;
     --manager-port=*) MYSQL_MANAGER_PORT=`$ECHO "$1" | $SED -e "s;--manager_port=;;"` ;;
     --ndbcluster_port=*) NDBCLUSTER_PORT=`$ECHO "$1" | $SED -e "s;--ndbcluster_port=;;"` ;;
+    --ndbcluster-port=*) NDBCLUSTER_PORT=`$ECHO "$1" | $SED -e "s;--ndbcluster-port=;;"` ;;
+    --ndbcluster-port-slave=*) NDBCLUSTER_PORT_SLAVE=`$ECHO "$1" | $SED -e "s;--ndbcluster-port-slave=;;"` ;;
     --with-openssl)
      EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT \
      --ssl-ca=$MYSQL_TEST_DIR/std_data/cacert.pem \
@@ -500,6 +519,10 @@ while test $# -gt 0; do
       TMP=`$ECHO "$1" | $SED -e "s;--valgrind-options=;;"`
       VALGRIND="$VALGRIND $TMP"
       ;;
+    --skip-ndbcluster-slave | --skip-ndb-slave)
+      USE_NDBCLUSTER_SLAVE=""
+      EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-ndbcluster"
+      ;;
     --valgrind-mysqltest | --valgrind-mysqltest-all)
       find_valgrind;
       VALGRIND_MYSQLTEST=$FIND_VALGRIND
@@ -510,9 +533,11 @@ while test $# -gt 0; do
       ;;
     --skip-ndbcluster | --skip-ndb)
       USE_NDBCLUSTER=""
+      USE_NDBCLUSTER_SLAVE=""
      EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-ndbcluster"
      EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-ndbcluster"
      ;;
+    --skip-slave-binlog) SLAVE_MYSQLDBINLOG=0 ;;
     --skip-*)
       EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT $1"
       EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT $1"
@@ -582,9 +607,13 @@ SLAVE_MYERR="$MYSQL_TEST_DIR/var/log/slave.err"
 CURRENT_TEST="$MYSQL_TEST_DIR/var/log/current_test"
 SMALL_SERVER="--key_buffer_size=1M --sort_buffer=256K --max_heap_table_size=1M"
 
-export MASTER_MYPORT SLAVE_MYPORT MYSQL_TCP_PORT MASTER_MYSOCK MASTER_MYSOCK1
+export MASTER_MYHOST MASTER_MYPORT SLAVE_MYHOST SLAVE_MYPORT MYSQL_TCP_PORT MASTER_MYSOCK MASTER_MYSOCK1
 
 NDBCLUSTER_OPTS="--port=$NDBCLUSTER_PORT --data-dir=$MYSQL_TEST_DIR/var --ndb_mgm-extra-opts=$NDB_MGM_EXTRA_OPTS --ndb_mgmd-extra-opts=$NDB_MGMD_EXTRA_OPTS --ndbd-extra-opts=$NDBD_EXTRA_OPTS"
+NDBCLUSTER_OPTS_SLAVE="--port=$NDBCLUSTER_PORT_SLAVE --data-dir=$MYSQL_TEST_DIR/var"
+if [ -n "$USE_NDBCLUSTER_SLAVE" ] ; then
+  USE_NDBCLUSTER_SLAVE="$USE_NDBCLUSTER_SLAVE --ndb-connectstring=localhost:$NDBCLUSTER_PORT_SLAVE"
+fi
 NDB_BACKUP_DIR=$MYSQL_TEST_DIR/var/ndbcluster-$NDBCLUSTER_PORT
 NDB_TOOLS_OUTPUT=$MYSQL_TEST_DIR/var/log/ndb_tools.log
 
@@ -727,7 +756,6 @@ else
   fi
   MYSQL_TEST="$CLIENT_BINDIR/mysqltest"
   MYSQL_CHECK="$CLIENT_BINDIR/mysqlcheck"
-  MYSQL_DUMP="$CLIENT_BINDIR/mysqldump"
   MYSQL_SLAP="$CLIENT_BINDIR/mysqlslap"
   MYSQL_SHOW="$CLIENT_BINDIR/mysqlshow"
   MYSQL_IMPORT="$CLIENT_BINDIR/mysqlimport"
@@ -767,6 +795,13 @@ else
     MYSQL_TEST="$CLIENT_BINDIR/mysqltest"
     MYSQL_CLIENT_TEST="$CLIENT_BINDIR/mysql_client_test"
   fi
+  if [ -f "$BASEDIR/client/.libs/mysqldump" ] ; then
+    MYSQL_DUMP="$BASEDIR/client/.libs/mysqldump"
+  elif [ -f "$BASEDIR/client/mysqldump" ] ; then
+    MYSQL_DUMP="$BASEDIR/client/mysqldump"
+  else
+    MYSQL_DUMP="$BASEDIR/bin/mysqldump"
+  fi
 fi
 
 if [ -z "$MASTER_MYSQLD" ]
@@ -829,6 +864,9 @@ export NDB_TOOLS_OUTPUT
 export PURIFYOPTIONS
 NDB_STATUS_OK=1
 export NDB_STATUS_OK
+NDB_SLAVE_STATUS_OK=1
+export NDB_SLAVE_STATUS_OK
+export NDB_EXTRA_TEST NDBCLUSTER_PORT NDBCLUSTER_PORT_SLAVE
 
 MYSQL_TEST_ARGS="--no-defaults --socket=$MASTER_MYSOCK --database=$DB \
  --user=$DBUSER --password=$DBPASSWD --silent -v --skip-safemalloc \
@@ -1207,24 +1245,30 @@ EOF
 
 start_ndbcluster()
 {
-  if [ ! -z "$USE_NDBCLUSTER" ]
+  if [ ! -n "$USE_NDBCLUSTER" ] ;
+  then
+    USING_NDBCLUSTER=0
+    USE_NDBCLUSTER_OPT=
+  fi
+
+  if [ x$USING_NDBCLUSTER = x1 -a -z "$USE_NDBCLUSTER_OPT" ]
   then
     rm -f $NDB_TOOLS_OUTPUT
     if [ -z "$USE_RUNNING_NDBCLUSTER" ]
     then
-      echo "Starting ndbcluster"
-      if [ "$DO_BENCH" = 1 ]
+      if [ "$DO_BENCH" != 1 -a -z "$NDBCLUSTER_EXTRA_OPTS" ]
       then
-        NDBCLUSTER_EXTRA_OPTS=""
-      else
         NDBCLUSTER_EXTRA_OPTS="--small"
       fi
-      ./ndb/ndbcluster $NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --initial || NDB_STATUS_OK=0
+      OPTS="$NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --verbose=2 --initial"
+      echo "Starting master ndbcluster " $OPTS
+      ./ndb/ndbcluster $OPTS || NDB_STATUS_OK=0
       if [ x$NDB_STATUS_OK != x1 ] ; then
        if [ x$FORCE != x1 ] ; then
          exit 1
        fi
-       USE_NDBCLUSTER=
+       USING_NDBCLUSTER=0
+       USE_NDBCLUSTER_OPT=
        return
      fi
 
@@ -1233,19 +1277,30 @@ start_ndbcluster()
       NDB_CONNECTSTRING="$USE_RUNNING_NDBCLUSTER"
       echo "Using ndbcluster at $NDB_CONNECTSTRING"
     fi
-    USE_NDBCLUSTER="$USE_NDBCLUSTER --ndb-connectstring=\"$NDB_CONNECTSTRING\""
+    USE_NDBCLUSTER_OPT="$USE_NDBCLUSTER --ndb-connectstring=\"$NDB_CONNECTSTRING\""
     export NDB_CONNECTSTRING
   fi
 }
 
+rm_ndbcluster_tables()
+{
+  $RM -f $1/cluster_replication/apply_status*
+  $RM -f $1/cluster_replication/schema*
+}
+
 stop_ndbcluster()
 {
-z "$USE_NDBCLUSTER" ] + if [ -n "$USE_NDBCLUSTER_OPT" ] then + USE_NDBCLUSTER_OPT= if [ -z "$USE_RUNNING_NDBCLUSTER" ] then # Kill any running ndbcluster stuff + $ECHO "Stopping master cluster" ./ndb/ndbcluster $NDBCLUSTER_OPTS --stop + # remove ndb table the hard way as not to upset later tests + rm_ndbcluster_tables $MASTER_MYDDIR + rm_ndbcluster_tables $MASTER_MYDDIR"1" fi fi } @@ -1300,7 +1355,7 @@ start_master() --local-infile \ --exit-info=256 \ --core \ - $USE_NDBCLUSTER \ + $USE_NDBCLUSTER_OPT \ --datadir=$MASTER_MYDDIR$1 \ --pid-file=$MASTER_MYPID$1 \ --socket=$MASTER_MYSOCK$1 \ @@ -1330,7 +1385,7 @@ start_master() --character-sets-dir=$CHARSETSDIR \ --default-character-set=$CHARACTER_SET \ --core \ - $USE_NDBCLUSTER \ + $USE_NDBCLUSTER_OPT \ --tmpdir=$MYSQL_TMP_DIR \ --language=$LANGUAGE \ --innodb_data_file_path=ibdata1:128M:autoextend \ @@ -1397,6 +1452,7 @@ start_slave() [ x$SKIP_SLAVE = x1 ] && return eval "this_slave_running=\$SLAVE$1_RUNNING" [ x$this_slave_running = 1 ] && return + # When testing fail-safe replication, we will have more than one slave # in this case, we start secondary slaves with an argument slave_ident="slave$1" @@ -1420,6 +1476,36 @@ start_slave() slave_pid=$SLAVE_MYPID slave_sock="$SLAVE_MYSOCK" fi + + # + if [ x$USING_NDBCLUSTER = x1 -a -n "$USE_NDBCLUSTER_SLAVE" ] ; then + if [ $slave_server_id -eq 2 ] ; then + savedir=`pwd` + cd $MYSQL_TEST_DIR + if [ "$DO_BENCH" != 1 -a -z "$NDBCLUSTER_EXTRA_OPTS" ] + then + NDBCLUSTER_EXTRA_OPTS="--small" + fi + + OPTS="$NDBCLUSTER_OPTS_SLAVE --initial $NDBCLUSTER_EXTRA_OPTS --ndbd-nodes=1 --verbose=2" + echo "Starting slave ndbcluster " $OPTS + ./ndb/ndbcluster $OPTS \ + || NDB_SLAVE_STATUS_OK=0 + # > /dev/null 2>&1 || NDB_SLAVE_STATUS_OK=0 + cd $savedir + if [ x$NDB_SLAVE_STATUS_OK != x1 ] ; then + if [ x$FORCE != x1 ] ; then + exit 1 + fi + USE_NDBCLUSTER_SLAVE_OPT= + USE_NDBCLUSTER_SLAVE= + fi + USE_NDBCLUSTER_SLAVE_OPT=$USE_NDBCLUSTER_SLAVE + fi + else + USE_NDBCLUSTER_SLAVE_OPT= + fi + # Remove stale binary logs and old master.info files # except for too tests which need them if [ "$tname" != "rpl_crash_binlog_ib_1b" ] && [ "$tname" != "rpl_crash_binlog_ib_2b" ] && [ "$tname" != "rpl_crash_binlog_ib_3b" ] @@ -1445,12 +1531,16 @@ start_slave() master_info=$SLAVE_MASTER_INFO fi + if [ x$SLAVE_MYSQLDBINLOG = x1 ] + then + EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --log-bin=$MYSQL_TEST_DIR/var/log/$slave_ident-bin --log-slave-updates" + fi + $RM -f $slave_datadir/log.* slave_args="--no-defaults $master_info \ --exit-info=256 \ - --log-bin=$MYSQL_TEST_DIR/var/log/$slave_ident-bin \ + $SLAVE_MYSQLDBINLOG_OPT \ --relay-log=$MYSQL_TEST_DIR/var/log/$slave_ident-relay-bin \ - --log-slave-updates \ --log=$slave_log \ --basedir=$MY_BASEDIR \ --datadir=$slave_datadir \ @@ -1472,7 +1562,8 @@ start_slave() --log-bin-trust-function-creators \ --loose-binlog-show-xid=0 \ $SMALL_SERVER \ - $EXTRA_SLAVE_MYSQLD_OPT $EXTRA_SLAVE_OPT" + $EXTRA_SLAVE_MYSQLD_OPT $EXTRA_SLAVE_OPT \ + $USE_NDBCLUSTER_SLAVE_OPT" CUR_MYERR=$slave_err CUR_MYSOCK=$slave_sock @@ -1520,7 +1611,6 @@ mysql_start () # start_master # start_slave cd $MYSQL_TEST_DIR - start_ndbcluster return 1 } @@ -1554,6 +1644,14 @@ stop_slave () sleep $SLEEP_TIME_AFTER_RESTART fi eval "SLAVE$1_RUNNING=0" + if [ -n "$USE_NDBCLUSTER_SLAVE_OPT" ] ; then + savedir=`pwd` + cd $MYSQL_TEST_DIR + $ECHO "Stopping slave cluster" + ./ndb/ndbcluster $NDBCLUSTER_OPTS_SLAVE --stop + rm_ndbcluster_tables $SLAVE_MYDDIR + cd $savedir + fi fi } @@ -1642,6 +1740,7 @@ run_testcase () 
   result_file="r/$tname.result"
   echo $tname > $CURRENT_TEST
   SKIP_SLAVE=`$EXPR \( $tname : rpl \) = 0 \& \( $tname : federated \) = 0`
+  NDBCLUSTER_TEST=`$EXPR \( $tname : '.*ndb.*' \) != 0`
   if [ "$USE_MANAGER" = 1 ] ; then
    many_slaves=`$EXPR \( \( $tname : rpl_failsafe \) != 0 \) \| \( \( $tname : rpl_chain_temp_table \) != 0 \)`
   fi
@@ -1731,9 +1830,19 @@ run_testcase ()
       esac
       stop_master
       stop_master 1
+
+      # only stop the cluster if this test will not use cluster
+      if [ x$NDBCLUSTER_TEST != x1 ] ;
+      then
+        stop_ndbcluster
+      fi
+
       report_current_test $tname
+      USING_NDBCLUSTER=$NDBCLUSTER_TEST
+      # start_ndbcluster knows if cluster is already started
+      start_ndbcluster
       start_master
-      if [ -n "$USE_NDBCLUSTER" -a -z "$DO_BENCH" -a -z "$DO_STRESS" ] ; then
+      if [ x$USING_NDBCLUSTER = x1 -a -z "$DO_BENCH" -a -z "$DO_STRESS" ] ; then
         start_master 1
       fi
       TZ=$MY_TZ; export TZ
@@ -1742,14 +1851,25 @@ run_testcase ()
     # or there is no master running (FIXME strange.....)
     # or there is a master init script
     if [ ! -z "$EXTRA_MASTER_OPT" ] || [ x$MASTER_RUNNING != x1 ] || \
-       [ -f $master_init_script ]
+       [ -f $master_init_script ] || \
+       [ -n "$USE_NDBCLUSTER" -a x$NDBCLUSTER_TEST != x$USING_NDBCLUSTER ]
     then
       EXTRA_MASTER_OPT=""
       stop_master
       stop_master 1
+
+      # only stop the cluster if this test will not use cluster
+      if [ x$NDBCLUSTER_TEST != x1 ] ;
+      then
+        stop_ndbcluster
+      fi
+
       report_current_test $tname
+      USING_NDBCLUSTER=$NDBCLUSTER_TEST
+      # start_ndbcluster knows if cluster is already started
+      start_ndbcluster
       start_master
-      if [ -n "$USE_NDBCLUSTER" -a -z "$DO_BENCH" -a -z "$DO_STRESS" ] ; then
+      if [ x$USING_NDBCLUSTER = x1 -a -z "$DO_BENCH" -a -z "$DO_STRESS" ] ; then
         start_master 1
       fi
     else
@@ -1783,6 +1903,18 @@ run_testcase ()
       fi
     fi
 
+    USING_NDBCLUSTER=$NDBCLUSTER_TEST
+    if [ -n "$USE_NDBCLUSTER_SLAVE_OPT" ] ; then
+      if [ x$USING_NDBCLUSTER != x1 ] ; then
+        do_slave_restart=1
+      fi
+    else
+      if [ x$USING_NDBCLUSTER = x1 ] ; then
+        do_slave_restart=1
+      fi
+    fi
+
+    if [ x$do_slave_restart = x1 ] ; then
     stop_slave
     echo "CURRENT_TEST: $tname" >> $SLAVE_MYERR
@@ -2041,6 +2173,8 @@ then
     fi
   fi
 
+  # just to force stopping anything from previous runs
+  USE_NDBCLUSTER_OPT=$USE_NDBCLUSTER
   stop_ndbcluster
 
   # Remove files that can cause problems
@@ -2058,7 +2192,8 @@ then
 
   if [ -n "$1" -a `expr "X$*" : '.*ndb'` -eq 0 ]
   then
-    USE_NDBCLUSTER=""
+    USING_NDBCLUSTER=0
+    USE_NDBCLUSTER_OPT=
  fi
 
  start_manager
@@ -2090,7 +2225,7 @@ then
     EXTRA_BENCH_ARGS="--small-test --small-tables"
   fi
 
-z "$USE_NDBCLUSTER" ] + if [ x$USING_NDBCLUSTER = x1 ] then EXTRA_BENCH_ARGS="--create-options=TYPE=ndb $EXTRA_BENCH_ARGS" fi diff --git a/mysql-test/ndb/Makefile.am b/mysql-test/ndb/Makefile.am index 178e40fb19a..1ef02bec1f0 100644 --- a/mysql-test/ndb/Makefile.am +++ b/mysql-test/ndb/Makefile.am @@ -6,7 +6,7 @@ test_SCRIPTS = ndbcluster noinst_HEADERS = ndbcluster.sh -dist_test_DATA = ndb_config_2_node.ini +dist_test_DATA = ndb_config_2_node.ini ndb_config_1_node.ini SUFFIXES = .sh diff --git a/mysql-test/r/drop.result b/mysql-test/r/drop.result index 979e5d48871..02342485c04 100644 --- a/mysql-test/r/drop.result +++ b/mysql-test/r/drop.result @@ -47,6 +47,7 @@ create database mysqltest; show databases; Database information_schema +cluster_replication mysql mysqltest test @@ -58,6 +59,7 @@ drop database mysqltest; show databases; Database information_schema +cluster_replication mysql test drop database mysqltest; diff --git a/mysql-test/r/have_ndb_extra.require b/mysql-test/r/have_ndb_extra.require new file mode 100644 index 00000000000..8f7c125196a --- /dev/null +++ b/mysql-test/r/have_ndb_extra.require @@ -0,0 +1,3 @@ +select 1; +1 +1 diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 5357220b6c5..9cb96c2d73b 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -14,6 +14,7 @@ NULL test latin1 latin1_swedish_ci NULL select schema_name from information_schema.schemata; schema_name information_schema +cluster_replication mysql test show databases like 't%'; @@ -22,6 +23,7 @@ test show databases; Database information_schema +cluster_replication mysql test show databases where `database` = 't%'; @@ -55,6 +57,7 @@ TABLE_PRIVILEGES TRIGGERS VIEWS USER_PRIVILEGES +binlog_index columns_priv db event @@ -331,6 +334,7 @@ create view v0 (c) as select schema_name from information_schema.schemata; select * from v0; c information_schema +cluster_replication mysql test explain select * from v0; @@ -728,7 +732,7 @@ CREATE TABLE t_crashme ( f1 BIGINT); CREATE VIEW a1 (t_CRASHME) AS SELECT f1 FROM t_crashme GROUP BY f1; CREATE VIEW a2 AS SELECT t_CRASHME FROM a1; count(*) -106 +107 drop view a2, a1; drop table t_crashme; select table_schema,table_name, column_name from @@ -810,6 +814,7 @@ delete from mysql.db where user='mysqltest_4'; flush privileges; SELECT table_schema, count(*) FROM information_schema.TABLES GROUP BY TABLE_SCHEMA; table_schema count(*) +cluster_replication 1 information_schema 19 mysql 19 create table t1 (i int, j int); diff --git a/mysql-test/r/mysqlcheck.result b/mysql-test/r/mysqlcheck.result index 1d6bfda0f55..e6251dd9422 100644 --- a/mysql-test/r/mysqlcheck.result +++ b/mysql-test/r/mysqlcheck.result @@ -1,3 +1,4 @@ +cluster_replication.binlog_index OK mysql.columns_priv OK mysql.db OK mysql.event OK diff --git a/mysql-test/r/mysqltest.result b/mysql-test/r/mysqltest.result index 067054510c2..ed474265b9b 100644 --- a/mysql-test/r/mysqltest.result +++ b/mysql-test/r/mysqltest.result @@ -215,12 +215,11 @@ a long $where variable content mysqltest: At line 1: Missing arguments to let mysqltest: At line 1: Missing variable name in let -mysqltest: At line 1: Variable name in hi=hi does not start with '$' mysqltest: At line 1: Missing assignment operator in let mysqltest: At line 1: Missing assignment operator in let mysqltest: At line 1: Missing arguments to let mysqltest: At line 1: Missing variable name in let -mysqltest: At line 1: Variable name in =hi does not start with '$' +mysqltest: 
+mysqltest: At line 1: Missing variable name in let
 mysqltest: At line 1: Missing assignment operator in let
 mysqltest: At line 1: Missing file name in source
 mysqltest: At line 1: Could not open file ./non_existingFile
diff --git a/mysql-test/r/ndb_alter_table.result b/mysql-test/r/ndb_alter_table.result
index 7526e7348d6..a0220a0bad8 100644
--- a/mysql-test/r/ndb_alter_table.result
+++ b/mysql-test/r/ndb_alter_table.result
@@ -170,35 +170,6 @@ c
 4
 5
 drop table t1;
-create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
-engine=ndb;
-insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
-create index c on t1(c);
-select * from t1 where b = 'two';
-a b c
-2 two two
-alter table t1 drop index c;
-select * from t1 where b = 'two';
-ERROR HY000: Can't lock file (errno: 159)
-select * from t1 where b = 'two';
-a b c
-2 two two
-drop table t1;
-create table t3 (a int primary key) engine=ndbcluster;
-begin;
-insert into t3 values (1);
-alter table t3 rename t4;
-delete from t3;
-insert into t3 values (1);
-commit;
-select * from t3;
-ERROR HY000: Can't lock file (errno: 155)
-select * from t4;
-a
-1
-drop table t4;
-show tables;
-Tables_in_test
 create table t1 (
 ai bigint auto_increment,
 c001 int(11) not null,
diff --git a/mysql-test/r/ndb_alter_table_row.result b/mysql-test/r/ndb_alter_table_row.result
new file mode 100644
index 00000000000..450b2c9a5af
--- /dev/null
+++ b/mysql-test/r/ndb_alter_table_row.result
@@ -0,0 +1,26 @@
+DROP TABLE IF EXISTS t1;
+create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
+engine=ndb;
+insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
+create index c on t1(c);
+select * from t1 where c = 'two';
+a b c
+2 two two
+alter table t1 drop index c;
+select * from t1 where c = 'two';
+a b c
+2 two two
+drop table t1;
+create table t3 (a int primary key) engine=ndbcluster;
+begin;
+insert into t3 values (1);
+alter table t3 rename t4;
+commit;
+select * from t3;
+ERROR 42S02: Table 'test.t3' doesn't exist
+select * from t4;
+a
+1
+drop table t4;
+show tables;
+Tables_in_test
diff --git a/mysql-test/r/ndb_alter_table_stm.result b/mysql-test/r/ndb_alter_table_stm.result
new file mode 100644
index 00000000000..a48c23f3cbd
--- /dev/null
+++ b/mysql-test/r/ndb_alter_table_stm.result
@@ -0,0 +1,30 @@
+DROP TABLE IF EXISTS t1;
+create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
+engine=ndb;
+insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
+create index c on t1(c);
+select * from t1 where c = 'two';
+a b c
+2 two two
+alter table t1 drop index c;
+select * from t1 where c = 'two';
+ERROR HY000: Can't lock file (errno: 159)
+select * from t1 where c = 'two';
+a b c
+2 two two
+drop table t1;
+create table t3 (a int primary key) engine=ndbcluster;
+begin;
+insert into t3 values (1);
+alter table t3 rename t4;
+delete from t3;
+insert into t3 values (1);
+commit;
+select * from t3;
+ERROR HY000: Can't lock file (errno: 155)
+select * from t4;
+a
+1
+drop table t4;
+show tables;
+Tables_in_test
diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result
index 00308720792..1e863c9012f 100644
--- a/mysql-test/r/ndb_basic.result
+++ b/mysql-test/r/ndb_basic.result
@@ -6,6 +6,13 @@ attr1 INT NOT NULL,
 attr2 INT,
 attr3 VARCHAR(10)
 ) ENGINE=ndbcluster;
+drop table t1;
+CREATE TABLE t1 (
+pk1 INT NOT NULL PRIMARY KEY,
+attr1 INT NOT NULL,
+attr2 INT,
+attr3 VARCHAR(10)
+) ENGINE=ndbcluster;
 SHOW INDEX FROM t1;
 Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
 t1 0 PRIMARY 1 pk1 A 0 NULL NULL  BTREE
diff --git a/mysql-test/r/ndb_binlog_basic.result b/mysql-test/r/ndb_binlog_basic.result
new file mode 100644
index 00000000000..d84a331ee16
--- /dev/null
+++ b/mysql-test/r/ndb_binlog_basic.result
@@ -0,0 +1,62 @@
+drop table if exists t1, t2;
+drop database if exists mysqltest;
+create database mysqltest;
+use mysqltest;
+drop database mysqltest;
+use test;
+create table t1 (a int primary key) engine=ndb;
+insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+select @max_epoch:=max(epoch)-1 from cluster_replication.binlog_index;
+@max_epoch:=max(epoch)-1
+#
+delete from t1;
+alter table t1 add (b int);
+insert into t1 values (3,3),(4,4);
+alter table t1 rename t2;
+begin;
+insert into t2 values (1,1),(2,2);
+update t2 set b=1 where a=3;
+delete from t2 where a=4;
+commit;
+drop table t2;
+select inserts from cluster_replication.binlog_index where epoch > @max_epoch and inserts > 5;
+inserts
+10
+select deletes from cluster_replication.binlog_index where epoch > @max_epoch and deletes > 5;
+deletes
+10
+select inserts,updates,deletes from
+cluster_replication.binlog_index where epoch > @max_epoch and updates > 0;
+inserts updates deletes
+2 1 1
+select schemaops from
+cluster_replication.binlog_index where epoch > @max_epoch and schemaops > 0;
+schemaops
+1
+1
+1
+flush logs;
+purge master logs before now();
+select count(*) from cluster_replication.binlog_index;
+count(*)
+0
+create table t1 (a int primary key, b int) engine=ndb;
+create database mysqltest;
+use mysqltest;
+create table t1 (c int, d int primary key) engine=ndb;
+use test;
+insert into mysqltest.t1 values (2,1),(2,2);
+select @max_epoch:=max(epoch)-1 from cluster_replication.binlog_index;
+@max_epoch:=max(epoch)-1
+#
+drop table t1;
+drop database mysqltest;
+select inserts,updates,deletes from
+cluster_replication.binlog_index where epoch > @max_epoch and inserts > 0;
+inserts updates deletes
+2 0 0
+select schemaops from
+cluster_replication.binlog_index where epoch > @max_epoch and schemaops > 0;
+schemaops
+1
+1
diff --git a/mysql-test/r/ndb_binlog_multi.result b/mysql-test/r/ndb_binlog_multi.result
new file mode 100644
index 00000000000..5f739327db2
--- /dev/null
+++ b/mysql-test/r/ndb_binlog_multi.result
@@ -0,0 +1,43 @@
+drop table if exists t1,t2;
+drop table if exists t1,t2;
+SHOW TABLES;
+Tables_in_test
+CREATE TABLE t2 (a INT PRIMARY KEY, b int) ENGINE = NDB;
+show tables;
+Tables_in_test
+t2
+INSERT INTO t2 VALUES (1,1),(2,2);
+select * from t2 order by a;
+a b
+1 1
+2 2
+SELECT @the_epoch:=epoch,inserts,updates,deletes,schemaops FROM
+cluster_replication.binlog_index ORDER BY epoch DESC LIMIT 1;
+@the_epoch:=epoch inserts updates deletes schemaops
+<the_epoch> 2 0 0 0
+SELECT * FROM t2 ORDER BY a;
+a b
+1 1
+2 2
+SELECT inserts,updates,deletes,schemaops FROM
+cluster_replication.binlog_index WHERE epoch=<the_epoch>;
+inserts updates deletes schemaops
+2 0 0 0
+DROP TABLE t2;
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE = NDB;
+INSERT INTO t1 VALUES (1),(2);
+SELECT @the_epoch2:=epoch,inserts,updates,deletes,schemaops FROM
+cluster_replication.binlog_index ORDER BY epoch DESC LIMIT 1;
+@the_epoch2:=epoch inserts updates deletes schemaops
+<the_epoch2> 2 0 0 0
+SELECT inserts,updates,deletes,schemaops FROM
+cluster_replication.binlog_index WHERE epoch > <the_epoch> AND epoch < <the_epoch2>;
+inserts updates deletes schemaops
+0 0 0 1
+drop table t1;
+SHOW TABLES;
+Tables_in_test
+SELECT inserts,updates,deletes,schemaops FROM
+cluster_replication.binlog_index WHERE epoch > <the_epoch> AND epoch < <the_epoch2>;
+inserts updates deletes schemaops
+0 0 0 1
diff --git a/mysql-test/r/ndb_multi.result b/mysql-test/r/ndb_multi.result
index 8409ccab1ea..5147d052d6e 100644
--- a/mysql-test/r/ndb_multi.result
+++ b/mysql-test/r/ndb_multi.result
@@ -1,4 +1,5 @@
 drop table if exists t1, t2, t3, t4;
+drop table if exists t1, t2, t3, t4;
 flush status;
 create table t1 (a int) engine=ndbcluster;
 create table t2 (a int) engine=ndbcluster;
diff --git a/mysql-test/r/ndb_multi_row.result b/mysql-test/r/ndb_multi_row.result
new file mode 100644
index 00000000000..ac94ebc3e5c
--- /dev/null
+++ b/mysql-test/r/ndb_multi_row.result
@@ -0,0 +1,69 @@
+drop table if exists t1, t2, t3, t4;
+drop table if exists t1, t2, t3, t4;
+flush status;
+create table t1 (a int) engine=ndbcluster;
+create table t2 (a int) engine=ndbcluster;
+insert into t1 value (2);
+insert into t2 value (3);
+select * from t1;
+a
+2
+select * from t2;
+a
+3
+show status like 'handler_discover%';
+Variable_name Value
+Handler_discover 0
+select * from t1;
+a
+2
+drop table t1;
+create table t1 (a int) engine=ndbcluster;
+insert into t1 value (2);
+select * from t1;
+a
+2
+show status like 'handler_discover%';
+Variable_name Value
+Handler_discover 0
+drop table t1;
+create table t1 (a int) engine=ndbcluster;
+insert into t1 value (2);
+select * from t1;
+a
+2
+flush status;
+select * from t1;
+a
+2
+update t1 set a=3 where a=2;
+show status like 'handler_discover%';
+Variable_name Value
+Handler_discover 0
+create table t3 (a int not null primary key, b varchar(22),
+c int, last_col text) engine=ndb;
+insert into t3 values(1, 'Hi!', 89, 'Longtext column');
+create table t4 (pk int primary key, b int) engine=ndb;
+select * from t1;
+a
+3
+select * from t3;
+a b c last_col
+1 Hi! 89 Longtext column
+show status like 'handler_discover%';
+Variable_name Value
+Handler_discover 1
+show tables like 't4';
+Tables_in_test (t4)
+t4
+show status like 'handler_discover%';
+Variable_name Value
+Handler_discover 2
+show tables;
+Tables_in_test
+t1
+t2
+t3
+t4
+drop table t1, t2, t3, t4;
+drop table t1, t3, t4;
diff --git a/mysql-test/r/not_ndb.require b/mysql-test/r/not_ndb.require
new file mode 100644
index 00000000000..36fcf7958d4
--- /dev/null
+++ b/mysql-test/r/not_ndb.require
@@ -0,0 +1,2 @@
+Variable_name Value
+have_ndbcluster NO
diff --git a/mysql-test/r/ps_1general.result b/mysql-test/r/ps_1general.result
index 756772c16a5..cf858cc1d32 100644
--- a/mysql-test/r/ps_1general.result
+++ b/mysql-test/r/ps_1general.result
@@ -259,6 +259,7 @@ prepare stmt4 from ' show databases ';
 execute stmt4;
 Database
 information_schema
+cluster_replication
 mysql
 test
 prepare stmt4 from ' show tables from test like ''t2%'' ';
diff --git a/mysql-test/r/rpl000009.result b/mysql-test/r/rpl000009.result
index a4dbf54f39b..9a3a2e7af3f 100644
--- a/mysql-test/r/rpl000009.result
+++ b/mysql-test/r/rpl000009.result
@@ -33,6 +33,7 @@ create database mysqltest;
 show databases;
 Database
 information_schema
+cluster_replication
 mysql
 mysqltest
 mysqltest2
@@ -50,6 +51,7 @@ set sql_log_bin = 1;
 show databases;
 Database
 information_schema
+cluster_replication
 mysql
 test
 create database mysqltest2;
@@ -69,6 +71,7 @@ load data from master;
 show databases;
 Database
 information_schema
+cluster_replication
 mysql
 mysqltest
 mysqltest2
diff --git a/mysql-test/r/rpl_create_database.result b/mysql-test/r/rpl_create_database.result
index 82ab8c6f434..74708c8ec9a 100644
--- a/mysql-test/r/rpl_create_database.result
+++ b/mysql-test/r/rpl_create_database.result
@@ -23,6 +23,7 @@ ALTER DATABASE mysqltest_bob CHARACTER SET latin1;
 SHOW DATABASES;
 Database
 information_schema
+cluster_replication
 mysql
 mysqltest_bob
 mysqltest_prometheus
@@ -31,6 +32,7 @@ test
 SHOW DATABASES;
 Database
 information_schema
+cluster_replication
 mysql
 mysqltest_prometheus
 mysqltest_sisyfos
@@ -45,6 +47,7 @@ CREATE TABLE t2 (a INT);
 SHOW DATABASES;
 Database
 information_schema
+cluster_replication
 mysql
 mysqltest_bob
 mysqltest_prometheus
@@ -53,6 +56,7 @@ test
 SHOW DATABASES;
 Database
 information_schema
+cluster_replication
 mysql
 mysqltest_prometheus
 mysqltest_sisyfos
diff --git a/mysql-test/r/rpl_loaddata_m.result b/mysql-test/r/rpl_loaddata_m.result
index 1d263a41e1b..54348df9aaa 100644
--- a/mysql-test/r/rpl_loaddata_m.result
+++ b/mysql-test/r/rpl_loaddata_m.result
@@ -21,6 +21,7 @@ COUNT(*)
 SHOW DATABASES;
 Database
 information_schema
+cluster_replication
 mysql
 mysqltest
 test
diff --git a/mysql-test/r/rpl_ndb_bank.result b/mysql-test/r/rpl_ndb_bank.result
new file mode 100644
index 00000000000..2d2d54c9170
--- /dev/null
+++ b/mysql-test/r/rpl_ndb_bank.result
@@ -0,0 +1,120 @@
+stop slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+start slave;
+CREATE DATABASE IF NOT EXISTS BANK;
+DROP DATABASE BANK;
+CREATE DATABASE BANK default charset=latin1 default collate=latin1_bin;
+USE BANK;
+CREATE TABLE GL ( TIME BIGINT UNSIGNED NOT NULL,
+ACCOUNT_TYPE INT UNSIGNED NOT NULL,
+BALANCE INT UNSIGNED NOT NULL,
+DEPOSIT_COUNT INT UNSIGNED NOT NULL,
+DEPOSIT_SUM INT UNSIGNED NOT NULL,
+WITHDRAWAL_COUNT INT UNSIGNED NOT NULL,
+WITHDRAWAL_SUM INT UNSIGNED NOT NULL,
+PURGED INT UNSIGNED NOT NULL,
+PRIMARY KEY USING HASH (TIME,ACCOUNT_TYPE))
+ENGINE = NDB;
+CREATE TABLE ACCOUNT ( ACCOUNT_ID INT UNSIGNED NOT NULL,
+OWNER INT UNSIGNED NOT NULL,
+BALANCE INT UNSIGNED NOT NULL,
+ACCOUNT_TYPE INT UNSIGNED NOT NULL,
+PRIMARY KEY USING HASH (ACCOUNT_ID))
+ENGINE = NDB;
+CREATE TABLE TRANSACTION ( TRANSACTION_ID BIGINT UNSIGNED NOT NULL,
+ACCOUNT INT UNSIGNED NOT NULL,
+ACCOUNT_TYPE INT UNSIGNED NOT NULL,
+OTHER_ACCOUNT INT UNSIGNED NOT NULL,
+TRANSACTION_TYPE INT UNSIGNED NOT NULL,
+TIME BIGINT UNSIGNED NOT NULL,
+AMOUNT INT UNSIGNED NOT NULL,
+PRIMARY KEY USING HASH (TRANSACTION_ID,ACCOUNT))
+ENGINE = NDB;
+CREATE TABLE SYSTEM_VALUES ( SYSTEM_VALUES_ID INT UNSIGNED NOT NULL,
+VALUE BIGINT UNSIGNED NOT NULL,
+PRIMARY KEY USING HASH (SYSTEM_VALUES_ID))
+ENGINE = NDB;
+CREATE TABLE ACCOUNT_TYPE ( ACCOUNT_TYPE_ID INT UNSIGNED NOT NULL,
+DESCRIPTION CHAR(64) NOT NULL,
+PRIMARY KEY USING HASH (ACCOUNT_TYPE_ID))
+ENGINE = NDB;
+STOP SLAVE;
+RESET SLAVE;
+CREATE DATABASE IF NOT EXISTS BANK;
+DROP DATABASE BANK;
+CREATE DATABASE BANK;
+RESET MASTER;
+CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT) ENGINE = HEAP;
+DELETE FROM cluster_replication.backup_info;
+LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
+SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info;
+@the_backup_id:=backup_id
+<the_backup_id>
+SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
+@the_epoch:=MAX(epoch)
+<the_epoch>
+SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
+FROM cluster_replication.binlog_index WHERE epoch > <the_epoch> ORDER BY epoch ASC LIMIT 1;
+@the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1)
+<the_pos> master-bin.000001
+CHANGE MASTER TO
+master_log_file = 'master-bin.000001',
+master_log_pos = <the_pos>;
+START SLAVE;
+SHOW SLAVE STATUS;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
+<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
+STOP SLAVE;
+START SLAVE;
+SHOW SLAVE STATUS;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
+<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
+STOP SLAVE;
+START SLAVE;
+SHOW SLAVE STATUS;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
+<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
+STOP SLAVE;
+START SLAVE;
+SHOW SLAVE STATUS;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
+<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
+STOP SLAVE;
+START SLAVE;
+SHOW SLAVE STATUS;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
+<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
+STOP SLAVE;
+START SLAVE;
+SHOW SLAVE STATUS;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
+<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
+STOP SLAVE;
+START SLAVE;
+SHOW SLAVE STATUS;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
+<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
+STOP SLAVE;
+START SLAVE;
+SHOW SLAVE STATUS;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
+<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
+STOP SLAVE;
+START SLAVE;
+SHOW SLAVE STATUS;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
+<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
+STOP SLAVE;
+START SLAVE;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
+<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
+STOP SLAVE;
+START SLAVE;
+use test;
+create table t1 (a int primary key) engine=ndb;
+insert into t1 values (1);
+drop table t1;
diff --git a/mysql-test/r/rpl_ndb_basic.result b/mysql-test/r/rpl_ndb_basic.result
new file mode 100644
index 00000000000..0fe681622c9
--- /dev/null
b/mysql-test/r/rpl_ndb_basic.result @@ -0,0 +1,90 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0', +`nom` char(4) default NULL, +`prenom` char(4) default NULL, +PRIMARY KEY (`nid`)) +ENGINE=ndbcluster DEFAULT CHARSET=latin1; +INSERT INTO t1 VALUES(1,"XYZ1","ABC1"); +select * from t1 order by nid; +nid nom prenom +1 XYZ1 ABC1 +select * from t1 order by nid; +nid nom prenom +1 XYZ1 ABC1 +delete from t1; +INSERT INTO t1 VALUES(1,"XYZ2","ABC2"); +select * from t1 order by nid; +nid nom prenom +1 XYZ2 ABC2 +select * from t1 order by nid; +nid nom prenom +1 XYZ2 ABC2 +DROP table t1; +CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0', +`nom` char(4) default NULL, +`prenom` char(4) default NULL) +ENGINE=ndbcluster DEFAULT CHARSET=latin1; +INSERT INTO t1 VALUES(1,"XYZ1","ABC1"),(2,"AAA","BBB"),(3,"CCC","DDD"); +select * from t1 order by nid; +nid nom prenom +1 XYZ1 ABC1 +2 AAA BBB +3 CCC DDD +select * from t1 order by nid; +nid nom prenom +1 XYZ1 ABC1 +2 AAA BBB +3 CCC DDD +delete from t1 where nid = 2; +INSERT INTO t1 VALUES(4,"EEE","FFF"); +select * from t1 order by nid; +nid nom prenom +1 XYZ1 ABC1 +3 CCC DDD +4 EEE FFF +select * from t1 order by nid; +nid nom prenom +1 XYZ1 ABC1 +3 CCC DDD +4 EEE FFF +UPDATE t1 set nid=nid+1; +UPDATE t1 set nom="CCP" where nid = 4; +select * from t1 order by nid; +nid nom prenom +2 XYZ1 ABC1 +4 CCP DDD +5 EEE FFF +select * from t1 order by nid; +nid nom prenom +2 XYZ1 ABC1 +4 CCP DDD +5 EEE FFF +DROP table t1; +CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0', +`nom` char(4) default NULL, +`prenom` char(4) default NULL, +PRIMARY KEY USING HASH (`nid`)) +ENGINE=ndbcluster DEFAULT CHARSET=latin1; +INSERT INTO t1 VALUES(1,"XYZ1","ABC1"); +BEGIN; +UPDATE t1 SET `nom`="LOCK" WHERE `nid`=1; +set GLOBAL slave_transaction_retries=1; +UPDATE t1 SET `nom`="DEAD" WHERE `nid`=1; +SHOW SLAVE STATUS; +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master +<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes No <Replicate_Ignore_Table> 146 Error in Write_rows event: error during transaction execution on table test.t1 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master> +set GLOBAL slave_transaction_retries=10; +START SLAVE; +select * from t1 order by nid; +nid nom prenom +1 LOCK ABC1 +COMMIT; +select * from t1 order by nid; +nid nom prenom +1 DEAD ABC1 +DROP TABLE t1; diff --git a/mysql-test/r/rpl_ndb_disk.result b/mysql-test/r/rpl_ndb_disk.result new file mode 100644 index 00000000000..24488b8e62d --- /dev/null +++ b/mysql-test/r/rpl_ndb_disk.result @@ -0,0 +1,73 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +DROP TABLE IF EXISTS t1; +CREATE LOGFILE GROUP lg1 +ADD UNDOFILE 
'undofile.dat' +INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +alter logfile group lg1 +add undofile 'undofile02.dat' +initial_size 4M engine=ndb; +CREATE TABLESPACE ts1 +ADD DATAFILE 'datafile.dat' +USE LOGFILE GROUP lg1 +INITIAL_SIZE 12M +ENGINE NDB; +alter tablespace ts1 +add datafile 'datafile02.dat' +initial_size 4M engine=ndb; +CREATE TABLE t1 +(pk1 int not null primary key, b int not null, c int not null) +tablespace ts1 storage disk +engine ndb; +insert into t1 values (1,2,3); +select * from t1 order by pk1; +pk1 b c +1 2 3 +select * from t1 order by pk1; +pk1 b c +1 2 3 +show binlog events; +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 4 Format_desc 1 102 Server ver: VERSION, Binlog ver: 4 +master-bin.000001 102 Query 1 188 use `test`; DROP TABLE IF EXISTS t1 +master-bin.000001 188 Query 1 353 CREATE LOGFILE GROUP lg1 +ADD UNDOFILE 'undofile.dat' +INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB +master-bin.000001 353 Query 1 496 alter logfile group lg1 +add undofile 'undofile02.dat' +initial_size 4M engine=ndb +master-bin.000001 496 Query 1 658 CREATE TABLESPACE ts1 +ADD DATAFILE 'datafile.dat' +USE LOGFILE GROUP lg1 +INITIAL_SIZE 12M +ENGINE NDB +master-bin.000001 658 Query 1 798 alter tablespace ts1 +add datafile 'datafile02.dat' +initial_size 4M engine=ndb +master-bin.000001 798 Query 1 978 use `test`; CREATE TABLE t1 +(pk1 int not null primary key, b int not null, c int not null) +tablespace ts1 storage disk +engine ndb +master-bin.000001 978 Query 1 1042 BEGIN +master-bin.000001 1042 Table_map 1 65 cluster_replication.apply_status +master-bin.000001 1107 Write_rows 1 107 +master-bin.000001 1149 Table_map 1 148 test.t1 +master-bin.000001 1190 Write_rows 1 190 +master-bin.000001 1232 Query 1 1297 COMMIT +drop table t1; +alter tablespace ts1 +drop datafile 'datafile.dat' +engine=ndb; +alter tablespace ts1 +drop datafile 'datafile02.dat' +engine=ndb; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg1 ENGINE=NDB; diff --git a/mysql-test/r/rpl_ndb_idempotent.result b/mysql-test/r/rpl_ndb_idempotent.result new file mode 100644 index 00000000000..054ff599c23 --- /dev/null +++ b/mysql-test/r/rpl_ndb_idempotent.result @@ -0,0 +1,72 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 INT, PRIMARY KEY (c3)) ENGINE = NDB ; +INSERT INTO t1 VALUES ("row1","will go away",1); +SELECT * FROM t1 ORDER BY c3; +c1 c2 c3 +row1 will go away 1 +SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status; +@the_epoch:=MAX(epoch) +<the_epoch> +SELECT * FROM t1 ORDER BY c3; +c1 c2 c3 +row1 will go away 1 +SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) +FROM cluster_replication.binlog_index WHERE epoch = <the_epoch> ; +@the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1) +<the_pos> master-bin.000001 +INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4); +DELETE FROM t1 WHERE c3 = 1; +UPDATE t1 SET c2="should go away" WHERE c3 = 2; +UPDATE t1 SET c2="C" WHERE c3 = 3; +DELETE FROM t1 WHERE c3 = 2; +SELECT * FROM t1 ORDER BY c3; +c1 c2 c3 +row3 C 3 +row4 D 4 +SELECT * FROM t1 ORDER BY c3; +c1 c2 c3 +row3 C 3 +row4 D 4 +SHOW SLAVE STATUS; +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB 
Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master +<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master> +STOP SLAVE; +CHANGE MASTER TO +master_log_file = 'master-bin.000001', +master_log_pos = <the_pos> ; +SHOW SLAVE STATUS; +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master +<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 No No <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master> +START SLAVE; +SELECT * FROM t1 ORDER BY c3; +c1 c2 c3 +row3 C 3 +row4 D 4 +SELECT * FROM t1 ORDER BY c3; +c1 c2 c3 +row3 C 3 +row4 D 4 +STOP SLAVE; +DROP TABLE t1; +RESET master; +DROP TABLE t1; +RESET slave; +START SLAVE; +CREATE TABLE t1 (c1 CHAR(15) NOT NULL, c2 CHAR(15) NOT NULL, c3 INT NOT NULL, PRIMARY KEY (c3)) ENGINE = NDB ; +INSERT INTO t1 VALUES ("row1","remove on slave",1); +DELETE FROM t1; +BEGIN; +UPDATE t1 SET c2="does not exist" WHERE c3=1; +INSERT INTO t1 VALUES ("row2","new on slave",2); +COMMIT; +SELECT * FROM t1; +c1 c2 c3 +row2 new on slave 2 +SHOW SLAVE STATUS; +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master +<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master> diff --git a/mysql-test/r/rpl_ndb_load.result b/mysql-test/r/rpl_ndb_load.result new file mode 100644 index 00000000000..e51f0096557 --- /dev/null +++ b/mysql-test/r/rpl_ndb_load.result @@ -0,0 +1,42 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +DROP DATABASE IF EXISTS TEST_DB; +CREATE DATABASE TEST_DB; +USE TEST_DB; +CREATE TABLE SUBSCRIBER +( NUMBER CHAR(12) BINARY NOT NULL, +NAME CHAR(32) BINARY NOT NULL, +GROUP_ID INT UNSIGNED NOT NULL, +LOCATION INT UNSIGNED NOT NULL, +SESSIONS INT UNSIGNED 
NOT NULL, +CHANGED_BY CHAR(32) BINARY NOT NULL, +CHANGED_TIME CHAR(32) BINARY NOT NULL, +PRIMARY KEY USING HASH (NUMBER)) +ENGINE = NDB; +CREATE TABLE GROUP2 +( GROUP_ID INT UNSIGNED NOT NULL, +GROUP_NAME CHAR(32) BINARY NOT NULL, +ALLOW_READ CHAR(1) BINARY NOT NULL, +ALLOW_INSERT INT UNSIGNED NOT NULL, +ALLOW_DELETE INT UNSIGNED NOT NULL, +PRIMARY KEY USING HASH (GROUP_ID)) +ENGINE = NDB; +CREATE TABLE SESSION +( NUMBER CHAR(12) BINARY NOT NULL, +SERVER_ID INT UNSIGNED NOT NULL, +DATA BINARY(2000) NOT NULL, +PRIMARY KEY USING HASH (NUMBER,SERVER_ID)) +ENGINE = NDB; +CREATE TABLE SERVER +( SUFFIX CHAR(2) BINARY NOT NULL, +SERVER_ID INT UNSIGNED NOT NULL, +NAME CHAR(32) BINARY NOT NULL, +NO_OF_READ INT UNSIGNED NOT NULL, +NO_OF_INSERT INT UNSIGNED NOT NULL, +NO_OF_DELETE INT UNSIGNED NOT NULL, +PRIMARY KEY USING HASH (SUFFIX, SERVER_ID)) +ENGINE = NDB; diff --git a/mysql-test/r/rpl_ndb_multi.result b/mysql-test/r/rpl_ndb_multi.result new file mode 100644 index 00000000000..d3afd787613 --- /dev/null +++ b/mysql-test/r/rpl_ndb_multi.result @@ -0,0 +1,55 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 INT, PRIMARY KEY (c3)) ENGINE = NDB ; +reset master; +SHOW TABLES; +Tables_in_test +t1 +INSERT INTO t1 VALUES ("row1","will go away",1); +SELECT * FROM t1 ORDER BY c3; +c1 c2 c3 +row1 will go away 1 +SELECT * FROM t1 ORDER BY c3; +c1 c2 c3 +row1 will go away 1 +SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status; +@the_epoch:=MAX(epoch) +<the_epoch> +SELECT * FROM t1 ORDER BY c3; +c1 c2 c3 +row1 will go away 1 +stop slave; +SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) +FROM cluster_replication.binlog_index WHERE epoch = <the_epoch> ; +@the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1) +102 master-bin1.000001 +CHANGE MASTER TO +master_port=<MASTER_PORT1>, +master_log_file = 'master-bin1.000001', +master_log_pos = 102 ; +start slave; +INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4); +DELETE FROM t1 WHERE c3 = 1; +UPDATE t1 SET c2="should go away" WHERE c3 = 2; +UPDATE t1 SET c2="C" WHERE c3 = 3; +DELETE FROM t1 WHERE c3 = 2; +SELECT * FROM t1 ORDER BY c3; +c1 c2 c3 +row3 C 3 +row4 D 4 +INSERT INTO t1 VALUES ("row5","E",5); +SELECT * FROM t1 ORDER BY c3; +c1 c2 c3 +row3 C 3 +row4 D 4 +row5 E 5 +SELECT * FROM t1 ORDER BY c3; +c1 c2 c3 +row3 C 3 +row4 D 4 +row5 E 5 +STOP SLAVE; diff --git a/mysql-test/r/rpl_ndb_sync.result b/mysql-test/r/rpl_ndb_sync.result new file mode 100644 index 00000000000..44d0efa7e5a --- /dev/null +++ b/mysql-test/r/rpl_ndb_sync.result @@ -0,0 +1,95 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +STOP SLAVE; +CREATE DATABASE ndbsynctest; +USE ndbsynctest; +CREATE DATABASE ndbsynctest; +USE ndbsynctest; +CREATE TABLE t1 (c1 BIT(1) NOT NULL, c2 BIT(1) NOT NULL, c3 CHAR(15), PRIMARY KEY(c3)) ENGINE = NDB ; +INSERT INTO t1 VALUES (1,1,"row1"),(0,1,"row2"),(1,0,"row3"),(0,0,"row4"); +CREATE TABLE t2 (c1 CHAR(15), c2 BIT(1) NOT NULL, c3 BIT(1) NOT NULL, PRIMARY KEY(c1)) ENGINE = NDB ; +INSERT INTO t2 VALUES ("ABC",1,1),("BCDEF",0,1),("CD",1,0),("DEFGHIJKL",0,0); +SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3; +hex(c1) hex(c2) c3 +1 1 row1 +0 1 row2 +1 0 row3 +0 0 row4 +SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1; +hex(c2) 
hex(c3) c1 +1 1 ABC +0 1 BCDEF +1 0 CD +0 0 DEFGHIJKL +CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT); +DELETE FROM cluster_replication.backup_info; +LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ','; +SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info; +@the_backup_id:=backup_id +<the_backup_id> +UPDATE t1 SET c2=0 WHERE c3="row2"; +SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3; +hex(c1) hex(c2) c3 +1 1 row1 +0 0 row2 +1 0 row3 +0 0 row4 +SHOW TABLES; +Tables_in_ndbsynctest +DROP DATABASE ndbsynctest; +CREATE DATABASE ndbsynctest; +USE ndbsynctest; +SHOW TABLES; +Tables_in_ndbsynctest +t1 +t2 +SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3; +hex(c1) hex(c2) c3 +1 1 row1 +0 1 row2 +1 0 row3 +0 0 row4 +SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1; +hex(c2) hex(c3) c1 +1 1 ABC +0 1 BCDEF +1 0 CD +0 0 DEFGHIJKL +SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status; +@the_epoch:=MAX(epoch) +<the_epoch> +SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) +FROM cluster_replication.binlog_index WHERE epoch > <the_epoch> ORDER BY epoch ASC LIMIT 1; +@the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1) +<the_pos> master-bin.000001 +CHANGE MASTER TO +master_log_file = 'master-bin.000001', +master_log_pos = <the_pos> ; +START SLAVE; +SHOW SLAVE STATUS; +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master +<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master> +SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3; +hex(c1) hex(c2) c3 +1 1 row1 +0 0 row2 +1 0 row3 +0 0 row4 +SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1; +hex(c2) hex(c3) c1 +1 1 ABC +0 1 BCDEF +1 0 CD +0 0 DEFGHIJKL +DROP DATABASE ndbsynctest; +STOP SLAVE; +reset master; +select * from cluster_replication.binlog_index; +Position File epoch inserts updates deletes schemaops +reset slave; +select * from cluster_replication.apply_status; +server_id epoch diff --git a/mysql-test/r/rpl_row_basic_11bugs.result b/mysql-test/r/rpl_row_basic_11bugs.result index a5267a2bce2..772a22babfe 100644 --- a/mysql-test/r/rpl_row_basic_11bugs.result +++ b/mysql-test/r/rpl_row_basic_11bugs.result @@ -9,6 +9,7 @@ CREATE DATABASE test_ignore; SHOW DATABASES; Database information_schema +cluster_replication mysql test test_ignore @@ -33,6 +34,7 @@ master-bin.000001 235 Write_rows 1 282 SHOW DATABASES; Database information_schema +cluster_replication mysql test USE test; diff --git a/mysql-test/r/rpl_row_basic_7ndb.result b/mysql-test/r/rpl_row_basic_7ndb.result new file mode 100644 index 00000000000..634d5b2b5d5 --- /dev/null +++ b/mysql-test/r/rpl_row_basic_7ndb.result @@ -0,0 +1,426 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; 
+start slave; +CREATE TABLE t1 (C1 CHAR(1), C2 CHAR(1), INDEX (C1)) ENGINE = 'NDB' ; +SELECT * FROM t1; +C1 C2 +SELECT * FROM t1; +C1 C2 +INSERT INTO t1 VALUES ('A','B'), ('X','Y'), ('X','X'); +INSERT INTO t1 VALUES ('A','C'), ('X','Z'), ('A','A'); +SELECT * FROM t1 ORDER BY C1,C2; +C1 C2 +A A +A B +A C +X X +X Y +X Z +SELECT * FROM t1 ORDER BY C1,C2; +C1 C2 +A A +A B +A C +X X +X Y +X Z +DELETE FROM t1 WHERE C1 = C2; +SELECT * FROM t1 ORDER BY C1,C2; +C1 C2 +A B +A C +X Y +X Z +SELECT * FROM t1 ORDER BY C1,C2; +C1 C2 +A B +A C +X Y +X Z +UPDATE t1 SET C2 = 'I' WHERE C1 = 'A' AND C2 = 'C'; +SELECT * FROM t1 ORDER BY C1,C2; +C1 C2 +A B +A I +X Y +X Z +SELECT * FROM t1 ORDER BY C1,C2; +C1 C2 +A B +A I +X Y +X Z +UPDATE t1 SET c2 = 'Q' WHERE c1 = 'A' AND c2 = 'N'; +SELECT * FROM t1 ORDER BY c1,c2; +C1 C2 +A B +A I +X Y +X Z +SELECT * FROM t1 ORDER BY c1,c2; +C1 C2 +A B +A I +X Y +X Z +CREATE TABLE t2 (c1 INT, c12 char(1), c2 INT, PRIMARY KEY (c1)) ENGINE = 'NDB' ; +INSERT INTO t2 +VALUES (1,'A',2), (2,'A',4), (3,'A',9), (4,'A',15), (5,'A',25), +(6,'A',35), (7,'A',50), (8,'A',64), (9,'A',81); +SELECT * FROM t2 ORDER BY c1,c2; +c1 c12 c2 +1 A 2 +2 A 4 +3 A 9 +4 A 15 +5 A 25 +6 A 35 +7 A 50 +8 A 64 +9 A 81 +SELECT * FROM t2 WHERE c2 = c1 * c1 ORDER BY c1,c2; +c1 c12 c2 +2 A 4 +3 A 9 +5 A 25 +8 A 64 +9 A 81 +SELECT * FROM t2 ORDER BY c1,c2; +c1 c12 c2 +1 A 2 +2 A 4 +3 A 9 +4 A 15 +5 A 25 +6 A 35 +7 A 50 +8 A 64 +9 A 81 +SELECT * FROM t2 WHERE c2 = c1 * c1 ORDER BY c1,c2; +c1 c12 c2 +2 A 4 +3 A 9 +5 A 25 +8 A 64 +9 A 81 +UPDATE t2 SET c2 = c1*c1 WHERE c2 != c1*c1; +SELECT * FROM t2 WHERE c2 = c1 * c1 ORDER BY c1,c2; +c1 c12 c2 +1 A 1 +2 A 4 +3 A 9 +4 A 16 +5 A 25 +6 A 36 +7 A 49 +8 A 64 +9 A 81 +SELECT * FROM t2 WHERE c2 = c1 * c1 ORDER BY c1,c2; +c1 c12 c2 +1 A 1 +2 A 4 +3 A 9 +4 A 16 +5 A 25 +6 A 36 +7 A 49 +8 A 64 +9 A 81 +UPDATE t2 SET c12 = 'Q' WHERE c1 = 1 AND c2 = 999; +SELECT * FROM t2 ORDER BY c1,c2; +c1 c12 c2 +1 A 1 +2 A 4 +3 A 9 +4 A 16 +5 A 25 +6 A 36 +7 A 49 +8 A 64 +9 A 81 +SELECT * FROM t2 ORDER BY c1,c2; +c1 c12 c2 +1 A 1 +2 A 4 +3 A 9 +4 A 16 +5 A 25 +6 A 36 +7 A 49 +8 A 64 +9 A 81 +DELETE FROM t2 WHERE c1 % 4 = 0; +SELECT * FROM t2 ORDER BY c1,c2; +c1 c12 c2 +1 A 1 +2 A 4 +3 A 9 +5 A 25 +6 A 36 +7 A 49 +9 A 81 +SELECT * FROM t2 ORDER BY c1,c2; +c1 c12 c2 +1 A 1 +2 A 4 +3 A 9 +5 A 25 +6 A 36 +7 A 49 +9 A 81 +UPDATE t2 SET c12='X'; +CREATE TABLE t3 (C1 CHAR(1), C2 CHAR(1), pk1 INT, C3 CHAR(1), pk2 INT, PRIMARY KEY (pk1,pk2)) ENGINE = 'NDB' ; +INSERT INTO t3 VALUES ('A','B',1,'B',1), ('X','Y',2,'B',1), ('X','X',3,'B',1); +INSERT INTO t3 VALUES ('A','C',1,'B',2), ('X','Z',2,'B',2), ('A','A',3,'B',2); +SELECT * FROM t3 ORDER BY C1,C2; +C1 C2 pk1 C3 pk2 +A A 3 B 2 +A B 1 B 1 +A C 1 B 2 +X X 3 B 1 +X Y 2 B 1 +X Z 2 B 2 +SELECT * FROM t3 ORDER BY C1,C2; +C1 C2 pk1 C3 pk2 +A A 3 B 2 +A B 1 B 1 +A C 1 B 2 +X X 3 B 1 +X Y 2 B 1 +X Z 2 B 2 +DELETE FROM t3 WHERE C1 = C2; +SELECT * FROM t3 ORDER BY C1,C2; +C1 C2 pk1 C3 pk2 +A B 1 B 1 +A C 1 B 2 +X Y 2 B 1 +X Z 2 B 2 +SELECT * FROM t3 ORDER BY C1,C2; +C1 C2 pk1 C3 pk2 +A B 1 B 1 +A C 1 B 2 +X Y 2 B 1 +X Z 2 B 2 +UPDATE t3 SET C2 = 'I' WHERE C1 = 'A' AND C2 = 'C'; +SELECT * FROM t3 ORDER BY C1,C2; +C1 C2 pk1 C3 pk2 +A B 1 B 1 +A I 1 B 2 +X Y 2 B 1 +X Z 2 B 2 +SELECT * FROM t3 ORDER BY C1,C2; +C1 C2 pk1 C3 pk2 +A B 1 B 1 +A I 1 B 2 +X Y 2 B 1 +X Z 2 B 2 +CREATE TABLE t6 (C1 CHAR(1), C2 CHAR(1), C3 INT) ENGINE = 'NDB' ; +INSERT INTO t6 VALUES ('A','B',1), ('X','Y',2), ('X','X',3); +INSERT INTO t6 VALUES ('A','C',4), ('X','Z',5), ('A','A',6); 
+SELECT * FROM t6 ORDER BY C3; +C1 C2 C3 +A B 1 +X Y 2 +X X 3 +A C 4 +X Z 5 +A A 6 +SELECT * FROM t6 ORDER BY C3; +C1 C2 C3 +A B 1 +X Y 2 +X X 3 +A C 4 +X Z 5 +A A 6 +DELETE FROM t6 WHERE C1 = C2; +SELECT * FROM t6 ORDER BY C3; +C1 C2 C3 +A B 1 +X Y 2 +A C 4 +X Z 5 +SELECT * FROM t6 ORDER BY C3; +C1 C2 C3 +A B 1 +X Y 2 +A C 4 +X Z 5 +UPDATE t6 SET C2 = 'I' WHERE C1 = 'A' AND C2 = 'C'; +SELECT * FROM t6 ORDER BY C3; +C1 C2 C3 +A B 1 +X Y 2 +A I 4 +X Z 5 +SELECT * FROM t6 ORDER BY C3; +C1 C2 C3 +A B 1 +X Y 2 +A I 4 +X Z 5 +CREATE TABLE t5 (C1 CHAR(1), C2 CHAR(1), C3 INT PRIMARY KEY) ENGINE = 'NDB' ; +INSERT INTO t5 VALUES ('A','B',1), ('X','Y',2), ('X','X',3); +INSERT INTO t5 VALUES ('A','C',4), ('X','Z',5), ('A','A',6); +UPDATE t5,t2,t3 SET t5.C2='Q', t2.c12='R', t3.C3 ='S' WHERE t5.C1 = t2.c12 AND t5.C1 = t3.C1; +SELECT * FROM t5,t2,t3 WHERE t5.C2='Q' AND t2.c12='R' AND t3.C3 ='S' ORDER BY t5.C3,t2.c1,t3.pk1,t3.pk2; +C1 C2 C3 c1 c12 c2 C1 C2 pk1 C3 pk2 +X Q 2 1 R 1 X Y 2 S 1 +X Q 2 1 R 1 X Z 2 S 2 +X Q 2 2 R 4 X Y 2 S 1 +X Q 2 2 R 4 X Z 2 S 2 +X Q 2 3 R 9 X Y 2 S 1 +X Q 2 3 R 9 X Z 2 S 2 +X Q 2 5 R 25 X Y 2 S 1 +X Q 2 5 R 25 X Z 2 S 2 +X Q 2 6 R 36 X Y 2 S 1 +X Q 2 6 R 36 X Z 2 S 2 +X Q 2 7 R 49 X Y 2 S 1 +X Q 2 7 R 49 X Z 2 S 2 +X Q 2 9 R 81 X Y 2 S 1 +X Q 2 9 R 81 X Z 2 S 2 +X Q 3 1 R 1 X Y 2 S 1 +X Q 3 1 R 1 X Z 2 S 2 +X Q 3 2 R 4 X Y 2 S 1 +X Q 3 2 R 4 X Z 2 S 2 +X Q 3 3 R 9 X Y 2 S 1 +X Q 3 3 R 9 X Z 2 S 2 +X Q 3 5 R 25 X Y 2 S 1 +X Q 3 5 R 25 X Z 2 S 2 +X Q 3 6 R 36 X Y 2 S 1 +X Q 3 6 R 36 X Z 2 S 2 +X Q 3 7 R 49 X Y 2 S 1 +X Q 3 7 R 49 X Z 2 S 2 +X Q 3 9 R 81 X Y 2 S 1 +X Q 3 9 R 81 X Z 2 S 2 +X Q 5 1 R 1 X Y 2 S 1 +X Q 5 1 R 1 X Z 2 S 2 +X Q 5 2 R 4 X Y 2 S 1 +X Q 5 2 R 4 X Z 2 S 2 +X Q 5 3 R 9 X Y 2 S 1 +X Q 5 3 R 9 X Z 2 S 2 +X Q 5 5 R 25 X Y 2 S 1 +X Q 5 5 R 25 X Z 2 S 2 +X Q 5 6 R 36 X Y 2 S 1 +X Q 5 6 R 36 X Z 2 S 2 +X Q 5 7 R 49 X Y 2 S 1 +X Q 5 7 R 49 X Z 2 S 2 +X Q 5 9 R 81 X Y 2 S 1 +X Q 5 9 R 81 X Z 2 S 2 +SELECT * FROM t5,t2,t3 WHERE t5.C2='Q' AND t2.c12='R' AND t3.C3 ='S' ORDER BY t5.C3,t2.c1,t3.pk1,t3.pk2; +C1 C2 C3 c1 c12 c2 C1 C2 pk1 C3 pk2 +X Q 2 1 R 1 X Y 2 S 1 +X Q 2 1 R 1 X Z 2 S 2 +X Q 2 2 R 4 X Y 2 S 1 +X Q 2 2 R 4 X Z 2 S 2 +X Q 2 3 R 9 X Y 2 S 1 +X Q 2 3 R 9 X Z 2 S 2 +X Q 2 5 R 25 X Y 2 S 1 +X Q 2 5 R 25 X Z 2 S 2 +X Q 2 6 R 36 X Y 2 S 1 +X Q 2 6 R 36 X Z 2 S 2 +X Q 2 7 R 49 X Y 2 S 1 +X Q 2 7 R 49 X Z 2 S 2 +X Q 2 9 R 81 X Y 2 S 1 +X Q 2 9 R 81 X Z 2 S 2 +X Q 3 1 R 1 X Y 2 S 1 +X Q 3 1 R 1 X Z 2 S 2 +X Q 3 2 R 4 X Y 2 S 1 +X Q 3 2 R 4 X Z 2 S 2 +X Q 3 3 R 9 X Y 2 S 1 +X Q 3 3 R 9 X Z 2 S 2 +X Q 3 5 R 25 X Y 2 S 1 +X Q 3 5 R 25 X Z 2 S 2 +X Q 3 6 R 36 X Y 2 S 1 +X Q 3 6 R 36 X Z 2 S 2 +X Q 3 7 R 49 X Y 2 S 1 +X Q 3 7 R 49 X Z 2 S 2 +X Q 3 9 R 81 X Y 2 S 1 +X Q 3 9 R 81 X Z 2 S 2 +X Q 5 1 R 1 X Y 2 S 1 +X Q 5 1 R 1 X Z 2 S 2 +X Q 5 2 R 4 X Y 2 S 1 +X Q 5 2 R 4 X Z 2 S 2 +X Q 5 3 R 9 X Y 2 S 1 +X Q 5 3 R 9 X Z 2 S 2 +X Q 5 5 R 25 X Y 2 S 1 +X Q 5 5 R 25 X Z 2 S 2 +X Q 5 6 R 36 X Y 2 S 1 +X Q 5 6 R 36 X Z 2 S 2 +X Q 5 7 R 49 X Y 2 S 1 +X Q 5 7 R 49 X Z 2 S 2 +X Q 5 9 R 81 X Y 2 S 1 +X Q 5 9 R 81 X Z 2 S 2 +CREATE TABLE t4 (C1 CHAR(1) PRIMARY KEY, B1 BIT(1), B2 BIT(1) NOT NULL DEFAULT 0, C2 CHAR(1) NOT NULL DEFAULT 'A') ENGINE = 'NDB' ; +INSERT INTO t4 SET C1 = 1; +SELECT C1,HEX(B1),HEX(B2) FROM t4 ORDER BY C1; +C1 HEX(B1) HEX(B2) +1 NULL 0 +SELECT C1,HEX(B1),HEX(B2) FROM t4 ORDER BY C1; +C1 HEX(B1) HEX(B2) +1 NULL 0 +CREATE TABLE t7 (C1 INT PRIMARY KEY, C2 INT) ENGINE = 'NDB' ; + +--- on slave: original values --- +INSERT INTO t7 VALUES (1,3), (2,6), (3,9); 
+SELECT * FROM t7 ORDER BY C1; +C1 C2 +1 3 +2 6 +3 9 + +--- on master: new values inserted --- +INSERT INTO t7 VALUES (1,2), (2,4), (3,6); +SELECT * FROM t7 ORDER BY C1; +C1 C2 +1 2 +2 4 +3 6 + +--- on slave: old values should be overwritten by replicated values --- +SELECT * FROM t7 ORDER BY C1; +C1 C2 +1 2 +2 4 +3 6 + +--- on master --- +DROP TABLE t7; +CREATE TABLE t7 (a INT PRIMARY KEY, b INT UNIQUE, c INT UNIQUE) ENGINE = 'NDB' ; +INSERT INTO t7 VALUES (99,99,99); +INSERT INTO t7 VALUES (99,22,33); +ERROR 23000: Duplicate entry '99' for key 1 +INSERT INTO t7 VALUES (11,99,33); +ERROR 23000: Duplicate entry '11' for key 1 +INSERT INTO t7 VALUES (11,22,99); +ERROR 23000: Duplicate entry '11' for key 1 +SELECT * FROM t7 ORDER BY a; +a b c +99 99 99 + +--- on slave --- +SELECT * FROM t7 ORDER BY a; +a b c +99 99 99 +INSERT INTO t7 VALUES (1,2,3), (2,4,6), (3,6,9); +SELECT * FROM t7 ORDER BY a; +a b c +1 2 3 +2 4 6 +3 6 9 +99 99 99 + +--- on master --- +INSERT INTO t7 VALUES (2,4,8); + +--- on slave --- +SELECT * FROM t7 ORDER BY a; +a b c +1 2 3 +2 4 8 +3 6 9 +99 99 99 +DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7; diff --git a/mysql-test/r/schema.result b/mysql-test/r/schema.result index 538abd8d039..a1b3fa9f1b3 100644 --- a/mysql-test/r/schema.result +++ b/mysql-test/r/schema.result @@ -6,6 +6,7 @@ foo CREATE DATABASE `foo` /*!40100 DEFAULT CHARACTER SET latin1 */ show schemas; Database information_schema +cluster_replication foo mysql test diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result index e3487b1bdf6..a78230e63eb 100644 --- a/mysql-test/r/show_check.result +++ b/mysql-test/r/show_check.result @@ -53,6 +53,7 @@ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length I show databases; Database information_schema +cluster_replication mysql test show databases like "test%"; diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 2e786b669eb..499cae16412 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -20,6 +20,11 @@ subselect : Bug#15706 type_time : Bug#15805 #rpl000002 : Bug#15920 Temporary tables are not binlogged in SBR #ps_7ndb : Bug#15923 Core dump in RBR mode when executing test suite +ps_7ndb : dbug assert in RBR mode when executing test suite rpl_ddl : Bug#15963 SBR does not show "Definer" correctly mysqlslap : Bug#16167 events : Affects flush test case. 
A table lock not released somewhere +ndb_autodiscover : TBF with CR +ndb_autodiscover2 : TBF with CR +ndb_binlog_basic : Results are not deterministic, Tomas will fix +rpl_ndb_basic : Bug#16228 diff --git a/mysql-test/t/mysqltest.test b/mysql-test/t/mysqltest.test index ee309fbd7c9..f6b19c2209a 100644 --- a/mysql-test/t/mysqltest.test +++ b/mysql-test/t/mysqltest.test @@ -519,9 +519,6 @@ echo $novar1; --exec echo "let $=hi;" | $MYSQL_TEST 2>&1 --error 1 ---exec echo "let hi=hi;" | $MYSQL_TEST 2>&1 - ---error 1 --exec echo "let $1 hi;" | $MYSQL_TEST 2>&1 --error 1 diff --git a/mysql-test/t/ndb_alter_table.test b/mysql-test/t/ndb_alter_table.test index 77ec6c5e066..a12ac20a259 100644 --- a/mysql-test/t/ndb_alter_table.test +++ b/mysql-test/t/ndb_alter_table.test @@ -142,23 +142,19 @@ INSERT INTO t1 VALUES (1,2,0),(18,19,4),(20,21,0); select c from t1 order by c; drop table t1; ---disable_ps_protocol -create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) ) -engine=ndb; -insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three'); -create index c on t1(c); -connection server2; -select * from t1 where b = 'two'; -connection server1; -alter table t1 drop index c; -connection server2; -# This should fail since index information is not automatically refreshed ---error 1015 -select * from t1 where b = 'two'; -select * from t1 where b = 'two'; -connection server1; -drop table t1; ---enable_ps_protocol +## Test moved to ndb_alter_table_row|stmt respectively as behaviour differs +#create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) ) +#engine=ndb; +#insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three'); +#create index c on t1(c); +#connection server2; +#select * from t1 where c = 'two'; +#connection server1; +#alter table t1 drop index c; +#connection server2; +#select * from t1 where c = 'two'; +#connection server1; +#drop table t1; #--disable_warnings #DROP TABLE IF EXISTS t2; @@ -183,29 +179,32 @@ drop table t1; #select count(*) from t2; #drop table t2; -connection server1; -create table t3 (a int primary key) engine=ndbcluster; - -connection server2; -begin; -insert into t3 values (1); - -connection server1; -alter table t3 rename t4; - -connection server2; -# This should work as transaction is ongoing... -delete from t3; -insert into t3 values (1); -commit; - -# This should fail as its a new transaction ---error 1015 -select * from t3; -select * from t4; -drop table t4; -show tables; -connection server1; +## Test moved to ndb_alter_table_row|stmt respectively as behaviour differs +#connection server1; +#create table t3 (a int primary key) engine=ndbcluster; + +#connection server2; +#begin; +#insert into t3 values (1); + +#connection server1; +#alter table t3 rename t4; + +#connection server2; +## with rbr the below will not work as the "alter" event +## explicitly invalidates the dictionary cache. +### This should work as transaction is ongoing... 
+##delete from t3;
+##insert into t3 values (1);
+#commit;
+
+## This should fail as it's a new transaction
+#--error 1146
+#select * from t3;
+#select * from t4;
+#drop table t4;
+#show tables;
+#connection server1;
 create table t1 (
 ai bigint auto_increment,
diff --git a/mysql-test/t/ndb_alter_table_row.test b/mysql-test/t/ndb_alter_table_row.test
new file mode 100644
index 00000000000..5dbfa26289b
--- /dev/null
+++ b/mysql-test/t/ndb_alter_table_row.test
@@ -0,0 +1,48 @@
+-- source include/have_ndb.inc
+-- source include/have_multi_ndb.inc
+-- source include/not_embedded.inc
+-- source include/have_binlog_format_row.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+connection server1;
+create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
+engine=ndb;
+insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
+create index c on t1(c);
+connection server2;
+select * from t1 where c = 'two';
+connection server1;
+alter table t1 drop index c;
+connection server2;
+select * from t1 where c = 'two';
+connection server1;
+drop table t1;
+
+connection server1;
+create table t3 (a int primary key) engine=ndbcluster;
+
+connection server2;
+begin;
+insert into t3 values (1);
+
+connection server1;
+alter table t3 rename t4;
+
+connection server2;
+# with rbr the below will not work as the "alter" event
+# explicitly invalidates the dictionary cache.
+## This should work as transaction is ongoing...
+#delete from t3;
+#insert into t3 values (1);
+commit;
+
+# This should fail as it's a new transaction
+--error 1146
+select * from t3;
+select * from t4;
+drop table t4;
+show tables;
+connection server1;
diff --git a/mysql-test/t/ndb_alter_table_stm.test b/mysql-test/t/ndb_alter_table_stm.test
new file mode 100644
index 00000000000..808c637d3bc
--- /dev/null
+++ b/mysql-test/t/ndb_alter_table_stm.test
@@ -0,0 +1,51 @@
+-- source include/have_ndb.inc
+-- source include/have_multi_ndb.inc
+-- source include/not_embedded.inc
+-- source include/have_binlog_format_statement.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+connection server1;
+create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
+engine=ndb;
+insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
+create index c on t1(c);
+connection server2;
+select * from t1 where c = 'two';
+connection server1;
+alter table t1 drop index c;
+connection server2;
+-- error 1015
+select * from t1 where c = 'two';
+select * from t1 where c = 'two';
+connection server1;
+drop table t1;
+
+connection server1;
+create table t3 (a int primary key) engine=ndbcluster;
+
+connection server2;
+begin;
+insert into t3 values (1);
+
+connection server1;
+alter table t3 rename t4;
+
+connection server2;
+# with rbr the below will not work as the "alter" event
+# explicitly invalidates the dictionary cache.
+# This should work as transaction is ongoing...
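+# (note: in this statement-based variant the rename is not expected to
+# invalidate the open table handler, so the ongoing transaction below can
+# keep using t3 until commit; only the next transaction sees the rename
+# and fails)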
+delete from t3;
+insert into t3 values (1);
+commit;
+
+# This should fail as it's a new transaction
+--error 1015
+select * from t3;
+select * from t4;
+drop table t4;
+show tables;
+connection server1;
+
diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test
index 12aca73d82b..e99503843bd 100644
--- a/mysql-test/t/ndb_basic.test
+++ b/mysql-test/t/ndb_basic.test
@@ -6,6 +6,17 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
 drop database if exists mysqltest;
 --enable_warnings
+
+# workaround for bug#16445
+# remove to reproduce the bug and run tests from ndb start
+# and with ndb_autodiscover disabled
+CREATE TABLE t1 (
+ pk1 INT NOT NULL PRIMARY KEY,
+ attr1 INT NOT NULL,
+ attr2 INT,
+ attr3 VARCHAR(10)
+) ENGINE=ndbcluster;
+drop table t1;
+
 #
 # Basic test to show that the NDB
 # table handler is working
diff --git a/mysql-test/t/ndb_binlog_basic.test b/mysql-test/t/ndb_binlog_basic.test
new file mode 100644
index 00000000000..86443dcd831
--- /dev/null
+++ b/mysql-test/t/ndb_binlog_basic.test
@@ -0,0 +1,76 @@
+-- source include/have_ndb.inc
+-- source include/have_binlog_format_row.inc
+
+--disable_warnings
+drop table if exists t1, t2;
+drop database if exists mysqltest;
+create database mysqltest;
+use mysqltest;
+drop database mysqltest;
+use test;
+--enable_warnings
+
+#
+# basic insert, update, delete test, alter, rename, drop
+# check that binlog_index gets the right info
+#
+
+create table t1 (a int primary key) engine=ndb;
+insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+save_master_pos;
+--replace_column 1 #
+select @max_epoch:=max(epoch)-1 from cluster_replication.binlog_index;
+
+delete from t1;
+alter table t1 add (b int);
+insert into t1 values (3,3),(4,4);
+alter table t1 rename t2;
+
+# get all in one epoch
+begin;
+insert into t2 values (1,1),(2,2);
+update t2 set b=1 where a=3;
+delete from t2 where a=4;
+commit;
+drop table t2;
+
+# check that above is ok
+# (save_master_pos waits for last gcp to complete, ensuring that we have
+# the expected data in the binlog)
+save_master_pos;
+select inserts from cluster_replication.binlog_index where epoch > @max_epoch and inserts > 5;
+select deletes from cluster_replication.binlog_index where epoch > @max_epoch and deletes > 5;
+select inserts,updates,deletes from
+ cluster_replication.binlog_index where epoch > @max_epoch and updates > 0;
+select schemaops from
+ cluster_replication.binlog_index where epoch > @max_epoch and schemaops > 0;
+
+#
+# check that purge clears the binlog_index
+#
+flush logs;
+--sleep 1
+purge master logs before now();
+select count(*) from cluster_replication.binlog_index;
+
+#
+# several tables in different databases
+# check that the same table name in different databases doesn't mix up
+#
+create table t1 (a int primary key, b int) engine=ndb;
+create database mysqltest;
+use mysqltest;
+create table t1 (c int, d int primary key) engine=ndb;
+use test;
+
+insert into mysqltest.t1 values (2,1),(2,2);
+save_master_pos;
+--replace_column 1 #
+select @max_epoch:=max(epoch)-1 from cluster_replication.binlog_index;
+
+drop table t1;
+drop database mysqltest;
+select inserts,updates,deletes from
+ cluster_replication.binlog_index where epoch > @max_epoch and inserts > 0;
+select schemaops from
+ cluster_replication.binlog_index where epoch > @max_epoch and schemaops > 0;
diff --git a/mysql-test/t/ndb_binlog_multi.test b/mysql-test/t/ndb_binlog_multi.test
new file mode 100644
index 00000000000..71edd70c14f
--- /dev/null
+++ b/mysql-test/t/ndb_binlog_multi.test
@@ -0,0 +1,70 @@
+-- 
source include/have_ndb.inc +-- source include/have_multi_ndb.inc +-- source include/have_binlog_format_row.inc + +--disable_warnings +connection server2; +drop table if exists t1,t2; +connection server1; +drop table if exists t1,t2; +--enable_warnings + +# +# basic test to see if one server sees the table from the other +# and sets up the replication correctly +# + +# no tables and nothing in cluster_replication.binlog_index; +connection server1; +SHOW TABLES; + +# create table on the other server +connection server2; +CREATE TABLE t2 (a INT PRIMARY KEY, b int) ENGINE = NDB; + +# make sure the first mysql server knows about this table +connection server1; +show tables; + +# insert something on server2 +connection server2; +INSERT INTO t2 VALUES (1,1),(2,2); +select * from t2 order by a; +save_master_pos; +--replace_column 1 <the_epoch> +SELECT @the_epoch:=epoch,inserts,updates,deletes,schemaops FROM + cluster_replication.binlog_index ORDER BY epoch DESC LIMIT 1; +let $the_epoch= `SELECT @the_epoch`; + +# see if we got something on server1 +connection server1; +SELECT * FROM t2 ORDER BY a; +--replace_result $the_epoch <the_epoch> +eval SELECT inserts,updates,deletes,schemaops FROM + cluster_replication.binlog_index WHERE epoch=$the_epoch; + +# drop the table on server1 +DROP TABLE t2; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE = NDB; +INSERT INTO t1 VALUES (1),(2); +save_master_pos; +--replace_column 1 <the_epoch2> +SELECT @the_epoch2:=epoch,inserts,updates,deletes,schemaops FROM + cluster_replication.binlog_index ORDER BY epoch DESC LIMIT 1; +let $the_epoch2= `SELECT @the_epoch2`; + +--replace_result $the_epoch <the_epoch> $the_epoch2 <the_epoch2> +eval SELECT inserts,updates,deletes,schemaops FROM + cluster_replication.binlog_index WHERE epoch > $the_epoch AND epoch < $the_epoch2; +drop table t1; + +# flush on server2 +connection server2; +SHOW TABLES; +--replace_result $the_epoch <the_epoch> $the_epoch2 <the_epoch2> +eval SELECT inserts,updates,deletes,schemaops FROM + cluster_replication.binlog_index WHERE epoch > $the_epoch AND epoch < $the_epoch2; + +# reset +connection server1; + diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test index 0d34e698e28..0cc8f57e4b5 100644 --- a/mysql-test/t/ndb_multi.test +++ b/mysql-test/t/ndb_multi.test @@ -1,9 +1,13 @@ -- source include/have_ndb.inc -- source include/have_multi_ndb.inc -- source include/not_embedded.inc +-- source include/have_binlog_format_statement.inc --disable_warnings +connection server2; +drop table if exists t1, t2, t3, t4; +connection server1; drop table if exists t1, t2, t3, t4; --enable_warnings diff --git a/mysql-test/t/ndb_multi_row.test b/mysql-test/t/ndb_multi_row.test new file mode 100644 index 00000000000..54d8666dfe1 --- /dev/null +++ b/mysql-test/t/ndb_multi_row.test @@ -0,0 +1,78 @@ +-- source include/have_ndb.inc +-- source include/have_multi_ndb.inc +-- source include/not_embedded.inc +-- source include/have_binlog_format_row.inc + + +--disable_warnings +connection server2; +drop table if exists t1, t2, t3, t4; +connection server1; +drop table if exists t1, t2, t3, t4; +--enable_warnings + +flush status; + +# Create test tables on server1 +create table t1 (a int) engine=ndbcluster; +create table t2 (a int) engine=ndbcluster; +insert into t1 value (2); +insert into t2 value (3); +select * from t1; +select * from t2; +show status like 'handler_discover%'; + +# Check dropping and recreating table on same server +connect (con1,localhost,,,test); +connect (con2,localhost,,,test); +connection con1; 
+select * from t1; +connection con2; +drop table t1; +create table t1 (a int) engine=ndbcluster; +insert into t1 value (2); +connection con1; +select * from t1; + +# Check dropping and recreating table on different server +connection server2; +show status like 'handler_discover%'; +drop table t1; +create table t1 (a int) engine=ndbcluster; +insert into t1 value (2); +connection server1; +## Currently a retry is required remotely +#--error 1412 +#select * from t1; +#show warnings; +#flush table t1; +# Table definition change should be propagated automatically +select * from t1; + +# Connect to server2 and use the tables from there +connection server2; +flush status; +select * from t1; +update t1 set a=3 where a=2; +show status like 'handler_discover%'; + +# Create a new table on server2 +create table t3 (a int not null primary key, b varchar(22), +c int, last_col text) engine=ndb; +insert into t3 values(1, 'Hi!', 89, 'Longtext column'); +create table t4 (pk int primary key, b int) engine=ndb; + +# Check that the tables are accessible from server1 +connection server1; +select * from t1; +select * from t3; +show status like 'handler_discover%'; +show tables like 't4'; +show status like 'handler_discover%'; +show tables; + +drop table t1, t2, t3, t4; +connection server2; +drop table t1, t3, t4; + +# End of 4.1 tests diff --git a/mysql-test/t/rpl_ndb_bank.test b/mysql-test/t/rpl_ndb_bank.test new file mode 100644 index 00000000000..c79c85558fe --- /dev/null +++ b/mysql-test/t/rpl_ndb_bank.test @@ -0,0 +1,207 @@ +# +# Currently this test only runs in the source tree with the +# ndb/test programs compiled. +# invoke with: ./mysql-test-run --ndb-extra-test --do-test=rpl_ndb_bank +# +# 1. start a "bank" application running on the master cluster +# 2. perform online sync of slave +# 3. periodically check consistency of slave +# 4. stop the bank application +# 5. 
check that the slave and master BANK databases are the same +# + +# kill any trailing processes +--system killall lt-bankTransactionMaker lt-bankTimer lt-bankMakeGL || true + +--source include/have_ndb.inc +--source include/have_ndb_extra.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +--disable_warnings +# initialize master +--connection master +CREATE DATABASE IF NOT EXISTS BANK; +DROP DATABASE BANK; +CREATE DATABASE BANK default charset=latin1 default collate=latin1_bin; +--enable_warnings + +# +# These tables should correspond to the table definitions in +# storage/ndb/test/src/NDBT_Tables.cpp +# +--connection master +USE BANK; +CREATE TABLE GL ( TIME BIGINT UNSIGNED NOT NULL, + ACCOUNT_TYPE INT UNSIGNED NOT NULL, + BALANCE INT UNSIGNED NOT NULL, + DEPOSIT_COUNT INT UNSIGNED NOT NULL, + DEPOSIT_SUM INT UNSIGNED NOT NULL, + WITHDRAWAL_COUNT INT UNSIGNED NOT NULL, + WITHDRAWAL_SUM INT UNSIGNED NOT NULL, + PURGED INT UNSIGNED NOT NULL, + PRIMARY KEY USING HASH (TIME,ACCOUNT_TYPE)) + ENGINE = NDB; + +CREATE TABLE ACCOUNT ( ACCOUNT_ID INT UNSIGNED NOT NULL, + OWNER INT UNSIGNED NOT NULL, + BALANCE INT UNSIGNED NOT NULL, + ACCOUNT_TYPE INT UNSIGNED NOT NULL, + PRIMARY KEY USING HASH (ACCOUNT_ID)) + ENGINE = NDB; + +CREATE TABLE TRANSACTION ( TRANSACTION_ID BIGINT UNSIGNED NOT NULL, + ACCOUNT INT UNSIGNED NOT NULL, + ACCOUNT_TYPE INT UNSIGNED NOT NULL, + OTHER_ACCOUNT INT UNSIGNED NOT NULL, + TRANSACTION_TYPE INT UNSIGNED NOT NULL, + TIME BIGINT UNSIGNED NOT NULL, + AMOUNT INT UNSIGNED NOT NULL, + PRIMARY KEY USING HASH (TRANSACTION_ID,ACCOUNT)) + ENGINE = NDB; + +CREATE TABLE SYSTEM_VALUES ( SYSTEM_VALUES_ID INT UNSIGNED NOT NULL, + VALUE BIGINT UNSIGNED NOT NULL, + PRIMARY KEY USING HASH (SYSTEM_VALUES_ID)) + ENGINE = NDB; + +CREATE TABLE ACCOUNT_TYPE ( ACCOUNT_TYPE_ID INT UNSIGNED NOT NULL, + DESCRIPTION CHAR(64) NOT NULL, + PRIMARY KEY USING HASH (ACCOUNT_TYPE_ID)) + ENGINE = NDB; + +# +# create "BANK" application +# +--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/bank/bankCreator >> $NDB_TOOLS_OUTPUT + +# +# start main loop +# repeat backup-restore-check +# + +# set this high if testing to run many syncs in loop +--let $2=1 +while ($2) +{ + +# +# start "BANK" application +# +--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/bank/bankTimer -w 5 >> $NDB_TOOLS_OUTPUT & +--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/bank/bankMakeGL >> $NDB_TOOLS_OUTPUT & +--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/bank/bankTransactionMaker >> $NDB_TOOLS_OUTPUT & + +# +# let the "BANK" run for a while +# +--sleep 5 + +--disable_warnings +# initialize slave for sync +--connection slave +STOP SLAVE; +RESET SLAVE; +# to make sure we drop any ndbcluster tables +CREATE DATABASE IF NOT EXISTS BANK; +DROP DATABASE BANK; +# create database +CREATE DATABASE BANK; +--enable_warnings + +# +# Time to sync the slave: +# start by taking a backup on master +--connection master +RESET MASTER; +--exec $NDB_MGM --no-defaults --ndb-connectstring=localhost:$NDBCLUSTER_PORT -e "start backup" >> $NDB_TOOLS_OUTPUT + +# there is no neat way to find the backupid, this is a hack to find it... 
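+# (the grep for 520093696 below picks out the SYSTAB_0 row that is assumed
+# to hold the id of the most recent backup; ndb_select_all -D , prints it
+# comma-separated so LOAD DATA INFILE can read it into
+# cluster_replication.backup_info)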
+--exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring=localhost:$NDBCLUSTER_PORT -d sys -D , SYSTAB_0 | grep 520093696 > var/tmp.dat
+CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT) ENGINE = HEAP;
+DELETE FROM cluster_replication.backup_info;
+LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
+--replace_column 1 <the_backup_id>
+SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info;
+let the_backup_id=`select @the_backup_id`;
+
+# restore on slave, first check that nothing is there
+--connection slave
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -p 8 -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -p 8 -b $the_backup_id -n 2 -r -e --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
+
+#
+# now set up replication to continue from last epoch
+# 1. get apply_status epoch from slave
+# 2. get corresponding _next_ binlog position from master
+# 3. change master on slave
+# 4. start the replication
+
+# 1.
+--connection slave
+--replace_column 1 <the_epoch>
+SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
+--let $the_epoch= `select @the_epoch`
+
+# 2.
+--connection master
+--replace_result $the_epoch <the_epoch>
+--replace_column 1 <the_pos>
+eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
+ FROM cluster_replication.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
+--let $the_pos= `SELECT @the_pos`
+--let $the_file= `SELECT @the_file`
+
+# 3.
+--connection slave
+--replace_result $the_pos <the_pos>
+eval CHANGE MASTER TO
+ master_log_file = '$the_file',
+ master_log_pos = $the_pos;
+
+# 4.
+--connection slave
+START SLAVE;
+
+
+#
+# Now loop and check consistency every 2 seconds on slave
+#
+--connection slave
+--let $1=10
+while ($1)
+{
+ --sleep 2
+ --replace_result $MASTER_MYPORT MASTER_PORT
+ --replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
+ SHOW SLAVE STATUS;
+ STOP SLAVE;
+ --exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT_SLAVE ../storage/ndb/test/ndbapi/bank/bankValidateAllGLs >> $NDB_TOOLS_OUTPUT
+ START SLAVE;
+ --dec $1
+}
+
+#
+# Stop transactions
+#
+--exec killall lt-bankTransactionMaker lt-bankTimer lt-bankMakeGL
+
+#
+# Check that the databases are the same on slave and master
+# 1. dump database BANK on both master and slave
+# 2. compare, there should be no difference
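+# (--order-by-primary makes both dumps row-order deterministic, so a plain
+# textual diff is a sufficient equality check)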
+#
+--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info BANK ACCOUNT_TYPE ACCOUNT GL TRANSACTION > ./var/tmp/master_BANK.sql
+--connection master
+use test;
+create table t1 (a int primary key) engine=ndb;
+insert into t1 values (1);
+--sync_slave_with_master
+--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info BANK ACCOUNT_TYPE ACCOUNT GL TRANSACTION > ./var/tmp/slave_BANK.sql
+--connection master
+drop table t1;
+
+--exec diff ./var/tmp/master_BANK.sql ./var/tmp/slave_BANK.sql
+
+ --dec $2
+}
diff --git a/mysql-test/t/rpl_ndb_basic.test b/mysql-test/t/rpl_ndb_basic.test
new file mode 100644
index 00000000000..95c1737f715
--- /dev/null
+++ b/mysql-test/t/rpl_ndb_basic.test
@@ -0,0 +1,146 @@
+--source include/have_ndb.inc
+--source include/have_binlog_format_row.inc
+--source include/master-slave.inc
+
+
+
+#
+# Bug #11087
+#
+# connect to the master and create table t1
+--connection master
+CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0',
+ `nom` char(4) default NULL,
+ `prenom` char(4) default NULL,
+ PRIMARY KEY (`nid`))
+ ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+
+INSERT INTO t1 VALUES(1,"XYZ1","ABC1");
+select * from t1 order by nid;
+
+--sync_slave_with_master
+# connect to slave and ensure the data is there.
+--connection slave
+select * from t1 order by nid;
+
+--connection master
+delete from t1;
+INSERT INTO t1 VALUES(1,"XYZ2","ABC2");
+# Make sure all rows are on the master
+select * from t1 order by nid;
+
+# make sure all rows are on the slave.
+--sync_slave_with_master
+--connection slave
+# Bug #11087 would have row with nid 2 missing
+select * from t1 order by nid;
+
+--connection master
+DROP table t1;
+
+#
+# Test replication of table with no primary key
+#
+--connection master
+CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0',
+ `nom` char(4) default NULL,
+ `prenom` char(4) default NULL)
+ ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+
+INSERT INTO t1 VALUES(1,"XYZ1","ABC1"),(2,"AAA","BBB"),(3,"CCC","DDD");
+select * from t1 order by nid;
+
+--sync_slave_with_master
+# connect to slave and ensure the data is there.
+--connection slave
+select * from t1 order by nid;
+
+--connection master
+delete from t1 where nid = 2;
+INSERT INTO t1 VALUES(4,"EEE","FFF");
+# Make sure all rows are on the master
+select * from t1 order by nid;
+
+# make sure all rows are on the slave.
+--sync_slave_with_master
+--connection slave
+select * from t1 order by nid;
+
+--connection master
+UPDATE t1 set nid=nid+1;
+UPDATE t1 set nom="CCP" where nid = 4;
+select * from t1 order by nid;
+
+# make sure all rows are on the slave.
+--sync_slave_with_master
+--connection slave
+select * from t1 order by nid;
+
+--connection master
+DROP table t1;
+
+##################################################################
+#
+# Check that retries are made on the slave on some temporary errors
+#
+
+#
+# 1. Deadlock
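+# (slave_transaction_retries bounds how many times the slave SQL thread
+# retries a transaction that failed with a temporary error such as a lock
+# wait timeout; the test below first makes the retries run out, then raises
+# the limit so the retried transaction eventually succeeds)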
+#
+--connection master
+CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0',
+ `nom` char(4) default NULL,
+ `prenom` char(4) default NULL,
+ PRIMARY KEY USING HASH (`nid`))
+ ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO t1 VALUES(1,"XYZ1","ABC1");
+
+# cause a lock on that row on the slave
+--sync_slave_with_master
+--connection slave
+BEGIN;
+UPDATE t1 SET `nom`="LOCK" WHERE `nid`=1;
+
+# set number of retries low so we fail the retries
+set GLOBAL slave_transaction_retries=1;
+
+# now do a change to this row on the master
+# will deadlock on the slave because of the lock above
+--connection master
+UPDATE t1 SET `nom`="DEAD" WHERE `nid`=1;
+
+# wait for deadlock to be detected
+# sleep longer than the deadlock detection timeout in the config
+# we do this 2 times, once with few retries to verify that we
+# get a failure with the set sleep, and once with the _same_
+# sleep, but with more retries to get it to succeed
+--sleep 5
+
+# replication should have stopped, since max retries were not enough
+# verify with show slave status
+--connection slave
+--replace_result $MASTER_MYPORT MASTER_PORT
+--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
+SHOW SLAVE STATUS;
+
+# now set max retries high enough to succeed, and start slave again
+set GLOBAL slave_transaction_retries=10;
+START SLAVE;
+
+# wait for deadlock to be detected and retried
+# should be the same sleep as above for test to be valid
+--sleep 5
+
+# commit transaction to release lock on row and let replication succeed
+select * from t1 order by nid;
+COMMIT;
+
+# verify that the row was successfully applied on the slave
+--connection master
+--sync_slave_with_master
+--connection slave
+select * from t1 order by nid;
+
+# cleanup
+--connection master
+DROP TABLE t1;
diff --git a/mysql-test/t/rpl_ndb_disk.test b/mysql-test/t/rpl_ndb_disk.test
new file mode 100644
index 00000000000..f1aa1ee2bf1
--- /dev/null
+++ b/mysql-test/t/rpl_ndb_disk.test
@@ -0,0 +1,87 @@
+--source include/have_ndb.inc
+--source include/have_binlog_format_row.inc
+--source include/master-slave.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+#
+# Basic test of disk tables for NDB
+#
+
+#
+# Start by creating a logfile group
+#
+
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+
+alter logfile group lg1
+add undofile 'undofile02.dat'
+initial_size 4M engine=ndb;
+
+#
+# Create a tablespace connected to the logfile group
+#
+
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 12M
+ENGINE NDB;
+
+alter tablespace ts1
+add datafile 'datafile02.dat'
+initial_size 4M engine=ndb;
+
+#
+# Create a table using this tablespace
+#
+
+CREATE TABLE t1
+(pk1 int not null primary key, b int not null, c int not null)
+tablespace ts1 storage disk
+engine ndb;
+
+#
+# insert some data
+#
+
+insert into t1 values (1,2,3);
+select * from t1 order by pk1;
+
+#
+# check that the data is also on the slave
+#
+
+--sync_slave_with_master
+--connection slave
+select * from t1 order by pk1;
+
+#
+# view the binlog
+#
+
+--connection master
+let $VERSION=`select version()`;
+--replace_result $VERSION VERSION
+show binlog events;
+
+#
+# cleanup
+#
+
+drop table t1;
+alter tablespace ts1
+drop datafile 'datafile.dat'
+engine=ndb;
+alter tablespace ts1
+drop datafile 'datafile02.dat'
+engine=ndb;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg1 ENGINE=NDB;
+--sync_slave_with_master
diff --git a/mysql-test/t/rpl_ndb_idempotent.test b/mysql-test/t/rpl_ndb_idempotent.test
new file mode 100644
index 00000000000..1f14ce59a2e
--- /dev/null
+++ b/mysql-test/t/rpl_ndb_idempotent.test
@@ -0,0 +1,111 @@
+--source include/have_ndb.inc
+--source include/have_binlog_format_row.inc
+--source include/master-slave.inc
+
+#
+# Currently test only works with ndb since it retrieves "old"
+# binlog positions with cluster_replication.binlog_index and apply_status;
+#
+
+# create a table with one row
+CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 INT, PRIMARY KEY (c3)) ENGINE = NDB ;
+INSERT INTO t1 VALUES ("row1","will go away",1);
+SELECT * FROM t1 ORDER BY c3;
+
+# sync slave and retrieve epoch
+sync_slave_with_master;
+--replace_column 1 <the_epoch>
+SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
+let $the_epoch= `select @the_epoch` ;
+SELECT * FROM t1 ORDER BY c3;
+
+# get the master binlog pos from the epoch
+connection master;
+--replace_result $the_epoch <the_epoch>
+--replace_column 1 <the_pos>
+eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
+ FROM cluster_replication.binlog_index WHERE epoch = $the_epoch ;
+let $the_pos= `SELECT @the_pos` ;
+let $the_file= `SELECT @the_file` ;
+
+# insert some more values
+INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4);
+DELETE FROM t1 WHERE c3 = 1;
+UPDATE t1 SET c2="should go away" WHERE c3 = 2;
+UPDATE t1 SET c2="C" WHERE c3 = 3;
+DELETE FROM t1 WHERE c3 = 2;
+
+SELECT * FROM t1 ORDER BY c3;
+
+# check that we have it on the slave
+--sync_slave_with_master
+--connection slave
+SELECT * FROM t1 ORDER BY c3;
+
+--replace_result $MASTER_MYPORT MASTER_PORT
+--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
+SHOW SLAVE STATUS;
+
+# stop slave and reset position to before the last changes
+STOP SLAVE;
+--replace_result $the_pos <the_pos>
+eval CHANGE MASTER TO
+ master_log_file = '$the_file',
+ master_log_pos = $the_pos ;
+
+--replace_result $MASTER_MYPORT MASTER_PORT
+--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
+SHOW SLAVE STATUS;
+
+# start the slave again
+# -> same events should have been applied again
+# e.g. inserting rows that are already there
+# deleting a row which is not there
+# updating a row which is not there
+START SLAVE;
+
+--connection master
+SELECT * FROM t1 ORDER BY c3;
+--sync_slave_with_master
+--connection slave
+SELECT * FROM t1 ORDER BY c3;
+
+STOP SLAVE;
+
+#
+# cleanup
+#
+--connection master
+DROP TABLE t1;
+RESET master;
+--connection slave
+DROP TABLE t1;
+RESET slave;
+
+START SLAVE;
+
+#
+# Test that we can handle an update of a row that does not exist on the slave.
+# This will trigger usage of AO_IgnoreError on the slave side so that the INSERT
+# still succeeds even if the replication of the UPDATE generates an error.
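+# (rough sketch of the intended apply semantics, assuming the ignore-error
+# path described above:
+#   write  -> behaves like REPLACE INTO t1 VALUES (...), overwriting any
+#             existing row with the same primary key
+#   delete -> DELETE FROM t1 WHERE c3 = ...; a missing row is not an error
+#   update -> an update of a missing row is skipped instead of stopping
+#             the slave)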
+# +--connection master +CREATE TABLE t1 (c1 CHAR(15) NOT NULL, c2 CHAR(15) NOT NULL, c3 INT NOT NULL, PRIMARY KEY (c3)) ENGINE = NDB ; +INSERT INTO t1 VALUES ("row1","remove on slave",1); + +--sync_slave_with_master +--connection slave +DELETE FROM t1; + +--connection master +BEGIN; +UPDATE t1 SET c2="does not exist" WHERE c3=1; +INSERT INTO t1 VALUES ("row2","new on slave",2); +COMMIT; + +--sync_slave_with_master +--connection slave +SELECT * FROM t1; +--replace_result $MASTER_MYPORT MASTER_PORT +--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master> +SHOW SLAVE STATUS; diff --git a/mysql-test/t/rpl_ndb_load.test b/mysql-test/t/rpl_ndb_load.test new file mode 100644 index 00000000000..2ee540afd18 --- /dev/null +++ b/mysql-test/t/rpl_ndb_load.test @@ -0,0 +1,66 @@ +# +# Currently this test only runs in the source tree with the +# ndb/test programs compiled. +# invoke with: ./mysql-test-run --ndb-extra-test --do-test=rpl_ndb_load +# + +--source include/have_ndb.inc +--source include/have_ndb_extra.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +--disable_warnings +# reset master +connection master; +DROP DATABASE IF EXISTS TEST_DB; +CREATE DATABASE TEST_DB; +--enable_warnings + +# +# These tables should correspond to the table definitions in +# storage/ndb/test/ndbapi/bench/ +# +connection master; +USE TEST_DB; +CREATE TABLE SUBSCRIBER + ( NUMBER CHAR(12) BINARY NOT NULL, + NAME CHAR(32) BINARY NOT NULL, + GROUP_ID INT UNSIGNED NOT NULL, + LOCATION INT UNSIGNED NOT NULL, + SESSIONS INT UNSIGNED NOT NULL, + CHANGED_BY CHAR(32) BINARY NOT NULL, + CHANGED_TIME CHAR(32) BINARY NOT NULL, + PRIMARY KEY USING HASH (NUMBER)) + ENGINE = NDB; + +CREATE TABLE GROUP_T + ( GROUP_ID INT UNSIGNED NOT NULL, + GROUP_NAME CHAR(32) BINARY NOT NULL, + ALLOW_READ CHAR(1) BINARY NOT NULL, + ALLOW_INSERT INT UNSIGNED NOT NULL, + ALLOW_DELETE INT UNSIGNED NOT NULL, + PRIMARY KEY USING HASH (GROUP_ID)) + ENGINE = NDB; + +CREATE TABLE SESSION + ( NUMBER CHAR(12) BINARY NOT NULL, + SERVER_ID INT UNSIGNED NOT NULL, + DATA VARBINARY(1998) NOT NULL, + PRIMARY KEY USING HASH (NUMBER,SERVER_ID)) + ENGINE = NDB; + +CREATE TABLE SERVER + ( SUFFIX CHAR(2) BINARY NOT NULL, + SERVER_ID INT UNSIGNED NOT NULL, + NAME CHAR(32) BINARY NOT NULL, + NO_OF_READ INT UNSIGNED NOT NULL, + NO_OF_INSERT INT UNSIGNED NOT NULL, + NO_OF_DELETE INT UNSIGNED NOT NULL, + PRIMARY KEY USING HASH (SUFFIX, SERVER_ID)) + ENGINE = NDB; + +# +# start "load" application +# +--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/DbCreate >> $NDB_TOOLS_OUTPUT +--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/DbAsyncGenerator >> $NDB_TOOLS_OUTPUT diff --git a/mysql-test/t/rpl_ndb_multi.test b/mysql-test/t/rpl_ndb_multi.test new file mode 100644 index 00000000000..81f0f318505 --- /dev/null +++ b/mysql-test/t/rpl_ndb_multi.test @@ -0,0 +1,71 @@ +--source include/have_ndb.inc +--source include/have_multi_ndb.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +# note: server2 is another "master" connected to the master cluster + +# +# Currently test only works with ndb since it retrieves "old" +# binlog positions with cluster_replication.binlog_index and apply_status; +# + +# create a table with one row, and make sure the other "master" gets it +CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 
INT, PRIMARY KEY (c3)) ENGINE = NDB ; +connection server2; +reset master; +SHOW TABLES; +connection master; +INSERT INTO t1 VALUES ("row1","will go away",1); +SELECT * FROM t1 ORDER BY c3; +connection server2; +SELECT * FROM t1 ORDER BY c3; + +# sync slave and retrieve epoch and stop the slave +connection master; +sync_slave_with_master; +--replace_column 1 <the_epoch> +SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status; +let $the_epoch= `select @the_epoch` ; +SELECT * FROM t1 ORDER BY c3; +stop slave; + +# get the master binlog pos from the epoch, from the _other_ "master", server2 +connection server2; +--replace_result $the_epoch <the_epoch> +eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) + FROM cluster_replication.binlog_index WHERE epoch = $the_epoch ; +let $the_pos= `SELECT @the_pos` ; +let $the_file= `SELECT @the_file` ; + +# now connect the slave to the _other_ "master" +connection slave; +--replace_result $MASTER_MYPORT1 <MASTER_PORT1> +eval CHANGE MASTER TO + master_port=$MASTER_MYPORT1, + master_log_file = '$the_file', + master_log_pos = $the_pos ; +start slave; + +# insert some more values on the first master +connection master; +INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4); +DELETE FROM t1 WHERE c3 = 1; +UPDATE t1 SET c2="should go away" WHERE c3 = 2; +UPDATE t1 SET c2="C" WHERE c3 = 3; +DELETE FROM t1 WHERE c3 = 2; + +SELECT * FROM t1 ORDER BY c3; + +save_master_pos; + +# insert another row, and check that we have it on the slave +connection server2; +INSERT INTO t1 VALUES ("row5","E",5); +SELECT * FROM t1 ORDER BY c3; +#sync_slave_with_master; +connection slave; +--sleep 2 +SELECT * FROM t1 ORDER BY c3; + +STOP SLAVE;
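The CHANGE MASTER sequence above is the generic NDB channel-failover recipe also used by rpl_ndb_idempotent.test and rpl_ndb_sync.test: read the last applied epoch from the slave's apply_status, map it to a file and position in the (new) master's binlog_index, and repoint the slave there. The same three steps from a client program, sketched with the MySQL C API; connection setup and all error handling are omitted, and the function name is hypothetical:

    #include <mysql.h>
    #include <stdio.h>

    /*
      Sketch: repoint `slave` to `new_master` at the binlog position
      matching the slave's last applied epoch. Assumes both MYSQL
      handles are already connected.
    */
    static void ndb_failover_sketch(MYSQL *slave, MYSQL *new_master)
    {
      char epoch[32], file[256], pos[32], query[512];
      MYSQL_RES *res;
      MYSQL_ROW row;

      /* 1. last applied epoch, read on the slave */
      mysql_query(slave,
                  "SELECT MAX(epoch) FROM cluster_replication.apply_status");
      res= mysql_store_result(slave);
      row= mysql_fetch_row(res);
      snprintf(epoch, sizeof(epoch), "%s", row[0]);
      mysql_free_result(res);

      /* 2. matching binlog file and position, read on the new master */
      snprintf(query, sizeof(query),
               "SELECT Position, SUBSTRING_INDEX(File, '/', -1)"
               " FROM cluster_replication.binlog_index WHERE epoch = %s",
               epoch);
      mysql_query(new_master, query);
      res= mysql_store_result(new_master);
      row= mysql_fetch_row(res);
      snprintf(pos, sizeof(pos), "%s", row[0]);
      snprintf(file, sizeof(file), "%s", row[1]);
      mysql_free_result(res);

      /* 3. repoint the slave and restart replication */
      snprintf(query, sizeof(query),
               "CHANGE MASTER TO master_log_file = '%s', master_log_pos = %s",
               file, pos);
      mysql_query(slave, query);
      mysql_query(slave, "START SLAVE");
    }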
diff --git a/mysql-test/t/rpl_ndb_sync.test b/mysql-test/t/rpl_ndb_sync.test new file mode 100644 index 00000000000..b449735e1be --- /dev/null +++ b/mysql-test/t/rpl_ndb_sync.test @@ -0,0 +1,132 @@ +--source include/have_ndb.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +# +# Currently the test only works with ndb, since it retrieves "old" +# binlog positions from cluster_replication.binlog_index and apply_status +# + +# stop the slave +connection slave; +STOP SLAVE; +CREATE DATABASE ndbsynctest; +USE ndbsynctest; + +# get some data on the master +connection master; +CREATE DATABASE ndbsynctest; +USE ndbsynctest; +CREATE TABLE t1 (c1 BIT(1) NOT NULL, c2 BIT(1) NOT NULL, c3 CHAR(15), PRIMARY KEY(c3)) ENGINE = NDB ; +INSERT INTO t1 VALUES (1,1,"row1"),(0,1,"row2"),(1,0,"row3"),(0,0,"row4"); +CREATE TABLE t2 (c1 CHAR(15), c2 BIT(1) NOT NULL, c3 BIT(1) NOT NULL, PRIMARY KEY(c1)) ENGINE = NDB ; +INSERT INTO t2 VALUES ("ABC",1,1),("BCDEF",0,1),("CD",1,0),("DEFGHIJKL",0,0); +SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3; +SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1; + +# take a backup on master +--exec $NDB_MGM --no-defaults --ndb-connectstring=localhost:$NDBCLUSTER_PORT -e "start backup" >> $NDB_TOOLS_OUTPUT +--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults --ndb-connectstring=localhost:$NDBCLUSTER_PORT -d sys -D , SYSTAB_0 | grep 520093696 > var/tmp.dat +CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT); +DELETE FROM cluster_replication.backup_info; +LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ','; +--replace_column 1 <the_backup_id> +SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info; +let the_backup_id=`select @the_backup_id` ; + +# update a row +UPDATE t1 SET c2=0 WHERE c3="row2"; +SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3; + +# restore on slave, first check that nothing is there +connection slave; + +# we should have no tables +SHOW TABLES; + +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -b $the_backup_id -n 2 -r -e --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT + +# +# BUG#11960 +# prior to bugfix "DROP DATABASE" would give a warning since +# the events were not created by ndb_restore +# +DROP DATABASE ndbsynctest; +CREATE DATABASE ndbsynctest; +USE ndbsynctest; +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -b $the_backup_id -n 2 -r -e --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT + +# continue test +SHOW TABLES; + +SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3; +SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1; + +# +# now set up replication to continue from last epoch +# 1. get apply_status epoch from slave +# 2. get corresponding _next_ binlog position from master +# 3. change master on slave + +# 1. +connection slave; +--replace_column 1 <the_epoch> +SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status; +let $the_epoch= `select @the_epoch` ; + +# 2. +connection master; +--replace_result $the_epoch <the_epoch> +--replace_column 1 <the_pos> +eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) + FROM cluster_replication.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1; +let $the_pos= `SELECT @the_pos` ; +let $the_file= `SELECT @the_file` ; + +# 3.
+connection slave; +--replace_result $the_pos <the_pos> +eval CHANGE MASTER TO + master_log_file = '$the_file', + master_log_pos = $the_pos ; +START SLAVE; + +# +# +# +connection master; +#sync_slave_with_master; +--sleep 2 +connection slave; +--replace_result $MASTER_MYPORT MASTER_PORT +--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master> +SHOW SLAVE STATUS; + +SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3; +SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1; + +# +# Cleanup +# + +connection master; +DROP DATABASE ndbsynctest; +#sync_slave_with_master; +--sleep 2 +connection slave; +STOP SLAVE; + +# +# Test some replication commands +# +connection master; +reset master; +# should now contain nothing +select * from cluster_replication.binlog_index; + +connection slave; +reset slave; +# should now contain nothing +select * from cluster_replication.apply_status; diff --git a/mysql-test/t/rpl_row_basic_7ndb.test b/mysql-test/t/rpl_row_basic_7ndb.test new file mode 100644 index 00000000000..d94b4d44dbb --- /dev/null +++ b/mysql-test/t/rpl_row_basic_7ndb.test @@ -0,0 +1,4 @@ +-- source include/have_ndb.inc +let $type= 'NDB' ; +let $extra_index= ; +-- source include/rpl_row_basic.inc diff --git a/scripts/mysql_create_system_tables.sh b/scripts/mysql_create_system_tables.sh index 5fad3222682..a90dfacabbd 100644 --- a/scripts/mysql_create_system_tables.sh +++ b/scripts/mysql_create_system_tables.sh @@ -813,6 +813,8 @@ $c_p $c_pp $c_ev +CREATE DATABASE IF NOT EXISTS cluster_replication; +CREATE TABLE IF NOT EXISTS cluster_replication.binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM; END_OF_DATA diff --git a/sql/Makefile.am b/sql/Makefile.am index ddbfdb88ba5..4dd1e2bad9c 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -58,6 +58,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ sql_select.h structs.h table.h sql_udf.h hash_filo.h\ lex.h lex_symbol.h sql_acl.h sql_crypt.h \ log_event.h sql_repl.h slave.h rpl_filter.h \ + rpl_injector.h \ stacktrace.h sql_sort.h sql_cache.h set_var.h \ spatial.h gstream.h client_settings.h tzfile.h \ tztime.h my_decimal.h\ @@ -89,6 +90,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \ sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \ sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \ slave.cc sql_repl.cc rpl_filter.cc rpl_tblmap.cc \ + rpl_injector.cc \ sql_union.cc sql_derived.cc \ client.c sql_client.cc mini_client_errors.c pack.c\ stacktrace.c repl_failsafe.h repl_failsafe.cc \ @@ -104,6 +106,8 @@ EXTRA_mysqld_SOURCES = ha_innodb.cc ha_berkeley.cc ha_archive.cc \ ha_innodb.h ha_berkeley.h ha_archive.h \ ha_blackhole.cc ha_federated.cc ha_ndbcluster.cc \ ha_blackhole.h ha_federated.h ha_ndbcluster.h \ + ha_ndbcluster_binlog.cc ha_ndbcluster_binlog.h \ + ha_ndbcluster_tables.h \ ha_partition.cc ha_partition.h mysqld_DEPENDENCIES = @mysql_se_objs@ gen_lex_hash_SOURCES = gen_lex_hash.cc @@ -160,6 +164,9 @@ ha_berkeley.o: ha_berkeley.cc ha_berkeley.h ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h $(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $< +ha_ndbcluster_binlog.o:ha_ndbcluster_binlog.cc ha_ndbcluster_binlog.h + $(CXXCOMPILE) 
@ndbcluster_includes@ $(LM_CFLAGS) -c $< + #Until we can get rid of dependencies on ha_ndbcluster.h handler.o: handler.cc ha_ndbcluster.h $(CXXCOMPILE) @ndbcluster_includes@ $(CXXFLAGS) -c $< diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 7bc3af2c3aa..ed2a53ed2c6 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -33,6 +33,8 @@ #include <../util/Bitmask.hpp> #include <ndbapi/NdbIndexStat.hpp> +#include "ha_ndbcluster_binlog.h" + // options from from mysqld.cc extern my_bool opt_ndb_optimized_node_selection; extern const char *opt_ndbcluster_connectstring; @@ -50,13 +52,9 @@ static const int parallelism= 0; // createable against NDB from this handler static const int max_transactions= 3; // should really be 2 but there is a transaction to much allocated when loch table is used -static const char *ha_ndb_ext=".ndb"; -static const char share_prefix[]= "./"; - -static int ndbcluster_close_connection(THD *thd); -static int ndbcluster_commit(THD *thd, bool all); -static int ndbcluster_rollback(THD *thd, bool all); -static handler* ndbcluster_create_handler(TABLE_SHARE *table); +static bool ndbcluster_init(void); +static int ndbcluster_end(ha_panic_function flag); +static bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type); static int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info); handlerton ndbcluster_hton = { @@ -66,29 +64,7 @@ handlerton ndbcluster_hton = { "Clustered, fault-tolerant, memory-based tables", DB_TYPE_NDBCLUSTER, ndbcluster_init, - 0, /* slot */ - 0, /* savepoint size */ - ndbcluster_close_connection, - NULL, /* savepoint_set */ - NULL, /* savepoint_rollback */ - NULL, /* savepoint_release */ - ndbcluster_commit, - ndbcluster_rollback, - NULL, /* prepare */ - NULL, /* recover */ - NULL, /* commit_by_xid */ - NULL, /* rollback_by_xid */ - NULL, /* create_cursor_read_view */ - NULL, /* set_cursor_read_view */ - NULL, /* close_cursor_read_view */ - ndbcluster_create_handler, /* Create a new handler */ - ndbcluster_drop_database, /* Drop a database */ - ndbcluster_end, /* Panic call */ - NULL, /* Start Consistent Snapshot */ - NULL, /* Flush logs */ - ndbcluster_show_status, /* Show status */ - ndbcluster_alter_tablespace, - HTON_NO_FLAGS + ~(uint)0, /* slot */ }; static handler *ndbcluster_create_handler(TABLE_SHARE *table) @@ -121,33 +97,24 @@ static handler *ndbcluster_create_handler(TABLE_SHARE *table) break; \ } -// Typedefs for long names -typedef NdbDictionary::Object NDBOBJ; -typedef NdbDictionary::Column NDBCOL; -typedef NdbDictionary::Table NDBTAB; -typedef NdbDictionary::Index NDBINDEX; -typedef NdbDictionary::Dictionary NDBDICT; -typedef NdbDictionary::Event NDBEVENT; - static int ndbcluster_inited= 0; -static int ndbcluster_util_inited= 0; +int ndbcluster_util_inited= 0; static Ndb* g_ndb= NULL; -static Ndb_cluster_connection* g_ndb_cluster_connection= NULL; +Ndb_cluster_connection* g_ndb_cluster_connection= NULL; +unsigned char g_node_id_map[max_ndb_nodes]; // Handler synchronization pthread_mutex_t ndbcluster_mutex; // Table lock handling -static HASH ndbcluster_open_tables; +HASH ndbcluster_open_tables; static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length, my_bool not_used __attribute__((unused))); -static NDB_SHARE *get_share(const char *key, - bool create_if_not_exists= TRUE, - bool have_lock= FALSE); -static void free_share(NDB_SHARE **share, bool have_lock= FALSE); -static void real_free_share(NDB_SHARE **share); +#ifdef HAVE_NDB_BINLOG +static int rename_share(NDB_SHARE *share, const char 
*new_key); +#endif static void ndb_set_fragmentation(NDBTAB &tab, TABLE *table, uint pk_len); static int packfrm(const void *data, uint len, const void **pack_data, uint *pack_len); @@ -157,35 +124,9 @@ static int unpackfrm(const void **data, uint *len, static int ndb_get_table_statistics(Ndb*, const char *, struct Ndb_statistics *); -#ifndef DBUG_OFF -void print_records(TABLE *table, const char *record) -{ - if (_db_on_) - { - for (uint j= 0; j < table->s->fields; j++) - { - char buf[40]; - int pos= 0; - Field *field= table->field[j]; - const byte* field_ptr= field->ptr - table->record[0] + record; - int pack_len= field->pack_length(); - int n= pack_len < 10 ? pack_len : 10; - - for (int i= 0; i < n && pos < 20; i++) - { - pos+= sprintf(&buf[pos]," %x", (int) (unsigned char) field_ptr[i]); - } - buf[pos]= 0; - DBUG_PRINT("info",("[%u]field_ptr[0->%d]: %s", j, n, buf)); - } - } -} -#else -#define print_records(a,b) -#endif // Util thread variables -static pthread_t ndb_util_thread; +pthread_t ndb_util_thread; pthread_mutex_t LOCK_ndb_util_thread; pthread_cond_t COND_ndb_util_thread; pthread_handler_t ndb_util_thread_func(void *arg); @@ -214,7 +155,7 @@ static long ndb_cluster_node_id= 0; static const char * ndb_connected_host= 0; static long ndb_connected_port= 0; static long ndb_number_of_replicas= 0; -static long ndb_number_of_storage_nodes= 0; +long ndb_number_of_storage_nodes= 0; static int update_status_variables(Ndb_cluster_connection *c) { @@ -235,9 +176,6 @@ SHOW_VAR ndb_status_variables[]= { {NullS, NullS, SHOW_LONG} }; -/* instantiated in storage/ndb/src/ndbapi/Ndbif.cpp */ -extern Uint64 g_latest_trans_gci; - /* Error handling functions */ @@ -365,6 +303,7 @@ Thd_ndb::Thd_ndb() all= NULL; stmt= NULL; error= 0; + options= 0; } Thd_ndb::~Thd_ndb() @@ -391,14 +330,6 @@ Thd_ndb::~Thd_ndb() } inline -Thd_ndb * -get_thd_ndb(THD *thd) { return (Thd_ndb *) thd->ha_data[ndbcluster_hton.slot]; } - -inline -void -set_thd_ndb(THD *thd, Thd_ndb *thd_ndb) { thd->ha_data[ndbcluster_hton.slot]= thd_ndb; } - -inline Ndb *ha_ndbcluster::get_ndb() { return get_thd_ndb(current_thd)->ndb; @@ -2517,8 +2448,8 @@ int ha_ndbcluster::delete_row(const byte *record) set to null. 
*/ -static void ndb_unpack_record(TABLE *table, NdbValue *value, - MY_BITMAP *defined, byte *buf) +void ndb_unpack_record(TABLE *table, NdbValue *value, + MY_BITMAP *defined, byte *buf) { Field **p_field= table->field, *field= *p_field; uint row_offset= (uint) (buf - table->record[0]); @@ -2756,6 +2687,7 @@ int ha_ndbcluster::index_read_idx(byte *buf, uint index_no, statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); DBUG_ENTER("ha_ndbcluster::index_read_idx"); DBUG_PRINT("enter", ("index_no: %u, key_len: %u", index_no, key_len)); + close_scan(); index_init(index_no, 0); DBUG_RETURN(index_read(buf, key, key_len, find_flag)); } @@ -3167,6 +3099,16 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) m_use_write= FALSE; m_ignore_dup_key= FALSE; break; + case HA_EXTRA_IGNORE_NO_KEY: + DBUG_PRINT("info", ("HA_EXTRA_IGNORE_NO_KEY")); + DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit")); + m_ignore_no_key= TRUE; + break; + case HA_EXTRA_NO_IGNORE_NO_KEY: + DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_NO_KEY")); + DBUG_PRINT("info", ("Turning off AO_IgnoreError at Commit/NoCommit")); + m_ignore_no_key= FALSE; + break; default: break; } @@ -3597,7 +3539,7 @@ int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type) Commit a transaction started in NDB */ -int ndbcluster_commit(THD *thd, bool all) +static int ndbcluster_commit(THD *thd, bool all) { int res= 0; Thd_ndb *thd_ndb= get_thd_ndb(thd); @@ -3648,7 +3590,7 @@ int ndbcluster_commit(THD *thd, bool all) Rollback a transaction started in NDB */ -int ndbcluster_rollback(THD *thd, bool all) +static int ndbcluster_rollback(THD *thd, bool all) { int res= 0; Thd_ndb *thd_ndb= get_thd_ndb(thd); @@ -3989,12 +3931,16 @@ int ha_ndbcluster::create(const char *name, if (create_from_engine) { /* - Table alreay exists in NDB and frm file has been created by + Table already exists in NDB and frm file has been created by caller. Do Ndb specific stuff, such as create a .ndb file */ if ((my_errno= write_ndb_file())) DBUG_RETURN(my_errno); +#ifdef HAVE_NDB_BINLOG + if (ndb_binlog_thread_running > 0) + ndbcluster_create_binlog_setup(get_ndb(), name2, m_dbname, m_tabname, 0); +#endif /* HAVE_NDB_BINLOG */ DBUG_RETURN(my_errno); } @@ -4133,6 +4079,74 @@ int ha_ndbcluster::create(const char *name, if (!my_errno) my_errno= write_ndb_file(); +#ifdef HAVE_NDB_BINLOG + if (!my_errno) + { + NDB_SHARE *share= 0; + pthread_mutex_lock(&ndbcluster_mutex); + /* + First make sure we get a "fresh" share here, not an old trailing one... + */ + { + const char *key= name2; + uint length= (uint) strlen(key); + if ((share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables, + (byte*) key, length))) + handle_trailing_share(share); + } + /* + get a new share + */ + if (!(share= get_share(name2, form, true, true))) + { + sql_print_error("NDB: allocating table share for %s failed", name2); + /* my_errno is set */ + } + pthread_mutex_unlock(&ndbcluster_mutex); + + while (!IS_TMP_PREFIX(m_tabname)) + { + const NDBTAB *t= dict->getTable(m_tabname); + String event_name(INJECTOR_EVENT_LEN); + ndb_rep_event_name(&event_name,m_dbname,m_tabname); + + /* + Always create an event for the table, as other mysql servers + expect it to be there.
+ */ + if (ndbcluster_create_event(ndb, t, event_name.c_ptr(), share) < 0) + { + /* this is only a serious error if the binlog is on */ + if (share && ndb_binlog_thread_running > 0) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + "Creating event for logging table failed. " + "See error log for details."); + } + break; + } + if (ndb_extra_logging) + sql_print_information("NDB Binlog: CREATE TABLE Event: %s", + event_name.c_ptr()); + + if (share && ndb_binlog_thread_running > 0 && + ndbcluster_create_event_ops(share, t, event_name.c_ptr()) < 0) + { + sql_print_error("NDB Binlog: FAILED CREATE TABLE event operations." + " Event: %s", name2); + /* a warning has been issued to the client */ + } + ndbcluster_log_schema_op(current_thd, share, + current_thd->query, current_thd->query_length, + share->db, share->table_name, + 0, 0, + SOT_CREATE_TABLE); + break; + } + } +#endif /* HAVE_NDB_BINLOG */ + DBUG_RETURN(my_errno); } @@ -4227,6 +4241,15 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) if (!(orig_tab= dict->getTable(m_tabname))) ERR_RETURN(dict->getNdbError()); } +#ifdef HAVE_NDB_BINLOG + NDB_SHARE *share= 0; + if (ndb_binlog_thread_running > 0 && + (share= get_share(from, 0, false))) + { + int r= rename_share(share, to); + DBUG_ASSERT(r == 0); + } +#endif m_table= (void *)orig_tab; // Change current database to that of target table set_dbname(to); @@ -4234,6 +4257,14 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) if ((result= alter_table_name(new_tabname))) { +#ifdef HAVE_NDB_BINLOG + if (share) + { + int r= rename_share(share, from); + DBUG_ASSERT(r == 0); + free_share(&share); + } +#endif DBUG_RETURN(result); } @@ -4241,9 +4272,76 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) if ((result= handler::rename_table(from, to))) { // ToDo in 4.1 should rollback alter table... +#ifdef HAVE_NDB_BINLOG + if (share) + free_share(&share); +#endif DBUG_RETURN(result); } +#ifdef HAVE_NDB_BINLOG + int is_old_table_tmpfile= 1; + if (share && share->op) + dict->forceGCPWait(); + + /* handle old table */ + if (!IS_TMP_PREFIX(m_tabname)) + { + is_old_table_tmpfile= 0; + String event_name(INJECTOR_EVENT_LEN); + ndb_rep_event_name(&event_name, from + sizeof(share_prefix) - 1, 0); + ndbcluster_handle_drop_table(ndb, event_name.c_ptr(), share); + } + + if (!result && !IS_TMP_PREFIX(new_tabname)) + { + /* always create an event for the table */ + String event_name(INJECTOR_EVENT_LEN); + ndb_rep_event_name(&event_name, to + sizeof(share_prefix) - 1, 0); + const NDBTAB *ndbtab= dict->getTable(new_tabname); + + if (ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share) >= 0) + { + if (ndb_extra_logging) + sql_print_information("NDB Binlog: RENAME Event: %s", + event_name.c_ptr()); + if (share) + { + if (ndbcluster_create_event_ops(share, ndbtab, + event_name.c_ptr()) < 0) + { + sql_print_error("NDB Binlog: FAILED create event operations " + "during RENAME. Event %s", event_name.c_ptr()); + /* a warning has been issued to the client */ + } + } + } + else + { + sql_print_error("NDB Binlog: FAILED create event during RENAME. " + "Event: %s", event_name.c_ptr()); + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + "Creating event for logging table failed. 
" + "See error log for details."); + } + if (is_old_table_tmpfile) + ndbcluster_log_schema_op(current_thd, share, + current_thd->query, current_thd->query_length, + m_dbname, new_tabname, + 0, 0, + SOT_ALTER_TABLE); + else + ndbcluster_log_schema_op(current_thd, share, + current_thd->query, current_thd->query_length, + m_dbname, new_tabname, + 0, 0, + SOT_RENAME_TABLE); + } + if (share) + free_share(&share); +#endif + DBUG_RETURN(result); } @@ -4286,6 +4384,9 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb, { DBUG_ENTER("ha_ndbcluster::ndbcluster_delete_table"); NDBDICT *dict= ndb->getDictionary(); +#ifdef HAVE_NDB_BINLOG + NDB_SHARE *share= get_share(path, 0, false); +#endif /* Drop the table from NDB */ @@ -4302,9 +4403,75 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb, if (res) { +#ifdef HAVE_NDB_BINLOG + /* the drop table failed for some reason, drop the share anyways */ + if (share) + { + pthread_mutex_lock(&ndbcluster_mutex); + if (share->state != NSS_DROPPED) + { + /* + The share kept by the server has not been freed, free it + */ + share->state= NSS_DROPPED; + free_share(&share, TRUE); + } + /* free the share taken above */ + free_share(&share, TRUE); + pthread_mutex_unlock(&ndbcluster_mutex); + } +#endif DBUG_RETURN(res); } +#ifdef HAVE_NDB_BINLOG + /* stop the logging of the dropped table, and cleanup */ + + /* + drop table is successful even if table does not exist in ndb + and in case table was actually not dropped, there is no need + to force a gcp, and setting the event_name to null will indicate + that there is no event to be dropped + */ + int table_dropped= dict->getNdbError().code != 709; + + if (!IS_TMP_PREFIX(table_name) && share) + { + ndbcluster_log_schema_op(current_thd, share, + current_thd->query, current_thd->query_length, + share->db, share->table_name, + 0, 0, + SOT_DROP_TABLE); + } + else if (table_dropped && share && share->op) /* ndbcluster_log_schema_op + will do a force GCP */ + dict->forceGCPWait(); + + if (!IS_TMP_PREFIX(table_name)) + { + String event_name(INJECTOR_EVENT_LEN); + ndb_rep_event_name(&event_name, path + sizeof(share_prefix) - 1, 0); + ndbcluster_handle_drop_table(ndb, + table_dropped ? 
event_name.c_ptr() : 0, + share); + } + + if (share) + { + pthread_mutex_lock(&ndbcluster_mutex); + if (share->state != NSS_DROPPED) + { + /* + The share kept by the server has not been freed, free it + */ + share->state= NSS_DROPPED; + free_share(&share, TRUE); + } + /* free the share taken above */ + free_share(&share, TRUE); + pthread_mutex_unlock(&ndbcluster_mutex); + } +#endif DBUG_RETURN(0); } @@ -4393,7 +4560,8 @@ ulonglong ha_ndbcluster::get_auto_increment() HA_NO_PREFIX_CHAR_KEYS | \ HA_NEED_READ_RANGE_BUFFER | \ HA_CAN_GEOMETRY | \ - HA_CAN_BIT_FIELD + HA_CAN_BIT_FIELD | \ + HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg): handler(&ndbcluster_hton, table_arg), @@ -4518,7 +4686,7 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked) DBUG_PRINT("info", (" ref_length: %d", ref_length)); } // Init table lock structure - if (!(m_share=get_share(name))) + if (!(m_share=get_share(name, table))) DBUG_RETURN(1); thr_lock_data_init(&m_share->lock,&m_lock,(void*) 0); @@ -4629,7 +4797,7 @@ int ha_ndbcluster::check_ndb_connection(THD* thd) } -int ndbcluster_close_connection(THD *thd) +static int ndbcluster_close_connection(THD *thd) { Thd_ndb *thd_ndb= get_thd_ndb(thd); DBUG_ENTER("ndbcluster_close_connection"); @@ -4792,14 +4960,21 @@ int ndbcluster_drop_database_impl(const char *path) DBUG_RETURN(ret); } -void ndbcluster_drop_database(char *path) +static void ndbcluster_drop_database(char *path) { ndbcluster_drop_database_impl(path); +#ifdef HAVE_NDB_BINLOG + char db[FN_REFLEN]; + ha_ndbcluster::set_dbname(path, db); + ndbcluster_log_schema_op(current_thd, 0, + current_thd->query, current_thd->query_length, + db, "", 0, 0, SOT_DROP_DB); +#endif } /* find all tables in ndb and discover those needed */ -static int ndbcluster_find_all_files(THD *thd) +int ndbcluster_find_all_files(THD *thd) { DBUG_ENTER("ndbcluster_find_all_files"); Ndb* ndb; @@ -4820,6 +4995,11 @@ static int ndbcluster_find_all_files(THD *thd) for (uint i= 0 ; i < list.count ; i++) { NDBDICT::List::Element& elmt= list.elements[i]; + if (IS_TMP_PREFIX(elmt.name)) + { + DBUG_PRINT("info", ("Skipping %s.%s in NDB", elmt.database, elmt.name)); + continue; + } DBUG_PRINT("info", ("Found %s.%s in NDB", elmt.database, elmt.name)); if (!(elmt.state == NDBOBJ::StateBuilding || elmt.state == NDBOBJ::StateOnline)) @@ -4834,10 +5014,11 @@ static int ndbcluster_find_all_files(THD *thd) if (!(ndbtab= dict->getTable(elmt.name))) { - sql_print_error("NDB: failed to setup table %s.%s, error: %d, %s", - elmt.database, elmt.name, - dict->getNdbError().code, - dict->getNdbError().message); + if (elmt.state == NDBOBJ::StateOnline) + sql_print_error("NDB: failed to setup table %s.%s, error: %d, %s", + elmt.database, elmt.name, + dict->getNdbError().code, + dict->getNdbError().message); unhandled++; continue; } @@ -4876,6 +5057,31 @@ static int ndbcluster_find_all_files(THD *thd) } pthread_mutex_unlock(&LOCK_open); } +#ifdef HAVE_NDB_BINLOG + else if (ndb_binlog_thread_running > 0) + { + /* set up replication for this table */ + NDB_SHARE *share; + pthread_mutex_lock(&ndbcluster_mutex); + if (((share= (NDB_SHARE*)hash_search(&ndbcluster_open_tables, + (byte*) key, strlen(key))) + && share->op == 0 && share->op_old == 0) + || share == 0) + { + /* + there is no binlog creation setup for this table + attempt to do it + */ + pthread_mutex_unlock(&ndbcluster_mutex); + pthread_mutex_lock(&LOCK_open); + ndbcluster_create_binlog_setup(ndb, key, elmt.database, elmt.name, + share); + 
pthread_mutex_unlock(&LOCK_open); + } + else + pthread_mutex_unlock(&ndbcluster_mutex); + } +#endif } } while (unhandled && retries--); @@ -4925,6 +5131,11 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, for (i= 0 ; i < list.count ; i++) { NDBDICT::List::Element& elmt= list.elements[i]; + if (IS_TMP_PREFIX(elmt.name)) + { + DBUG_PRINT("info", ("Skipping %s.%s in NDB", elmt.database, elmt.name)); + continue; + } DBUG_PRINT("info", ("Found %s/%s in NDB", elmt.database, elmt.name)); // Add only tables that belongs to db @@ -4983,6 +5194,39 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, } } +#ifdef HAVE_NDB_BINLOG + /* setup logging to binlog for all discovered tables */ + if (ndb_binlog_thread_running > 0) + { + char *end; + char *end1= + strxnmov(name, sizeof(name), mysql_data_home, "/", db, "/", NullS); + NDB_SHARE *share; + pthread_mutex_lock(&ndbcluster_mutex); + for (i= 0; i < ok_tables.records; i++) + { + file_name= (char*)hash_element(&ok_tables, i); + end= strxnmov(end1, sizeof(name) - (end1 - name), file_name, NullS); + if ((share= (NDB_SHARE*)hash_search(&ndbcluster_open_tables, + (byte*)name, end - name)) + && share->op == 0 && share->op_old == 0) + { + /* + there is no binlog creation setup for this table + attempt to do it + */ + + pthread_mutex_unlock(&ndbcluster_mutex); + pthread_mutex_lock(&LOCK_open); + ndbcluster_create_binlog_setup(ndb, name, db, file_name, share); + pthread_mutex_unlock(&LOCK_open); + pthread_mutex_lock(&ndbcluster_mutex); + } + } + pthread_mutex_unlock(&ndbcluster_mutex); + } +#endif + // Check for new files to discover DBUG_PRINT("info", ("Checking for new files to discover")); List<char> create_list; @@ -5055,11 +5299,18 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, static int connect_callback() { update_status_variables(g_ndb_cluster_connection); + + uint node_id, i= 0; + Ndb_cluster_connection_node_iter node_iter; + memset((void *)g_node_id_map, 0xFFFF, sizeof(g_node_id_map)); + while ((node_id= g_ndb_cluster_connection->get_next_node(node_iter))) + g_node_id_map[node_id]= i++; + pthread_cond_signal(&COND_ndb_util_thread); return 0; } -bool ndbcluster_init() +static bool ndbcluster_init() { int res; DBUG_ENTER("ndbcluster_init"); @@ -5067,6 +5318,22 @@ bool ndbcluster_init() if (have_ndbcluster != SHOW_OPTION_YES) goto ndbcluster_init_error; + { + handlerton &h= ndbcluster_hton; + h.close_connection= ndbcluster_close_connection; + h.commit= ndbcluster_commit; + h.rollback= ndbcluster_rollback; + h.create= ndbcluster_create_handler; /* Create a new handler */ + h.drop_database= ndbcluster_drop_database; /* Drop a database */ + h.panic= ndbcluster_end; /* Panic call */ + h.show_status= ndbcluster_show_status; /* Show status */ + h.alter_tablespace= ndbcluster_alter_tablespace; /* Show status */ +#ifdef HAVE_NDB_BINLOG + ndbcluster_binlog_init_handlerton(); +#endif + h.flags= HTON_NO_FLAGS; + } + // Set connectstring if specified if (opt_ndbcluster_connectstring != 0) DBUG_PRINT("connectstring", ("%s", opt_ndbcluster_connectstring)); @@ -5130,6 +5397,22 @@ bool ndbcluster_init() (void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0, (hash_get_key) ndbcluster_get_key,0,0); pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST); +#ifdef HAVE_NDB_BINLOG + /* start the ndb injector thread */ + if (opt_bin_log) + { + if (binlog_row_based) + { + if (ndbcluster_binlog_start()) + goto ndbcluster_init_error; + } + else + { + sql_print_error("NDB: only row based binary logging 
is supported"); + } + } +#endif /* HAVE_NDB_BINLOG */ + pthread_mutex_init(&LOCK_ndb_util_thread, MY_MUTEX_INIT_FAST); pthread_cond_init(&COND_ndb_util_thread, NULL); @@ -5160,26 +5443,13 @@ ndbcluster_init_error: DBUG_RETURN(TRUE); } - -/* - End use of the NDB Cluster table handler - - free all global variables allocated by - ndbcluster_init() -*/ - -int ndbcluster_end(ha_panic_function type) +static int ndbcluster_end(ha_panic_function type) { DBUG_ENTER("ndbcluster_end"); if (!ndbcluster_inited) DBUG_RETURN(0); - // Kill ndb utility thread - (void) pthread_mutex_lock(&LOCK_ndb_util_thread); - DBUG_PRINT("exit",("killing ndb util thread: %lx", ndb_util_thread)); - (void) pthread_cond_signal(&COND_ndb_util_thread); - (void) pthread_mutex_unlock(&LOCK_ndb_util_thread); - if (g_ndb) { #ifndef DBUG_OFF @@ -5206,7 +5476,6 @@ int ndbcluster_end(ha_panic_function type) pthread_mutex_destroy(&LOCK_ndb_util_thread); pthread_cond_destroy(&COND_ndb_util_thread); ndbcluster_inited= 0; - ndbcluster_util_inited= 0; DBUG_RETURN(0); } @@ -5673,60 +5942,6 @@ ha_ndbcluster::register_query_cache_table(THD *thd, } -#ifndef DBUG_OFF -static void dbug_print_table(const char *info, TABLE *table) -{ - if (table == 0) - { - DBUG_PRINT("info",("%s: (null)", info)); - return; - } - DBUG_PRINT("info", - ("%s: %s.%s s->fields: %d " - "reclength: %d rec_buff_length: %d record[0]: %lx " - "record[1]: %lx", - info, - table->s->db, - table->s->table_name, - table->s->fields, - table->s->reclength, - table->s->rec_buff_length, - table->record[0], - table->record[1])); - - for (unsigned int i= 0; i < table->s->fields; i++) - { - Field *f= table->field[i]; - DBUG_PRINT("info", - ("[%d] \"%s\"(0x%lx:%s%s%s%s%s%s) type: %d pack_length: %d " - "ptr: 0x%lx[+%d] null_bit: %u null_ptr: 0x%lx[+%d]", - i, - f->field_name, - f->flags, - (f->flags & PRI_KEY_FLAG) ? "pri" : "attr", - (f->flags & NOT_NULL_FLAG) ? "" : ",nullable", - (f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed", - (f->flags & ZEROFILL_FLAG) ? ",zerofill" : "", - (f->flags & BLOB_FLAG) ? ",blob" : "", - (f->flags & BINARY_FLAG) ? ",binary" : "", - f->real_type(), - f->pack_length(), - f->ptr, f->ptr - table->record[0], - f->null_bit, - f->null_ptr, (byte*) f->null_ptr - table->record[0])); - if (f->type() == MYSQL_TYPE_BIT) - { - Field_bit *g= (Field_bit*) f; - DBUG_PRINT("MYSQL_TYPE_BIT",("field_length: %d bit_ptr: 0x%lx[+%d] " - "bit_ofs: %u bit_len: %u", - g->field_length, g->bit_ptr, - (byte*) g->bit_ptr-table->record[0], - g->bit_ofs, g->bit_len)); - } - } -} -#endif - /* Handling the shared NDB_SHARE structure that is needed to provide table locking. @@ -5756,6 +5971,12 @@ static void dbug_print_open_tables() ("db.tablename: %s.%s use_count: %d commit_count: %d", share->db, share->table_name, share->use_count, share->commit_count)); +#ifdef HAVE_NDB_BINLOG + if (share->table) + DBUG_PRINT("share", + ("table->s->db.table_name: %s.%s", + share->table->s->db.str, share->table->s->table_name.str)); +#endif } DBUG_VOID_RETURN; } @@ -5763,11 +5984,170 @@ static void dbug_print_open_tables() #define dbug_print_open_tables() #endif +#ifdef HAVE_NDB_BINLOG +/* + For some reason a share is still around, try to salvage the situation + by closing all cached tables. If the share still exists, there is an + error somewhere but only report this to the error log. Keep this + "trailing share" but rename it since there are still references to it + to avoid segmentation faults. There is a risk that the memory for + this trailing share leaks. 
+ + Must be called with pthread_mutex_lock(&ndbcluster_mutex) already taken +*/ +int handle_trailing_share(NDB_SHARE *share) +{ + static ulong trailing_share_id= 0; + DBUG_ENTER("handle_trailing_share"); + + ++share->use_count; + pthread_mutex_unlock(&ndbcluster_mutex); + + close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, TRUE); + + pthread_mutex_lock(&ndbcluster_mutex); + if (!--share->use_count) + { + DBUG_PRINT("info", ("NDB_SHARE: close_cached_tables %s freed share.", + share->key)); + real_free_share(&share); + DBUG_RETURN(0); + } + + /* + the share still exists; if it has not been dropped by the server, + release that share + */ + if (share->state != NSS_DROPPED && !--share->use_count) + { + DBUG_PRINT("info", ("NDB_SHARE: %s already exists, " + "use_count=%d state != NSS_DROPPED.", + share->key, share->use_count)); + real_free_share(&share); + DBUG_RETURN(0); + } + DBUG_PRINT("error", ("NDB_SHARE: %s already exists use_count=%d.", + share->key, share->use_count)); + + sql_print_error("NDB_SHARE: %s already exists use_count=%d." + " Moving away for safety, but possible memleak.", + share->key, share->use_count); + dbug_print_open_tables(); + + /* + This is probably an error. We can however save the situation + at the cost of a possible mem leak, by "renaming" the share + - First remove from hash + */ + hash_delete(&ndbcluster_open_tables, (byte*) share); + + /* + now give it a new name, just a running number; + if space is not enough, allocate some more + */ + { + const uint min_key_length= 10; + if (share->key_length < min_key_length) + { + share->key= alloc_root(&share->mem_root, min_key_length + 1); + share->key_length= min_key_length; + } + share->key_length= + my_snprintf(share->key, min_key_length + 1, "#leak%d", + trailing_share_id++); + } + /* Keep it for a possible future trailing free */ + my_hash_insert(&ndbcluster_open_tables, (byte*) share); + + DBUG_RETURN(0); +} + +/* + Rename share is used during rename table. +*/ +static int rename_share(NDB_SHARE *share, const char *new_key) +{ + NDB_SHARE *tmp; + pthread_mutex_lock(&ndbcluster_mutex); + uint new_length= (uint) strlen(new_key); + DBUG_PRINT("rename_share", ("old_key: %s old_length: %d", + share->key, share->key_length)); + if ((tmp= (NDB_SHARE*) hash_search(&ndbcluster_open_tables, + (byte*) new_key, new_length))) + handle_trailing_share(tmp); + + /* remove the share from hash */ + hash_delete(&ndbcluster_open_tables, (byte*) share); + dbug_print_open_tables(); + + /* save old stuff if insert should fail */ + uint old_length= share->key_length; + char *old_key= share->key; + + /* + now allocate and set the new key, db etc; + enough space for key, db, and table_name + */ + share->key= alloc_root(&share->mem_root, 2 * (new_length + 1)); + strmov(share->key, new_key); + share->key_length= new_length; + + if (my_hash_insert(&ndbcluster_open_tables, (byte*) share)) + { + // ToDo free the allocated stuff above?
+ DBUG_PRINT("error", ("rename_share: my_hash_insert %s failed", + share->key)); + share->key= old_key; + share->key_length= old_length; + if (my_hash_insert(&ndbcluster_open_tables, (byte*) share)) + { + sql_print_error("rename_share: failed to recover %s", share->key); + DBUG_PRINT("error", ("rename_share: my_hash_insert %s failed", + share->key)); + } + dbug_print_open_tables(); + pthread_mutex_unlock(&ndbcluster_mutex); + return -1; + } + dbug_print_open_tables(); + + share->db= share->key + new_length + 1; + ha_ndbcluster::set_dbname(new_key, share->db); + share->table_name= share->db + strlen(share->db) + 1; + ha_ndbcluster::set_tabname(new_key, share->table_name); + + DBUG_PRINT("rename_share", + ("0x%lx key: %s key_length: %d", + share, share->key, share->key_length)); + DBUG_PRINT("rename_share", + ("db.tablename: %s.%s use_count: %d commit_count: %d", + share->db, share->table_name, + share->use_count, share->commit_count)); + DBUG_PRINT("rename_share", + ("table->s->db.table_name: %s.%s", + share->table->s->db.str, share->table->s->table_name.str)); + + if (share->op == 0) + { + share->table->s->db.str= share->db; + share->table->s->db.length= strlen(share->db); + share->table->s->table_name.str= share->table_name; + share->table->s->table_name.length= strlen(share->table_name); + } + /* else rename will be handled when the ALTER event comes */ + share->old_names= old_key; + // ToDo free old_names after ALTER EVENT + + pthread_mutex_unlock(&ndbcluster_mutex); + return 0; +} +#endif + /* Increase refcount on existing share. Always returns share and cannot fail. */ -static NDB_SHARE *get_share(NDB_SHARE *share) +NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share) { pthread_mutex_lock(&ndbcluster_mutex); share->use_count++; @@ -5799,9 +6179,13 @@ static NDB_SHARE *get_share(NDB_SHARE *share) have_lock == TRUE, pthread_mutex_lock(&ndbcluster_mutex) already taken */ -static NDB_SHARE *get_share(const char *key, bool create_if_not_exists, - bool have_lock) +NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table, + bool create_if_not_exists, + bool have_lock) { + DBUG_ENTER("get_share"); + DBUG_PRINT("info", ("get_share: key %s", key)); + THD *thd= current_thd; NDB_SHARE *share; if (!have_lock) pthread_mutex_lock(&ndbcluster_mutex); @@ -5847,6 +6231,9 @@ static NDB_SHARE *get_share(const char *key, bool create_if_not_exists, ha_ndbcluster::set_dbname(key, share->db); share->table_name= share->db + strlen(share->db) + 1; ha_ndbcluster::set_tabname(key, share->table_name); +#ifdef HAVE_NDB_BINLOG + ndbcluster_binlog_init_share(share, table); +#endif *root_ptr= old_root; } else @@ -5874,7 +6261,7 @@ static NDB_SHARE *get_share(const char *key, bool create_if_not_exists, return share; } -static void real_free_share(NDB_SHARE **share) +void ndbcluster_real_free_share(NDB_SHARE **share) { DBUG_PRINT("real_free_share", ("0x%lx key: %s key_length: %d", @@ -5889,6 +6276,26 @@ static void real_free_share(NDB_SHARE **share) pthread_mutex_destroy(&(*share)->mutex); free_root(&(*share)->mem_root, MYF(0)); +#ifdef HAVE_NDB_BINLOG + if ((*share)->table) + { + closefrm((*share)->table, 0); +#if 0 // todo ? 
+ free_root(&(*share)->table->mem_root, MYF(0)); +#endif + +#ifndef DBUG_OFF + bzero((gptr)(*share)->table_share, sizeof(*(*share)->table_share)); + bzero((gptr)(*share)->table, sizeof(*(*share)->table)); +#endif + my_free((gptr) (*share)->table_share, MYF(0)); + my_free((gptr) (*share)->table, MYF(0)); +#ifndef DBUG_OFF + (*share)->table_share= 0; + (*share)->table= 0; +#endif + } +#endif my_free((gptr) *share, MYF(0)); *share= 0; @@ -5901,7 +6308,7 @@ have_lock == TRUE, pthread_mutex_lock(&ndbcluster_mutex) already taken */ -static void free_share(NDB_SHARE **share, bool have_lock) +void ndbcluster_free_share(NDB_SHARE **share, bool have_lock) { if (!have_lock) pthread_mutex_lock(&ndbcluster_mutex); @@ -5927,7 +6334,6 @@ } - /* Internal representation of the frm blob @@ -6601,7 +7007,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) Wait for cluster to start */ pthread_mutex_lock(&LOCK_ndb_util_thread); - while (!ndb_cluster_node_id) + while (!ndb_cluster_node_id && (ndbcluster_hton.slot != ~(uint)0)) { /* ndb not connected yet */ set_timespec(abstime, 1); @@ -6616,13 +7022,35 @@ } pthread_mutex_unlock(&LOCK_ndb_util_thread); + { + Thd_ndb *thd_ndb; + if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb())) + { + sql_print_error("Could not allocate Thd_ndb object"); + goto ndb_util_thread_end; + } + set_thd_ndb(thd, thd_ndb); + thd_ndb->options|= TNO_NO_LOG_SCHEMA_OP; + } + +#ifdef HAVE_NDB_BINLOG + /* create tables needed by the replication */ + ndbcluster_setup_binlog_table_shares(thd); +#else /* Get all table definitions from the storage node */ ndbcluster_find_all_files(thd); +#endif ndbcluster_util_inited= 1; +#ifdef HAVE_NDB_BINLOG + /* If running, signal injector thread that all is setup */ + if (ndb_binlog_thread_running > 0) + pthread_cond_signal(&injector_cond); +#endif +
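The pthread_cond_signal above is one half of a startup handshake: the binlog injector thread blocks until the utility thread has created the binlog table shares. A sketch of the waiting side, following the timed-wait pattern this patch uses elsewhere (e.g. in ndbcluster_binlog_end); the exact predicate shown is illustrative:

    /*
      Sketch of the waiting side of the injector_cond handshake:
      re-check the predicate on each wakeup, with a 1 second timeout
      so a shutdown request is never missed.
    */
    pthread_mutex_lock(&injector_mutex);
    while (!ndbcluster_util_inited && ndb_binlog_thread_running > 0)
    {
      struct timespec abstime;
      set_timespec(abstime, 1);
      pthread_cond_timedwait(&injector_cond, &injector_mutex, &abstime);
    }
    pthread_mutex_unlock(&injector_mutex);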
set_timespec(abstime, 0); for (;!abort_loop;) { @@ -6639,6 +7067,15 @@ if (abort_loop) break; /* Shutting down server */ +#ifdef HAVE_NDB_BINLOG + /* + Check that the apply_status_share and schema_share have been created. + If not, try to create them + */ + if (!apply_status_share || !schema_share) + ndbcluster_setup_binlog_table_shares(thd); +#endif + if (ndb_cache_check_time == 0) { /* Wake up in 1 second to check if value has changed */ set_timespec(abstime, 1); @@ -6652,6 +7089,12 @@ for (uint i= 0; i < ndbcluster_open_tables.records; i++) { share= (NDB_SHARE *)hash_element(&ndbcluster_open_tables, i); +#ifdef HAVE_NDB_BINLOG + if ((share->use_count - (int) (share->op != 0) - (int) (share->op != 0)) + <= 0) + continue; // injector thread is the only user, skip statistics + share->util_lock= current_thd; // Mark that util thread has lock +#endif /* HAVE_NDB_BINLOG */ share->use_count++; /* Make sure the table can't be closed */ DBUG_PRINT("ndb_util_thread", ("Found open table[%d]: %s, use_count: %d", @@ -6666,6 +7109,17 @@ List_iterator_fast<NDB_SHARE> it(util_open_tables); while ((share= it++)) { +#ifdef HAVE_NDB_BINLOG + if ((share->use_count - (int) (share->op != 0) - (int) (share->op != 0)) + <= 1) + { + /* + Util thread and injector thread are the only users, skip statistics + */ + free_share(&share); + continue; + } +#endif /* HAVE_NDB_BINLOG */ DBUG_PRINT("ndb_util_thread", ("Fetching commit count for: %s", share->key)); @@ -6727,6 +7181,7 @@ } } ndb_util_thread_end: + sql_print_information("Stopping Cluster Utility thread"); net_end(&thd->net); thd->cleanup(); delete thd; @@ -8072,6 +8527,7 @@ ndbcluster_show_status(THD* thd, stat_print_fn *stat_print, enum ha_stat_type stat_type) { char buf[IO_SIZE]; + uint buflen; DBUG_ENTER("ndbcluster_show_status"); if (have_ndbcluster != SHOW_OPTION_YES) @@ -8082,7 +8538,23 @@ { DBUG_RETURN(FALSE); } - + + update_status_variables(g_ndb_cluster_connection); + buflen= + my_snprintf(buf, sizeof(buf), + "cluster_node_id=%u, " + "connected_host=%s, " + "connected_port=%u, " + "number_of_storage_nodes=%u", + ndb_cluster_node_id, + ndb_connected_host, + ndb_connected_port, + ndb_number_of_storage_nodes); + if (stat_print(thd, ndbcluster_hton.name, strlen(ndbcluster_hton.name), + "connection", strlen("connection"), + buf, buflen)) + DBUG_RETURN(TRUE); + if (get_thd_ndb(thd) && get_thd_ndb(thd)->ndb) { Ndb* ndb= (get_thd_ndb(thd))->ndb; @@ -8090,7 +8562,7 @@ tmp.m_name= 0; while (ndb->get_free_list_usage(&tmp)) { - uint buflen= + buflen= my_snprintf(buf, sizeof(buf), "created=%u, free=%u, sizeof=%u", tmp.m_created, tmp.m_free, tmp.m_sizeof); @@ -8099,11 +8571,14 @@ DBUG_RETURN(TRUE); } } - send_eof(thd); - +#ifdef HAVE_NDB_BINLOG + ndbcluster_show_status_binlog(thd, stat_print, stat_type); +#endif + DBUG_RETURN(FALSE); } + /* Create a table in NDB Cluster */ diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index f05c1c32a1a..694c9d9ff53 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -42,8 +42,10 @@ class NdbEventOperation; // connectstring to cluster if given by mysqld extern const char *ndbcluster_connectstring; extern ulong ndb_cache_check_time; +#ifdef HAVE_NDB_BINLOG extern ulong ndb_report_thresh_binlog_epoch_slip; extern ulong ndb_report_thresh_binlog_mem_usage; +#endif typedef enum ndb_index_type { UNDEFINED_INDEX = 0, @@ -86,8 +88,26 @@ typedef struct st_ndbcluster_share {
ulonglong commit_count; char *db; char *table_name; +#ifdef HAVE_NDB_BINLOG + uint32 flags; + NDB_SHARE_STATE state; + NdbEventOperation *op; + NdbEventOperation *op_old; // for rename table + char *old_names; // for rename table + TABLE_SHARE *table_share; + TABLE *table; + NdbValue *ndb_value[2]; + MY_BITMAP *subscriber_bitmap; + MY_BITMAP slock_bitmap; + uint32 slock[256/32]; // 256 bits for lock status of table +#endif } NDB_SHARE; +#ifdef HAVE_NDB_BINLOG +/* NDB_SHARE.flags */ +#define NSF_HIDDEN_PK 1 /* table has hidden primary key */ +#endif + typedef enum ndb_item_type { NDB_VALUE = 0, // Qualified more with Item::Type NDB_FIELD = 1, // Qualified from table definition @@ -461,6 +481,11 @@ class Ndb_cond_traverse_context Place holder for ha_ndbcluster thread specific data */ +enum THD_NDB_OPTIONS +{ + TNO_NO_LOG_SCHEMA_OP= 1 << 0 +}; + class Thd_ndb { public: @@ -472,6 +497,7 @@ class Thd_ndb NdbTransaction *all; NdbTransaction *stmt; int error; + uint32 options; List<NDB_SHARE> changed_tables; }; @@ -553,6 +579,9 @@ class ha_ndbcluster: public handler bool low_byte_first() const; bool has_transactions(); + + virtual bool is_injective() const { return true; } + const char* index_type(uint key_number); double scan_time(); @@ -773,18 +802,10 @@ private: extern SHOW_VAR ndb_status_variables[]; -bool ndbcluster_init(void); -int ndbcluster_end(ha_panic_function flag); - int ndbcluster_discover(THD* thd, const char* dbname, const char* name, const void** frmblob, uint* frmlen); int ndbcluster_find_files(THD *thd,const char *db,const char *path, const char *wild, bool dir, List<char> *files); int ndbcluster_table_exists_in_engine(THD* thd, const char *db, const char *name); -void ndbcluster_drop_database(char* path); - void ndbcluster_print_error(int error, const NdbOperation *error_op); - -bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type); - diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc new file mode 100644 index 00000000000..c80b2b27d8d --- /dev/null +++ b/sql/ha_ndbcluster_binlog.cc @@ -0,0 +1,2732 @@ +/* Copyright (C) 2000-2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#include "mysql_priv.h" +#include "ha_ndbcluster.h" + +#ifdef HAVE_NDB_BINLOG +#include "rpl_injector.h" +#include "slave.h" +#include "ha_ndbcluster_binlog.h" + +/* + defines for cluster replication table names +*/ +#include "ha_ndbcluster_tables.h" +#define NDB_APPLY_TABLE_FILE "./" NDB_REP_DB "/" NDB_APPLY_TABLE +#define NDB_SCHEMA_TABLE_FILE "./" NDB_REP_DB "/" NDB_SCHEMA_TABLE + +/* + Flag showing if the ndb injector thread is running, if so == 1 + -1 if it was started but later stopped for some reason + 0 if never started +*/ +int ndb_binlog_thread_running= 0; + +/* + Global reference to the ndb injector thread THD oject + + Has one sole purpose, for setting the in_use table member variable + in get_share(...) 
+*/ +THD *injector_thd= 0; + +/* + Global reference to ndb injector thd object. + + Used mainly by the binlog index thread, but exposed to the client sql + thread for one reason; to setup the events operations for a table + to enable ndb injector thread receiving events. + + Must therefore always be used with a surrounding + pthread_mutex_lock(&injector_mutex), when doing create/dropEventOperation +*/ +static Ndb *injector_ndb= 0; +static Ndb *schema_ndb= 0; + +/* + Mutex and condition used for interacting between client sql thread + and injector thread +*/ +pthread_t ndb_binlog_thread; +pthread_mutex_t injector_mutex; +pthread_cond_t injector_cond; + +/* NDB Injector thread (used for binlog creation) */ +static ulonglong ndb_latest_applied_binlog_epoch= 0; +static ulonglong ndb_latest_handled_binlog_epoch= 0; +static ulonglong ndb_latest_received_binlog_epoch= 0; + +NDB_SHARE *apply_status_share= 0; +NDB_SHARE *schema_share= 0; + +/* instantiated in storage/ndb/src/ndbapi/Ndbif.cpp */ +extern Uint64 g_latest_trans_gci; + +/* + Global variables for holding the binlog_index table reference +*/ +static TABLE *binlog_index= 0; +static TABLE_LIST binlog_tables; + +/* + Helper functions +*/ + +#ifndef DBUG_OFF +static void print_records(TABLE *table, const char *record) +{ + if (_db_on_) + { + for (uint j= 0; j < table->s->fields; j++) + { + char buf[40]; + int pos= 0; + Field *field= table->field[j]; + const byte* field_ptr= field->ptr - table->record[0] + record; + int pack_len= field->pack_length(); + int n= pack_len < 10 ? pack_len : 10; + + for (int i= 0; i < n && pos < 20; i++) + { + pos+= sprintf(&buf[pos]," %x", (int) (unsigned char) field_ptr[i]); + } + buf[pos]= 0; + DBUG_PRINT("info",("[%u]field_ptr[0->%d]: %s", j, n, buf)); + } + } +} +#else +#define print_records(a,b) +#endif + + +#ifndef DBUG_OFF +static void dbug_print_table(const char *info, TABLE *table) +{ + if (table == 0) + { + DBUG_PRINT("info",("%s: (null)", info)); + return; + } + DBUG_PRINT("info", + ("%s: %s.%s s->fields: %d " + "reclength: %d rec_buff_length: %d record[0]: %lx " + "record[1]: %lx", + info, + table->s->db.str, + table->s->table_name.str, + table->s->fields, + table->s->reclength, + table->s->rec_buff_length, + table->record[0], + table->record[1])); + + for (unsigned int i= 0; i < table->s->fields; i++) + { + Field *f= table->field[i]; + DBUG_PRINT("info", + ("[%d] \"%s\"(0x%lx:%s%s%s%s%s%s) type: %d pack_length: %d " + "ptr: 0x%lx[+%d] null_bit: %u null_ptr: 0x%lx[+%d]", + i, + f->field_name, + f->flags, + (f->flags & PRI_KEY_FLAG) ? "pri" : "attr", + (f->flags & NOT_NULL_FLAG) ? "" : ",nullable", + (f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed", + (f->flags & ZEROFILL_FLAG) ? ",zerofill" : "", + (f->flags & BLOB_FLAG) ? ",blob" : "", + (f->flags & BINARY_FLAG) ? 
",binary" : "", + f->real_type(), + f->pack_length(), + f->ptr, f->ptr - table->record[0], + f->null_bit, + f->null_ptr, (byte*) f->null_ptr - table->record[0])); + if (f->type() == MYSQL_TYPE_BIT) + { + Field_bit *g= (Field_bit*) f; + DBUG_PRINT("MYSQL_TYPE_BIT",("field_length: %d bit_ptr: 0x%lx[+%d] " + "bit_ofs: %u bit_len: %u", + g->field_length, g->bit_ptr, + (byte*) g->bit_ptr-table->record[0], + g->bit_ofs, g->bit_len)); + } + } +} +#else +#define dbug_print_table(a,b) +#endif + + +/* + Run a query through mysql_parse + + Used to: + - purging the cluster_replication.binlog_index + - creating the cluster_replication.apply_status table +*/ +static void run_query(THD *thd, char *buf, char *end, + my_bool print_error, my_bool disable_binlog) +{ + ulong save_query_length= thd->query_length; + char *save_query= thd->query; + ulong save_thread_id= thd->variables.pseudo_thread_id; + ulonglong save_thd_options= thd->options; + DBUG_ASSERT(sizeof(save_thd_options) == sizeof(thd->options)); + NET save_net= thd->net; + + bzero((char*) &thd->net, sizeof(NET)); + thd->query_length= end - buf; + thd->query= buf; + thd->variables.pseudo_thread_id= thread_id; + if (disable_binlog) + thd->options&= ~OPTION_BIN_LOG; + + DBUG_PRINT("query", ("%s", thd->query)); + mysql_parse(thd, thd->query, thd->query_length); + + if (print_error && thd->query_error) + { + sql_print_error("NDB: %s: error %s %d %d %d", + buf, thd->net.last_error, thd->net.last_errno, + thd->net.report_error, thd->query_error); + } + + thd->options= save_thd_options; + thd->query_length= save_query_length; + thd->query= save_query; + thd->variables.pseudo_thread_id= save_thread_id; + thd->net= save_net; + + if (thd == injector_thd) + { + /* + running the query will close all tables, including the binlog_index + used in injector_thd + */ + binlog_index= 0; + } +} + +/* + Initialize the binlog part of the NDB_SHARE +*/ +void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table) +{ + THD *thd= current_thd; + MEM_ROOT *mem_root= &share->mem_root; + + share->op= 0; + share->table= 0; + if (ndb_binlog_thread_running <= 0) + { + DBUG_ASSERT(_table != 0); + if (_table->s->primary_key == MAX_KEY) + share->flags|= NSF_HIDDEN_PK; + return; + } + while (1) + { + TABLE_SHARE *table_share= + (TABLE_SHARE *) my_malloc(sizeof(*table_share), MYF(MY_WME)); + TABLE *table= (TABLE*) my_malloc(sizeof(*table), MYF(MY_WME)); + int error; + + init_tmp_table_share(table_share, share->db, 0, share->table_name, + share->key); + if ((error= open_table_def(thd, table_share, 0))) + { + sql_print_error("Unable to get table share for %s, error=%d", + share->key, error); + DBUG_PRINT("error", ("open_table_def failed %d", error)); + my_free((gptr) table_share, MYF(0)); + table_share= 0; + my_free((gptr) table, MYF(0)); + table= 0; + break; + } + if ((error= open_table_from_share(thd, table_share, "", 0, + (uint) READ_ALL, 0, table))) + { + sql_print_error("Unable to open table for %s, error=%d(%d)", + share->key, error, my_errno); + DBUG_PRINT("error", ("open_table_from_share failed %d", error)); + my_free((gptr) table_share, MYF(0)); + table_share= 0; + my_free((gptr) table, MYF(0)); + table= 0; + break; + } + assign_new_table_id(table); + if (!table->record[1] || table->record[1] == table->record[0]) + { + table->record[1]= alloc_root(&table->mem_root, + table->s->rec_buff_length); + } + table->in_use= injector_thd; + + table->s->db.str= share->db; + table->s->db.length= strlen(share->db); + table->s->table_name.str= share->table_name; + 
+    table->s->table_name.length= strlen(share->table_name);
+
+    share->table_share= table_share;
+    share->table= table;
+#ifndef DBUG_OFF
+    dbug_print_table("table", table);
+#endif
+    /*
+      ! do not touch the contents of the table
+      it may be in use by the injector thread
+    */
+    share->ndb_value[0]= (NdbValue*)
+      alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
+                 + 1 /*extra for hidden key*/);
+    share->ndb_value[1]= (NdbValue*)
+      alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
+                 + 1 /*extra for hidden key*/);
+    {
+      int i, no_nodes= g_ndb_cluster_connection->no_db_nodes();
+      share->subscriber_bitmap= (MY_BITMAP*)
+        alloc_root(mem_root, no_nodes * sizeof(MY_BITMAP));
+      for (i= 0; i < no_nodes; i++)
+      {
+        bitmap_init(&share->subscriber_bitmap[i],
+                    (Uint32*)alloc_root(mem_root, max_ndb_nodes/8),
+                    max_ndb_nodes, false);
+        bitmap_clear_all(&share->subscriber_bitmap[i]);
+      }
+      bitmap_init(&share->slock_bitmap, share->slock,
+                  sizeof(share->slock)*8, false);
+      bitmap_clear_all(&share->slock_bitmap);
+    }
+    if (table->s->primary_key == MAX_KEY)
+      share->flags|= NSF_HIDDEN_PK;
+    break;
+  }
+}
+
+/*****************************************************************
+  functions called from master sql client threads
+****************************************************************/
+
+/*
+  called in mysql_show_binlog_events and reset_logs to make sure we wait for
+  all events originating from this mysql server to arrive in the binlog
+
+  Wait for the epoch that the last transaction is part of.
+
+  Wait a maximum of 30 seconds.
+*/
+static void ndbcluster_binlog_wait(THD *thd)
+{
+  if (ndb_binlog_thread_running > 0)
+  {
+    DBUG_ENTER("ndbcluster_binlog_wait");
+    const char *save_info= thd ? thd->proc_info : 0;
+    ulonglong wait_epoch= g_latest_trans_gci;
+    int count= 30;
+    if (thd)
+      thd->proc_info= "Waiting for ndbcluster binlog update to "
+        "reach current position";
+    while (count && ndb_binlog_thread_running > 0 &&
+           ndb_latest_handled_binlog_epoch < wait_epoch)
+    {
+      count--;
+      sleep(1);
+    }
+    if (thd)
+      thd->proc_info= save_info;
+    DBUG_VOID_RETURN;
+  }
+}
+
+/*
+  Called from MYSQL_LOG::reset_logs in log.cc when binlog is emptied
+*/
+static int ndbcluster_reset_logs(THD *thd)
+{
+  if (ndb_binlog_thread_running <= 0)
+    return 0;
+
+  DBUG_ENTER("ndbcluster_reset_logs");
+
+  /*
+    Wait until all events originating from this mysql server have
+    reached the binlog before continuing to reset
+  */
+  ndbcluster_binlog_wait(thd);
+
+  char buf[1024];
+  char *end= strmov(buf, "DELETE FROM " NDB_REP_DB "." NDB_REP_TABLE);
+
+  run_query(thd, buf, end, FALSE, TRUE);
+
+  DBUG_RETURN(0);
+}
+
+/*
+  Called from MYSQL_LOG::purge_logs in log.cc when the binlog "file"
+  is removed
+*/
+
+static int
+ndbcluster_binlog_index_purge_file(THD *thd, const char *file)
+{
+  if (ndb_binlog_thread_running <= 0)
+    return 0;
+
+  DBUG_ENTER("ndbcluster_binlog_index_purge_file");
+  DBUG_PRINT("enter", ("file: %s", file));
+
+  char buf[1024];
+  char *end= strmov(strmov(strmov(buf,
+                                  "DELETE FROM "
+                                  NDB_REP_DB "." 
NDB_REP_TABLE + " WHERE File='"), file), "'"); + + run_query(thd, buf, end, FALSE, TRUE); + + DBUG_RETURN(0); +} + +static void +ndbcluster_binlog_log_query(THD *thd, enum_binlog_command binlog_command, + const char *query, uint query_length, + const char *db, const char *table_name) +{ + DBUG_ENTER("ndbcluster_binlog_log_query"); + DBUG_PRINT("enter", ("db: %s table_name: %s query: %s", + db, table_name, query)); + DBUG_VOID_RETURN; +} + +/* + End use of the NDB Cluster table handler + - free all global variables allocated by + ndbcluster_init() +*/ + +static int ndbcluster_binlog_end(THD *thd) +{ + DBUG_ENTER("ndb_binlog_end"); + + if (!ndbcluster_util_inited) + DBUG_RETURN(0); + + // Kill ndb utility thread + (void) pthread_mutex_lock(&LOCK_ndb_util_thread); + DBUG_PRINT("exit",("killing ndb util thread: %lx", ndb_util_thread)); + (void) pthread_cond_signal(&COND_ndb_util_thread); + (void) pthread_mutex_unlock(&LOCK_ndb_util_thread); + +#ifdef HAVE_NDB_BINLOG + /* wait for injector thread to finish */ + if (ndb_binlog_thread_running > 0) + { + pthread_mutex_lock(&injector_mutex); + while (ndb_binlog_thread_running > 0) + { + struct timespec abstime; + set_timespec(abstime, 1); + pthread_cond_timedwait(&injector_cond, &injector_mutex, &abstime); + } + pthread_mutex_unlock(&injector_mutex); + } + + /* remove all shares */ + { + pthread_mutex_lock(&ndbcluster_mutex); + for (uint i= 0; i < ndbcluster_open_tables.records; i++) + { + NDB_SHARE *share= + (NDB_SHARE*) hash_element(&ndbcluster_open_tables, i); + if (share->table) + DBUG_PRINT("share", + ("table->s->db.table_name: %s.%s", + share->table->s->db.str, share->table->s->table_name.str)); + if (share->state != NSS_DROPPED && !--share->use_count) + real_free_share(&share); + else + { + DBUG_PRINT("share", + ("[%d] 0x%lx key: %s key_length: %d", + i, share, share->key, share->key_length)); + DBUG_PRINT("share", + ("db.tablename: %s.%s use_count: %d commit_count: %d", + share->db, share->table_name, + share->use_count, share->commit_count)); + } + } + pthread_mutex_unlock(&ndbcluster_mutex); + } +#endif + ndbcluster_util_inited= 0; + DBUG_RETURN(0); +} + +/***************************************************************** + functions called from slave sql client threads +****************************************************************/ +static void ndbcluster_reset_slave(THD *thd) +{ + if (ndb_binlog_thread_running <= 0) + return; + + DBUG_ENTER("ndbcluster_reset_slave"); + char buf[1024]; + char *end= strmov(buf, "DELETE FROM " NDB_REP_DB "." 
NDB_APPLY_TABLE);
+  run_query(thd, buf, end, FALSE, TRUE);
+  DBUG_VOID_RETURN;
+}
+
+/*
+  Dispatch binlog-related callbacks from the server to the
+  corresponding ndb handler functions
+*/
+static int ndbcluster_binlog_func(THD *thd, enum_binlog_func fn, void *arg)
+{
+  switch(fn)
+  {
+  case BFN_RESET_LOGS:
+    ndbcluster_reset_logs(thd);
+    break;
+  case BFN_RESET_SLAVE:
+    ndbcluster_reset_slave(thd);
+    break;
+  case BFN_BINLOG_WAIT:
+    ndbcluster_binlog_wait(thd);
+    break;
+  case BFN_BINLOG_END:
+    ndbcluster_binlog_end(thd);
+    break;
+  case BFN_BINLOG_PURGE_FILE:
+    ndbcluster_binlog_index_purge_file(thd, (const char *)arg);
+    break;
+  }
+  return 0;
+}
+
+/*
+  Initialize the binlog part of the ndb handlerton
+*/
+void ndbcluster_binlog_init_handlerton()
+{
+  handlerton &h= ndbcluster_hton;
+  h.binlog_func= ndbcluster_binlog_func;
+  h.binlog_log_query= ndbcluster_binlog_log_query;
+}
+
+/*
+  check the availability of the cluster_replication.apply_status share
+  - return share, but do not increase refcount
+  - return 0 if there is no share
+*/
+static NDB_SHARE *ndbcluster_check_apply_status_share()
+{
+  pthread_mutex_lock(&ndbcluster_mutex);
+
+  void *share= hash_search(&ndbcluster_open_tables,
+                           NDB_APPLY_TABLE_FILE,
+                           sizeof(NDB_APPLY_TABLE_FILE) - 1);
+  DBUG_PRINT("info",("ndbcluster_check_apply_status_share %s %p",
+                     NDB_APPLY_TABLE_FILE, share));
+  pthread_mutex_unlock(&ndbcluster_mutex);
+  return (NDB_SHARE*) share;
+}
+
+/*
+  check the availability of the cluster_replication.schema share
+  - return share, but do not increase refcount
+  - return 0 if there is no share
+*/
+static NDB_SHARE *ndbcluster_check_schema_share()
+{
+  pthread_mutex_lock(&ndbcluster_mutex);
+
+  void *share= hash_search(&ndbcluster_open_tables,
+                           NDB_SCHEMA_TABLE_FILE,
+                           sizeof(NDB_SCHEMA_TABLE_FILE) - 1);
+  DBUG_PRINT("info",("ndbcluster_check_schema_share %s %p",
+                     NDB_SCHEMA_TABLE_FILE, share));
+  pthread_mutex_unlock(&ndbcluster_mutex);
+  return (NDB_SHARE*) share;
+}
+
+/*
+  Create the cluster_replication.apply_status table
+*/
+static int ndbcluster_create_apply_status_table(THD *thd)
+{
+  DBUG_ENTER("ndbcluster_create_apply_status_table");
+
+  /*
+    Check if we already have the apply status table.
+    If so it should have been discovered at startup
+    and thus have a share
+  */
+
+  if (ndbcluster_check_apply_status_share())
+    DBUG_RETURN(0);
+
+  if (g_ndb_cluster_connection->get_no_ready() <= 0)
+    DBUG_RETURN(0);
+
+  char buf[1024], *end;
+
+  if (ndb_extra_logging)
+    sql_print_information("NDB: Creating " NDB_REP_DB "." NDB_APPLY_TABLE);
+
+  /*
+    Check if the apply status table exists in the MySQL "dictionary";
+    if so, remove it, since there is none in Ndb
+  */
+  {
+    strxnmov(buf, sizeof(buf),
+             mysql_data_home,
+             "/" NDB_REP_DB "/" NDB_APPLY_TABLE,
+             reg_ext, NullS);
+    unpack_filename(buf,buf);
+    my_delete(buf, MYF(0));
+  }
+
+  /*
+    Note, an update of this table schema must be reflected in ndb_restore
+  */
+  end= strmov(buf, "CREATE TABLE IF NOT EXISTS "
+              NDB_REP_DB "." NDB_APPLY_TABLE
+              " ( server_id INT UNSIGNED NOT NULL,"
+              " epoch BIGINT UNSIGNED NOT NULL, "
+              " PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB");
+
+  run_query(thd, buf, end, TRUE, TRUE);
+
+  DBUG_RETURN(0);
+}
+
+
+/*
+  Create the cluster_replication.schema table
+*/
+static int ndbcluster_create_schema_table(THD *thd)
+{
+  DBUG_ENTER("ndbcluster_create_schema_table");
+
+  /*
+    Check if we already have the schema table. 
+ If so it should have been discovered at startup + and thus have a share + */ + + if (ndbcluster_check_schema_share()) + DBUG_RETURN(0); + + if (g_ndb_cluster_connection->get_no_ready() <= 0) + DBUG_RETURN(0); + + char buf[1024], *end; + + if (ndb_extra_logging) + sql_print_information("NDB: Creating " NDB_REP_DB "." NDB_SCHEMA_TABLE); + + /* + Check if schema table exists in MySQL "dictionary" + if so, remove it since there is none in Ndb + */ + { + strxnmov(buf, sizeof(buf), + mysql_data_home, + "/" NDB_REP_DB "/" NDB_SCHEMA_TABLE, + reg_ext, NullS); + unpack_filename(buf,buf); + my_delete(buf, MYF(0)); + } + + /* + Update the defines below to reflect the table schema + */ + end= strmov(buf, "CREATE TABLE IF NOT EXISTS " + NDB_REP_DB "." NDB_SCHEMA_TABLE + " ( db VARCHAR(63) NOT NULL," + " name VARCHAR(63) NOT NULL," + " slock BINARY(32) NOT NULL," + " query VARCHAR(4094) NOT NULL," + " node_id INT UNSIGNED NOT NULL," + " epoch BIGINT UNSIGNED NOT NULL," + " id INT UNSIGNED NOT NULL," + " version INT UNSIGNED NOT NULL," + " type INT UNSIGNED NOT NULL," + " PRIMARY KEY USING HASH (db,name) ) ENGINE=NDB"); + + run_query(thd, buf, end, TRUE, TRUE); + + DBUG_RETURN(0); +} + +void ndbcluster_setup_binlog_table_shares(THD *thd) +{ + int done_find_all_files= 0; + if (!apply_status_share && + ndbcluster_check_apply_status_share() == 0) + { + if (!done_find_all_files) + { + ndbcluster_find_all_files(thd); + done_find_all_files= 1; + } + ndbcluster_create_apply_status_table(thd); + } + if (!schema_share && + ndbcluster_check_schema_share() == 0) + { + if (!done_find_all_files) + { + ndbcluster_find_all_files(thd); + done_find_all_files= 1; + } + ndbcluster_create_schema_table(thd); + } +} + +/* + Defines and struct for schema table. + Should reflect table definition above. 
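+  SCHEMA_SLOCK_SIZE must match the slock BINARY(32) column, and
+  SCHEMA_QUERY_SIZE the query VARCHAR(4094) column plus its 2 length bytes.
+  For example, a row produced by "CREATE TABLE test.t1 ..." on node 3
+  would carry db="test", name="t1", node_id=3 and type=SOT_CREATE_TABLE.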
+*/ +#define SCHEMA_DB_I 0u +#define SCHEMA_NAME_I 1u +#define SCHEMA_SLOCK_I 2u +#define SCHEMA_QUERY_I 3u +#define SCHEMA_NODE_ID_I 4u +#define SCHEMA_EPOCH_I 5u +#define SCHEMA_ID_I 6u +#define SCHEMA_VERSION_I 7u +#define SCHEMA_TYPE_I 8u +#define SCHEMA_SIZE 9u +#define SCHEMA_SLOCK_SIZE 32u +#define SCHEMA_QUERY_SIZE 4096u + +struct Cluster_replication_schema +{ + unsigned char db_length; + char db[64]; + unsigned char name_length; + char name[64]; + unsigned char slock_length; + uint32 slock[SCHEMA_SLOCK_SIZE/4]; + unsigned short query_length; + char query[SCHEMA_QUERY_SIZE]; + Uint64 epoch; + uint32 node_id; + uint32 id; + uint32 version; + uint32 type; +}; + +/* + Transfer schema table data into corresponding struct +*/ +static void ndbcluster_get_schema(TABLE *table, + Cluster_replication_schema *s) +{ + Field **field; + /* db varchar 1 length byte */ + field= table->field; + s->db_length= *(uint8*)(*field)->ptr; + DBUG_ASSERT(s->db_length <= (*field)->field_length); + DBUG_ASSERT((*field)->field_length + 1 == sizeof(s->db)); + memcpy(s->db, (*field)->ptr + 1, s->db_length); + s->db[s->db_length]= 0; + /* name varchar 1 length byte */ + field++; + s->name_length= *(uint8*)(*field)->ptr; + DBUG_ASSERT(s->name_length <= (*field)->field_length); + DBUG_ASSERT((*field)->field_length + 1 == sizeof(s->name)); + memcpy(s->name, (*field)->ptr + 1, s->name_length); + s->name[s->name_length]= 0; + /* slock fixed length */ + field++; + s->slock_length= (*field)->field_length; + DBUG_ASSERT((*field)->field_length == sizeof(s->slock)); + memcpy(s->slock, (*field)->ptr, s->slock_length); + /* query varchar 2 length bytes */ + field++; + s->query_length= uint2korr((*field)->ptr); + DBUG_ASSERT(s->query_length <= (*field)->field_length); + DBUG_ASSERT((*field)->field_length + 2 == sizeof(s->query)); + memcpy(s->query, (*field)->ptr + 2, s->query_length); + s->query[s->query_length]= 0; + /* node_id */ + field++; + s->node_id= ((Field_long *)*field)->val_int(); + /* epoch */ + field++; + s->epoch= ((Field_long *)*field)->val_int(); + /* id */ + field++; + s->id= ((Field_long *)*field)->val_int(); + /* version */ + field++; + s->version= ((Field_long *)*field)->val_int(); + /* type */ + field++; + s->type= ((Field_long *)*field)->val_int(); +} + +/* + helper function to pack a ndb varchar +*/ +static char *ndb_pack_varchar(const NDBCOL *col, char *buf, + const char *str, int sz) +{ + switch (col->getArrayType()) + { + case NDBCOL::ArrayTypeFixed: + memcpy(buf, str, sz); + break; + case NDBCOL::ArrayTypeShortVar: + *(unsigned char*)buf= (unsigned char)sz; + memcpy(buf + 1, str, sz); + break; + case NDBCOL::ArrayTypeMediumVar: + int2store(buf, sz); + memcpy(buf + 2, str, sz); + break; + } + return buf; +} + +/* + log query in schema table +*/ +int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share, + const char *query, int query_length, + const char *db, const char *table_name, + uint32 ndb_table_id, + uint32 ndb_table_version, + enum SCHEMA_OP_TYPE type) +{ + DBUG_ENTER("ndbcluster_log_schema_op"); +#ifdef NOT_YET + Thd_ndb *thd_ndb= get_thd_ndb(thd); + if (!thd_ndb) + { + if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb())) + { + sql_print_error("Could not allocate Thd_ndb object"); + DBUG_RETURN(1); + } + set_thd_ndb(thd, thd_ndb); + } + + DBUG_PRINT("enter", + ("query: %s db: %s table_name: %s thd_ndb->options: %d", + query, db, table_name, thd_ndb->options)); + if (!schema_share || thd_ndb->options & TNO_NO_LOG_SCHEMA_OP) + { + DBUG_RETURN(0); + } + + char tmp_buf2[FN_REFLEN]; + switch (type) + { + 
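+  /*
+    Normalize the query to be distributed; e.g. a multi-table
+    "DROP TABLE t1, t2" cannot be re-logged as is, see SOT_DROP_TABLE
+    below where it is rewritten as a per-table drop.
+  */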
case SOT_DROP_TABLE: + /* drop database command, do not log at drop table */ + if (thd->lex->sql_command == SQLCOM_DROP_DB) + DBUG_RETURN(0); + /* redo the drop table query as is may contain several tables */ + query= tmp_buf2; + query_length= (uint) (strxmov(tmp_buf2, "drop table `", + table_name, "`", NullS) - tmp_buf2); + break; + case SOT_CREATE_TABLE: + break; + case SOT_RENAME_TABLE: + break; + case SOT_ALTER_TABLE: + break; + case SOT_DROP_DB: + break; + case SOT_CREATE_DB: + break; + case SOT_ALTER_DB: + break; + default: + abort(); /* should not happen, programming error */ + } + + const NdbError *ndb_error= 0; + uint32 node_id= g_ndb_cluster_connection->node_id(); + Uint64 epoch= 0; + MY_BITMAP schema_subscribers; + uint32 bitbuf[sizeof(schema_share->slock)/4]; + { + int i; + bitmap_init(&schema_subscribers, bitbuf, sizeof(bitbuf)*8, false); + bitmap_set_all(&schema_subscribers); + (void) pthread_mutex_lock(&schema_share->mutex); + for (i= 0; i < ndb_number_of_storage_nodes; i++) + { + MY_BITMAP *table_subscribers= &schema_share->subscriber_bitmap[i]; + if (!bitmap_is_clear_all(table_subscribers)) + bitmap_intersect(&schema_subscribers, + table_subscribers); + } + (void) pthread_mutex_unlock(&schema_share->mutex); + bitmap_clear_bit(&schema_subscribers, node_id); + + if (share) + { + (void) pthread_mutex_lock(&share->mutex); + memcpy(share->slock, schema_subscribers.bitmap, sizeof(share->slock)); + (void) pthread_mutex_unlock(&share->mutex); + } + + DBUG_DUMP("schema_subscribers", (char*)schema_subscribers.bitmap, + no_bytes_in_map(&schema_subscribers)); + DBUG_PRINT("info", ("bitmap_is_clear_all(&schema_subscribers): %d", + bitmap_is_clear_all(&schema_subscribers))); + } + + Ndb *ndb= thd_ndb->ndb; + char old_db[128]; + strcpy(old_db, ndb->getDatabaseName()); + + char tmp_buf[SCHEMA_QUERY_SIZE]; + NDBDICT *dict= ndb->getDictionary(); + ndb->setDatabaseName(NDB_REP_DB); + const NDBTAB *ndbtab= dict->getTable(NDB_SCHEMA_TABLE); + NdbTransaction *trans= 0; + int retries= 100; + const NDBCOL *col[SCHEMA_SIZE]; + unsigned sz[SCHEMA_SIZE]; + + if (ndbtab == 0) + { + if (strcmp(NDB_REP_DB, db) != 0 || + strcmp(NDB_SCHEMA_TABLE, table_name)) + { + ndb_error= &dict->getNdbError(); + goto end; + } + DBUG_RETURN(0); + } + + { + uint i; + for (i= 0; i < SCHEMA_SIZE; i++) + { + col[i]= ndbtab->getColumn(i); + sz[i]= col[i]->getLength(); + DBUG_ASSERT(sz[i] <= sizeof(tmp_buf)); + } + } + + while (1) + { + if ((trans= ndb->startTransaction()) == 0) + goto err; + { + NdbOperation *op= 0; + int r= 0; + r|= (op= trans->getNdbOperation(ndbtab)) == 0; + DBUG_ASSERT(r == 0); + r|= op->writeTuple(); + DBUG_ASSERT(r == 0); + + /* db */ + ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db)); + r|= op->equal(SCHEMA_DB_I, tmp_buf); + DBUG_ASSERT(r == 0); + /* name */ + ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name, + strlen(table_name)); + r|= op->equal(SCHEMA_NAME_I, tmp_buf); + DBUG_ASSERT(r == 0); + /* slock */ + DBUG_ASSERT(sz[SCHEMA_SLOCK_I] == sizeof(bitbuf)); + r|= op->setValue(SCHEMA_SLOCK_I, (char*)schema_subscribers.bitmap); + DBUG_ASSERT(r == 0); + /* query */ + ndb_pack_varchar(col[SCHEMA_QUERY_I], tmp_buf, query, query_length); + r|= op->setValue(SCHEMA_QUERY_I, tmp_buf); + DBUG_ASSERT(r == 0); + /* node_id */ + r|= op->setValue(SCHEMA_NODE_ID_I, node_id); + DBUG_ASSERT(r == 0); + /* epoch */ + r|= op->setValue(SCHEMA_EPOCH_I, epoch); + DBUG_ASSERT(r == 0); + /* id */ + r|= op->setValue(SCHEMA_ID_I, ndb_table_id); + DBUG_ASSERT(r == 0); + /* version */ + r|= 
op->setValue(SCHEMA_VERSION_I, ndb_table_version); + DBUG_ASSERT(r == 0); + /* type */ + r|= op->setValue(SCHEMA_TYPE_I, (uint32)type); + DBUG_ASSERT(r == 0); + } + if (trans->execute(NdbTransaction::Commit) == 0) + { + dict->forceGCPWait(); + DBUG_PRINT("info", ("logged: %s", query)); + break; + } +err: + if (trans->getNdbError().status == NdbError::TemporaryError) + { + if (retries--) + { + ndb->closeTransaction(trans); + continue; // retry + } + } + ndb_error= &trans->getNdbError(); + break; + } +end: + if (ndb_error) + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + ndb_error->code, + ndb_error->message, + "Could not log query '%s' on other mysqld's"); + + if (trans) + ndb->closeTransaction(trans); + ndb->setDatabaseName(old_db); + + /* + Wait for other mysqld's to acknowledge the table operation + */ + if (ndb_error == 0 && + (type == SOT_CREATE_TABLE || + type == SOT_RENAME_TABLE || + type == SOT_ALTER_TABLE) && + !bitmap_is_clear_all(&schema_subscribers)) + { + int max_timeout= 10; + (void) pthread_mutex_lock(&share->mutex); + while (1) + { + struct timespec abstime; + int i; + set_timespec(abstime, 1); + (void) pthread_cond_timedwait(&injector_cond, + &share->mutex, + &abstime); + + (void) pthread_mutex_lock(&schema_share->mutex); + for (i= 0; i < ndb_number_of_storage_nodes; i++) + { + /* remove any unsubscribed from schema_subscribers */ + MY_BITMAP *tmp= &schema_share->subscriber_bitmap[i]; + if (!bitmap_is_clear_all(tmp)) + bitmap_intersect(&schema_subscribers, tmp); + } + (void) pthread_mutex_unlock(&schema_share->mutex); + + /* remove any unsubscribed from share->slock */ + bitmap_intersect(&share->slock_bitmap, &schema_subscribers); + + DBUG_DUMP("share->slock_bitmap.bitmap", (char*)share->slock_bitmap.bitmap, + no_bytes_in_map(&share->slock_bitmap)); + + if (bitmap_is_clear_all(&share->slock_bitmap)) + break; + + max_timeout--; + if (max_timeout == 0) + { + sql_print_error("NDB create table: timed out. 
Ignoring..."); + break; + } + sql_print_information("NDB create table: " + "waiting max %u sec for create table %s.", + max_timeout, share->key); + } + (void) pthread_mutex_unlock(&share->mutex); + } +#endif + DBUG_RETURN(0); +} + +/* + acknowledge handling of schema operation +*/ +static int +ndbcluster_update_slock(THD *thd, + const char *db, + const char *table_name) +{ + DBUG_ENTER("ndbcluster_update_slock"); + if (!schema_share) + { + DBUG_RETURN(0); + } + + const NdbError *ndb_error= 0; + uint32 node_id= g_ndb_cluster_connection->node_id(); + Ndb *ndb= check_ndb_in_thd(thd); + char old_db[128]; + strcpy(old_db, ndb->getDatabaseName()); + + char tmp_buf[SCHEMA_QUERY_SIZE]; + NDBDICT *dict= ndb->getDictionary(); + ndb->setDatabaseName(NDB_REP_DB); + const NDBTAB *ndbtab= dict->getTable(NDB_SCHEMA_TABLE); + NdbTransaction *trans= 0; + int retries= 100; + const NDBCOL *col[SCHEMA_SIZE]; + unsigned sz[SCHEMA_SIZE]; + + MY_BITMAP slock; + uint32 bitbuf[SCHEMA_SLOCK_SIZE/4]; + bitmap_init(&slock, bitbuf, sizeof(bitbuf)*8, false); + + if (ndbtab == 0) + { + abort(); + DBUG_RETURN(0); + } + + { + uint i; + for (i= 0; i < SCHEMA_SIZE; i++) + { + col[i]= ndbtab->getColumn(i); + sz[i]= col[i]->getLength(); + DBUG_ASSERT(sz[i] <= sizeof(tmp_buf)); + } + } + + while (1) + { + if ((trans= ndb->startTransaction()) == 0) + goto err; + { + NdbOperation *op= 0; + int r= 0; + + /* read the bitmap exlusive */ + r|= (op= trans->getNdbOperation(ndbtab)) == 0; + DBUG_ASSERT(r == 0); + r|= op->readTupleExclusive(); + DBUG_ASSERT(r == 0); + + /* db */ + ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db)); + r|= op->equal(SCHEMA_DB_I, tmp_buf); + DBUG_ASSERT(r == 0); + /* name */ + ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name, + strlen(table_name)); + r|= op->equal(SCHEMA_NAME_I, tmp_buf); + DBUG_ASSERT(r == 0); + /* slock */ + r|= op->getValue(SCHEMA_SLOCK_I, (char*)slock.bitmap) == 0; + DBUG_ASSERT(r == 0); + } + if (trans->execute(NdbTransaction::NoCommit)) + goto err; + bitmap_clear_bit(&slock, node_id); + { + NdbOperation *op= 0; + int r= 0; + + /* now update the tuple */ + r|= (op= trans->getNdbOperation(ndbtab)) == 0; + DBUG_ASSERT(r == 0); + r|= op->updateTuple(); + DBUG_ASSERT(r == 0); + + /* db */ + ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db)); + r|= op->equal(SCHEMA_DB_I, tmp_buf); + DBUG_ASSERT(r == 0); + /* name */ + ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name, + strlen(table_name)); + r|= op->equal(SCHEMA_NAME_I, tmp_buf); + DBUG_ASSERT(r == 0); + /* slock */ + r|= op->setValue(SCHEMA_SLOCK_I, (char*)slock.bitmap); + DBUG_ASSERT(r == 0); + /* node_id */ + r|= op->setValue(SCHEMA_NODE_ID_I, node_id); + DBUG_ASSERT(r == 0); + /* type */ + r|= op->setValue(SCHEMA_TYPE_I, (uint32)SOT_CLEAR_SLOCK); + DBUG_ASSERT(r == 0); + } + if (trans->execute(NdbTransaction::Commit) == 0) + { + dict->forceGCPWait(); + DBUG_PRINT("info", ("node %d cleared lock on '%s.%s'", + node_id, db, table_name)); + break; + } + err: + if (trans->getNdbError().status == NdbError::TemporaryError) + { + if (retries--) + { + ndb->closeTransaction(trans); + continue; // retry + } + } + ndb_error= &trans->getNdbError(); + break; + } +end: + if (ndb_error) + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + ndb_error->code, + ndb_error->message, + "Could not release lock on '%s.%s'", + db, table_name); + if (trans) + ndb->closeTransaction(trans); + ndb->setDatabaseName(old_db); + DBUG_RETURN(0); +} + +/* + Handle _non_ data events from 
the storage nodes +*/ +static int +ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp, + NDB_SHARE *share) +{ + int remote_drop_table= 0, do_close_cached_tables= 0; + + if (pOp->getEventType() != NDBEVENT::TE_CLUSTER_FAILURE && + pOp->getReqNodeId() != g_ndb_cluster_connection->node_id()) + { + ndb->setDatabaseName(share->table->s->db.str); + ha_ndbcluster::invalidate_dictionary_cache(share->table, + ndb, + share->table->s->table_name.str, + TRUE); + remote_drop_table= 1; + } + + (void) pthread_mutex_lock(&share->mutex); + DBUG_ASSERT(share->op == pOp || share->op_old == pOp); + if (share->op_old == pOp) + share->op_old= 0; + else + share->op= 0; + // either just us or drop table handling as well + + /* Signal ha_ndbcluster::delete/rename_table that drop is done */ + (void) pthread_mutex_unlock(&share->mutex); + (void) pthread_cond_signal(&injector_cond); + + pthread_mutex_lock(&ndbcluster_mutex); + free_share(&share, TRUE); + if (remote_drop_table && share && share->state != NSS_DROPPED) + { + DBUG_PRINT("info", ("remote drop table")); + if (share->use_count != 1) + do_close_cached_tables= 1; + share->state= NSS_DROPPED; + free_share(&share, TRUE); + } + pthread_mutex_unlock(&ndbcluster_mutex); + + share= 0; + pOp->setCustomData(0); + + pthread_mutex_lock(&injector_mutex); + injector_ndb->dropEventOperation(pOp); + pOp= 0; + pthread_mutex_unlock(&injector_mutex); + + if (do_close_cached_tables) + close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0); + return 0; +} + +static int +ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, + NdbEventOperation *pOp, + List<Cluster_replication_schema> + *schema_list, MEM_ROOT *mem_root) +{ + DBUG_ENTER("ndb_binlog_thread_handle_schema_event"); + NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData(); + if (share && schema_share == share) + { + NDBEVENT::TableEvent ev_type= pOp->getEventType(); + DBUG_PRINT("enter", ("%s.%s ev_type: %d", + share->db, share->table_name, ev_type)); + switch (ev_type) + { + case NDBEVENT::TE_UPDATE: + case NDBEVENT::TE_INSERT: + { + Cluster_replication_schema *schema= (Cluster_replication_schema *) + sql_alloc(sizeof(Cluster_replication_schema)); + MY_BITMAP slock; + bitmap_init(&slock, schema->slock, 8*SCHEMA_SLOCK_SIZE, false); + uint node_id= g_ndb_cluster_connection->node_id(); + ndbcluster_get_schema(share->table, schema); + if (schema->node_id != node_id) + { + int log_query= 0; + DBUG_PRINT("info", ("log query_length: %d query: '%s'", + schema->query_length, schema->query)); + switch ((enum SCHEMA_OP_TYPE)schema->type) + { + case SOT_DROP_TABLE: + /* binlog dropping table after any table operations */ + schema_list->push_back(schema, mem_root); + log_query= 0; + break; + case SOT_CREATE_TABLE: + /* fall through */ + case SOT_RENAME_TABLE: + /* fall through */ + case SOT_ALTER_TABLE: + pthread_mutex_lock(&LOCK_open); + if (ha_create_table_from_engine(thd, schema->db, schema->name)) + { + sql_print_error("Could not discover table '%s.%s' from " + "binlog schema event '%s' from node %d", + schema->db, schema->name, schema->query, + schema->node_id); + } + pthread_mutex_unlock(&LOCK_open); + { + /* signal that schema operation has been handled */ + DBUG_DUMP("slock", (char*)schema->slock, schema->slock_length); + if (bitmap_is_set(&slock, node_id)) + ndbcluster_update_slock(thd, schema->db, schema->name); + } + log_query= 1; + break; + case SOT_DROP_DB: + run_query(thd, schema->query, + schema->query + schema->query_length, + TRUE, /* print error */ + TRUE); /* don't binlog the query */ + /* binlog 
dropping database after any table operations */ + schema_list->push_back(schema, mem_root); + log_query= 0; + break; + case SOT_CREATE_DB: + /* fall through */ + case SOT_ALTER_DB: + run_query(thd, schema->query, + schema->query + schema->query_length, + TRUE, /* print error */ + FALSE); /* binlog the query */ + log_query= 0; + break; + case SOT_CLEAR_SLOCK: + { + char key[FN_REFLEN]; + (void)strxnmov(key, FN_REFLEN, share_prefix, schema->db, + "/", schema->name, NullS); + NDB_SHARE *share= get_share(key, 0, false, false); + if (share) + { + pthread_mutex_lock(&share->mutex); + memcpy(share->slock, schema->slock, sizeof(share->slock)); + DBUG_DUMP("share->slock_bitmap.bitmap", + (char*)share->slock_bitmap.bitmap, + no_bytes_in_map(&share->slock_bitmap)); + pthread_mutex_unlock(&share->mutex); + pthread_cond_signal(&injector_cond); + free_share(&share); + } + DBUG_RETURN(0); + } + } + if (log_query) + { + char *thd_db_save= thd->db; + thd->db= schema->db; + thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query, + schema->query_length, FALSE, + schema->name[0] == 0); + thd->db= thd_db_save; + } + } + } + break; + case NDBEVENT::TE_DELETE: + // skip + break; + case NDBEVENT::TE_ALTER: + /* do the rename of the table in the share */ + share->table->s->db.str= share->db; + share->table->s->db.length= strlen(share->db); + share->table->s->table_name.str= share->table_name; + share->table->s->table_name.length= strlen(share->table_name); + ndb_handle_schema_change(thd, ndb, pOp, share); + break; + case NDBEVENT::TE_CLUSTER_FAILURE: + case NDBEVENT::TE_DROP: + free_share(&schema_share); + schema_share= 0; + ndb_handle_schema_change(thd, ndb, pOp, share); + break; + case NDBEVENT::TE_NODE_FAILURE: + { + uint8 node_id= g_node_id_map[pOp->getNdbdNodeId()]; + DBUG_ASSERT(node_id != 0xFF); + (void) pthread_mutex_lock(&share->mutex); + bitmap_clear_all(&share->subscriber_bitmap[node_id]); + DBUG_PRINT("info",("NODE_FAILURE UNSUBSCRIBE[%d]", node_id)); + (void) pthread_mutex_unlock(&share->mutex); + (void) pthread_cond_signal(&injector_cond); + break; + } + case NDBEVENT::TE_SUBSCRIBE: + { + uint8 node_id= g_node_id_map[pOp->getNdbdNodeId()]; + uint8 req_id= pOp->getReqNodeId(); + DBUG_ASSERT(req_id != 0 && node_id != 0xFF); + (void) pthread_mutex_lock(&share->mutex); + bitmap_set_bit(&share->subscriber_bitmap[node_id], req_id); + DBUG_PRINT("info",("SUBSCRIBE[%d] %d", node_id, req_id)); + (void) pthread_mutex_unlock(&share->mutex); + (void) pthread_cond_signal(&injector_cond); + break; + } + case NDBEVENT::TE_UNSUBSCRIBE: + { + uint8 node_id= g_node_id_map[pOp->getNdbdNodeId()]; + uint8 req_id= pOp->getReqNodeId(); + DBUG_ASSERT(req_id != 0 && node_id != 0xFF); + (void) pthread_mutex_lock(&share->mutex); + bitmap_clear_bit(&share->subscriber_bitmap[node_id], req_id); + DBUG_PRINT("info",("UNSUBSCRIBE[%d] %d", node_id, req_id)); + (void) pthread_mutex_unlock(&share->mutex); + (void) pthread_cond_signal(&injector_cond); + break; + } + default: + sql_print_error("NDB Binlog: unknown non data event %d for %s. 
" + "Ignoring...", (unsigned) ev_type, share->key); + } + } + DBUG_RETURN(0); +} + +/* + Timer class for doing performance measurements +*/ + +/********************************************************************* + Internal helper functions for handeling of the cluster replication tables + - cluster_replication.binlog_index + - cluster_replication.apply_status +*********************************************************************/ + +/* + struct to hold the data to be inserted into the + cluster_replication.binlog_index table +*/ +struct Binlog_index_row { + ulonglong gci; + const char *master_log_file; + ulonglong master_log_pos; + ulonglong n_inserts; + ulonglong n_updates; + ulonglong n_deletes; + ulonglong n_schemaops; +}; + +/* + Open the cluster_replication.binlog_index table +*/ +static int open_binlog_index(THD *thd, TABLE_LIST *tables, + TABLE **binlog_index) +{ + static char repdb[]= NDB_REP_DB; + static char reptable[]= NDB_REP_TABLE; + const char *save_proc_info= thd->proc_info; + + bzero((char*) tables, sizeof(*tables)); + tables->db= repdb; + tables->alias= tables->table_name= reptable; + tables->lock_type= TL_WRITE; + thd->proc_info= "Opening " NDB_REP_DB "." NDB_REP_TABLE; + tables->required_type= FRMTYPE_TABLE; + uint counter; + thd->clear_error(); + if (open_tables(thd, &tables, &counter, MYSQL_LOCK_IGNORE_FLUSH)) + { + sql_print_error("NDB Binlog: Opening binlog_index: %d, '%s'", + thd->net.last_errno, + thd->net.last_error ? thd->net.last_error : ""); + thd->proc_info= save_proc_info; + return -1; + } + *binlog_index= tables->table; + thd->proc_info= save_proc_info; + return 0; +} + +/* + Insert one row in the cluster_replication.binlog_index + + declared friend in handler.h to be able to call write_row directly + so that this insert is not replicated +*/ +int ndb_add_binlog_index(THD *thd, void *_row) +{ + Binlog_index_row &row= *(Binlog_index_row *) _row; + int error= 0; + bool need_reopen; + for ( ; ; ) /* loop for need_reopen */ + { + if (!binlog_index && open_binlog_index(thd, &binlog_tables, &binlog_index)) + { + error= -1; + goto add_binlog_index_err; + } + + if (lock_tables(thd, &binlog_tables, 1, &need_reopen)) + { + if (need_reopen) + { + close_tables_for_reopen(thd, &binlog_tables); + binlog_index= 0; + continue; + } + sql_print_error("NDB Binlog: Unable to lock table binlog_index"); + error= -1; + goto add_binlog_index_err; + } + break; + } + + binlog_index->field[0]->store(row.master_log_pos); + binlog_index->field[1]->store(row.master_log_file, + strlen(row.master_log_file), + &my_charset_bin); + binlog_index->field[2]->store(row.gci); + binlog_index->field[3]->store(row.n_inserts); + binlog_index->field[4]->store(row.n_updates); + binlog_index->field[5]->store(row.n_deletes); + binlog_index->field[6]->store(row.n_schemaops); + + int r; + if ((r= binlog_index->file->write_row(binlog_index->record[0]))) + { + sql_print_error("NDB Binlog: Writing row to binlog_index: %d", r); + error= -1; + goto add_binlog_index_err; + } + + mysql_unlock_tables(thd, thd->lock); + thd->lock= 0; + return 0; +add_binlog_index_err: + close_thread_tables(thd); + binlog_index= 0; + return error; +} + +/********************************************************************* + Functions for start, stop, wait for ndbcluster binlog thread +*********************************************************************/ + +static int do_ndbcluster_binlog_close_connection= 0; + +int ndbcluster_binlog_start() +{ + DBUG_ENTER("ndbcluster_binlog_start"); + + pthread_mutex_init(&injector_mutex, 
MY_MUTEX_INIT_FAST); + pthread_cond_init(&injector_cond, NULL); + + /* Create injector thread */ + if (pthread_create(&ndb_binlog_thread, &connection_attrib, + ndb_binlog_thread_func, 0)) + { + DBUG_PRINT("error", ("Could not create ndb injector thread")); + pthread_cond_destroy(&injector_cond); + pthread_mutex_destroy(&injector_mutex); + DBUG_RETURN(-1); + } + + /* + Wait for the ndb injector thread to finish starting up. + */ + pthread_mutex_lock(&injector_mutex); + while (!ndb_binlog_thread_running) + pthread_cond_wait(&injector_cond, &injector_mutex); + pthread_mutex_unlock(&injector_mutex); + + if (ndb_binlog_thread_running < 0) + DBUG_RETURN(-1); + + DBUG_RETURN(0); +} + +static void ndbcluster_binlog_close_connection(THD *thd) +{ + DBUG_ENTER("ndbcluster_binlog_close_connection"); + const char *save_info= thd->proc_info; + thd->proc_info= "ndbcluster_binlog_close_connection"; + do_ndbcluster_binlog_close_connection= 1; + while (ndb_binlog_thread_running > 0) + sleep(1); + thd->proc_info= save_info; + DBUG_VOID_RETURN; +} + +/************************************************************** + Internal helper functions for creating/dropping ndb events + used by the client sql threads +**************************************************************/ +void +ndb_rep_event_name(String *event_name,const char *db, const char *tbl) +{ + event_name->set_ascii("REPL$", 5); + event_name->append(db); + if (tbl) + { + event_name->append('/'); + event_name->append(tbl); + } +} + +/* + Common function for setting up everything for logging a table at + create/discover. +*/ +int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key, + const char *db, + const char *table_name, + NDB_SHARE *share) +{ + DBUG_ENTER("ndbcluster_create_binlog_setup"); + + pthread_mutex_lock(&ndbcluster_mutex); + + /* Handle any trailing share */ + if (share == 0) + { + share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables, + (byte*) key, strlen(key)); + if (share) + handle_trailing_share(share); + } + else + handle_trailing_share(share); + + /* Create share which is needed to hold replication information */ + if (!(share= get_share(key, 0, true, true))) + { + sql_print_error("NDB Binlog: " + "allocating table share for %s failed", key); + } + pthread_mutex_unlock(&ndbcluster_mutex); + + while (share && !IS_TMP_PREFIX(table_name)) + { + /* + ToDo make sanity check of share so that the table is actually the same + I.e. 
we need to do open file from frm in this case + Currently awaiting this to be fixed in the 4.1 tree in the general + case + */ + + /* Create the event in NDB */ + ndb->setDatabaseName(db); + + NDBDICT *dict= ndb->getDictionary(); + const NDBTAB *ndbtab= dict->getTable(table_name); + if (ndbtab == 0) + { + if (ndb_extra_logging) + sql_print_information("NDB Binlog: Failed to get table %s from ndb: " + "%s, %d", key, dict->getNdbError().message, + dict->getNdbError().code); + break; // error + } + String event_name(INJECTOR_EVENT_LEN); + ndb_rep_event_name(&event_name, db, table_name); + /* + event should have been created by someone else, + but let's make sure, and create if it doesn't exist + */ + if (!dict->getEvent(event_name.c_ptr())) + { + if (ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share)) + { + sql_print_error("NDB Binlog: " + "FAILED CREATE (DISCOVER) TABLE Event: %s", + event_name.c_ptr()); + break; // error + } + if (ndb_extra_logging) + sql_print_information("NDB Binlog: " + "CREATE (DISCOVER) TABLE Event: %s", + event_name.c_ptr()); + } + else + if (ndb_extra_logging) + sql_print_information("NDB Binlog: DISCOVER TABLE Event: %s", + event_name.c_ptr()); + + /* + create the event operations for receiving logging events + */ + if (ndbcluster_create_event_ops(share, ndbtab, + event_name.c_ptr()) < 0) + { + sql_print_error("NDB Binlog:" + "FAILED CREATE (DISCOVER) EVENT OPERATIONS Event: %s", + event_name.c_ptr()); + /* a warning has been issued to the client */ + DBUG_RETURN(0); + } + DBUG_RETURN(0); + } + DBUG_RETURN(-1); +} + +int +ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, + const char *event_name, NDB_SHARE *share) +{ + DBUG_ENTER("ndbcluster_create_event"); + if (!share) + DBUG_RETURN(0); + NDBDICT *dict= ndb->getDictionary(); + NDBEVENT my_event(event_name); + my_event.setTable(*ndbtab); + my_event.addTableEvent(NDBEVENT::TE_ALL); + if (share->flags & NSF_HIDDEN_PK) + { + /* No primary key, susbscribe for all attributes */ + my_event.setReport(NDBEVENT::ER_ALL); + DBUG_PRINT("info", ("subscription all")); + } + else + { + if (schema_share || strcmp(share->db, NDB_REP_DB) || + strcmp(share->table_name, NDB_SCHEMA_TABLE)) + { + my_event.setReport(NDBEVENT::ER_UPDATED); + DBUG_PRINT("info", ("subscription only updated")); + } + else + { + my_event.setReport((NDBEVENT::EventReport) + (NDBEVENT::ER_ALL | NDBEVENT::ER_SUBSCRIBE)); + DBUG_PRINT("info", ("subscription all and subscribe")); + } + } + + /* add all columns to the event */ + int n_cols= ndbtab->getNoOfColumns(); + for(int a= 0; a < n_cols; a++) + my_event.addEventColumn(a); + + if (dict->createEvent(my_event)) // Add event to database + { +#ifdef NDB_BINLOG_EXTRA_WARNINGS + /* + failed, print a warning + */ + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + dict->getNdbError().code, + dict->getNdbError().message, "NDB"); +#endif + if (dict->getNdbError().classification != NdbError::SchemaObjectExists) + { + sql_print_error("NDB Binlog: Unable to create event in database. " + "Event: %s Error Code: %d Message: %s", event_name, + dict->getNdbError().code, dict->getNdbError().message); + DBUG_RETURN(-1); + } + + /* + trailing event from before; an error, but try to correct it + */ + if (dict->dropEvent(my_event.getName())) + { + sql_print_error("NDB Binlog: Unable to create event in database. " + " Attempt to correct with drop failed. 
" + "Event: %s Error Code: %d Message: %s", + event_name, + dict->getNdbError().code, + dict->getNdbError().message); + DBUG_RETURN(-1); + } + + /* + try to add the event again + */ + if (dict->createEvent(my_event)) + { + sql_print_error("NDB Binlog: Unable to create event in database. " + " Attempt to correct with drop ok, but create failed. " + "Event: %s Error Code: %d Message: %s", + event_name, + dict->getNdbError().code, + dict->getNdbError().message); + DBUG_RETURN(-1); + } +#ifdef NDB_BINLOG_EXTRA_WARNINGS + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + 0, "NDB Binlog: Removed trailing event", + "NDB"); +#endif + } + + DBUG_RETURN(0); +} + +inline int is_ndb_compatible_type(Field *field) +{ + return + !(field->flags & BLOB_FLAG) && + field->type() != MYSQL_TYPE_BIT && + field->pack_length() != 0; +} + +/* + - create eventOperations for receiving log events + - setup ndb recattrs for reception of log event data + - "start" the event operation + + used at create/discover of tables +*/ +int +ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab, + const char *event_name) +{ + /* + we are in either create table or rename table so table should be + locked, hence we can work with the share without locks + */ + + DBUG_ENTER("ndbcluster_create_event_ops"); + + DBUG_ASSERT(share != 0); + + if (share->op) + { + assert(share->op->getCustomData() == (void *) share); + + DBUG_ASSERT(share->use_count > 1); + sql_print_error("NDB Binlog: discover reusing old ev op"); + free_share(&share); // old event op already has reference + DBUG_RETURN(0); + } + + TABLE *table= share->table; + if (table) + { + /* + Logging of blob tables is not yet implemented, it would require: + 1. setup of events also on the blob attribute tables + 2. collect the pieces of the blob into one from an epoch to + provide a full blob to binlog + */ + if (table->s->blob_fields) + { + sql_print_error("NDB Binlog: logging of blob table %s " + "is not supported", share->key); + DBUG_RETURN(0); + } + } + + int do_schema_share= 0, do_apply_status_share= 0; + int retries= 100; + if (!schema_share && strcmp(share->db, NDB_REP_DB) == 0 && + strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0) + do_schema_share= 1; + else if (!apply_status_share && strcmp(share->db, NDB_REP_DB) == 0 && + strcmp(share->table_name, NDB_APPLY_TABLE) == 0) + do_apply_status_share= 1; + + while (1) + { + pthread_mutex_lock(&injector_mutex); + Ndb *ndb= injector_ndb; + if (do_schema_share) + ndb= schema_ndb; + + if (ndb == 0) + { + pthread_mutex_unlock(&injector_mutex); + DBUG_RETURN(-1); + } + + NdbEventOperation *op= ndb->createEventOperation(event_name); + if (!op) + { + pthread_mutex_unlock(&injector_mutex); + sql_print_error("NDB Binlog: Creating NdbEventOperation failed for" + " %s",event_name); + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + ndb->getNdbError().code, + ndb->getNdbError().message, + "NDB"); + DBUG_RETURN(-1); + } + + int n_columns= ndbtab->getNoOfColumns(); + int n_fields= table ? 
table->s->fields : 0;
+    for (int j= 0; j < n_columns; j++)
+    {
+      const char *col_name= ndbtab->getColumn(j)->getName();
+      NdbRecAttr *attr0, *attr1;
+      if (j < n_fields)
+      {
+        Field *f= share->table->field[j];
+        if (is_ndb_compatible_type(f))
+        {
+          DBUG_PRINT("info", ("%s compatible", col_name));
+          attr0= op->getValue(col_name, f->ptr);
+          attr1= op->getPreValue(col_name, (f->ptr-share->table->record[0]) +
+                                 share->table->record[1]);
+        }
+        else
+        {
+          DBUG_PRINT("info", ("%s non compatible", col_name));
+          attr0= op->getValue(col_name);
+          attr1= op->getPreValue(col_name);
+        }
+      }
+      else
+      {
+        DBUG_PRINT("info", ("%s hidden key", col_name));
+        attr0= op->getValue(col_name);
+        attr1= op->getPreValue(col_name);
+      }
+      share->ndb_value[0][j].rec= attr0;
+      share->ndb_value[1][j].rec= attr1;
+    }
+    op->setCustomData((void *) share); // set before execute
+    share->op= op; // assign op in NDB_SHARE
+    if (op->execute())
+    {
+      share->op= NULL;
+      retries--;
+      if (op->getNdbError().status != NdbError::TemporaryError &&
+          op->getNdbError().code != 1407)
+        retries= 0;
+      if (retries == 0)
+      {
+        push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+                            ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+                            op->getNdbError().code, op->getNdbError().message,
+                            "NDB");
+        sql_print_error("NDB Binlog: ndbevent->execute failed for %s; %d %s",
+                        event_name,
+                        op->getNdbError().code, op->getNdbError().message);
+      }
+      ndb->dropEventOperation(op);
+      pthread_mutex_unlock(&injector_mutex);
+      if (retries)
+        continue;
+      DBUG_RETURN(-1);
+    }
+    pthread_mutex_unlock(&injector_mutex);
+    break;
+  }
+
+  get_share(share);
+  if (do_apply_status_share)
+    apply_status_share= get_share(share);
+  else if (do_schema_share)
+    schema_share= get_share(share);
+
+  DBUG_PRINT("info",("%s share->op: 0x%lx, share->use_count: %u",
+                     share->key, share->op, share->use_count));
+
+  if (ndb_extra_logging)
+    sql_print_information("NDB Binlog: logging %s", share->key);
+  DBUG_RETURN(0);
+}
+
+/*
+  When entering, the calling thread should have a share lock; if share != 0
+  then the injector thread will have one as well, i.e. share->use_count == 0
+  (unless it has already dropped... then share->op == 0)
+*/
+int
+ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
+                             NDB_SHARE *share)
+{
+  DBUG_ENTER("ndbcluster_handle_drop_table");
+
+  NDBDICT *dict= ndb->getDictionary();
+  if (event_name && dict->dropEvent(event_name))
+  {
+    if (dict->getNdbError().code != 4710)
+    {
+      /* drop event failed for some reason, issue a warning */
+      push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+                          ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+                          dict->getNdbError().code,
+                          dict->getNdbError().message, "NDB");
+      /* error is not that the event did not exist */
+      sql_print_error("NDB Binlog: Unable to drop event in database. "
+                      "Event: %s Error Code: %d Message: %s",
+                      event_name,
+                      dict->getNdbError().code,
+                      dict->getNdbError().message);
+      /* ToDo; handle error? */
+      if (share && share->op &&
+          share->op->getState() == NdbEventOperation::EO_EXECUTING &&
+          dict->getNdbError().code != 4009)
+      {
+        DBUG_ASSERT(false);
+        DBUG_RETURN(-1);
+      }
+    }
+  }
+
+  if (share == 0 || share->op == 0)
+  {
+    DBUG_RETURN(0);
+  }
+
+/*
+  Synchronized drop between client thread and injector thread is
+  necessary in order to maintain ordering in the binlog,
+  such that the drop occurs _after_ any inserts/updates/deletes.
+
+  The penalty for this is that the drop table becomes slow.
+
+  This wait is however not strictly necessary to produce a binlog
+  that is usable. However the slave does not currently handle
+  these out of order, thus we are keeping the SYNC_DROP_ defined
+  for now.
+*/
+#define SYNC_DROP_
+#ifdef SYNC_DROP_
+  (void) pthread_mutex_lock(&share->mutex);
+  int max_timeout= 10;
+  while (share->op)
+  {
+    struct timespec abstime;
+    set_timespec(abstime, 1);
+    (void) pthread_cond_timedwait(&injector_cond,
+                                  &share->mutex,
+                                  &abstime);
+    max_timeout--;
+    if (share->op == 0)
+      break;
+    if (max_timeout == 0)
+    {
+      sql_print_error("NDB delete table: timed out. Ignoring...");
+      break;
+    }
+    if (ndb_extra_logging)
+      sql_print_information("NDB delete table: "
+                            "waiting max %u sec for drop table %s.",
+                            max_timeout, share->key);
+  }
+  (void) pthread_mutex_unlock(&share->mutex);
+#else
+  (void) pthread_mutex_lock(&share->mutex);
+  share->op_old= share->op;
+  share->op= 0;
+  (void) pthread_mutex_unlock(&share->mutex);
+#endif
+
+  DBUG_RETURN(0);
+}
+
+
+/********************************************************************
+  Internal helper functions for the different events from the storage
+  nodes, used by the ndb injector thread
+********************************************************************/
+
+/*
+  Handle error states on events from the storage nodes
+*/
+static int ndb_binlog_thread_handle_error(Ndb *ndb, NdbEventOperation *pOp,
+                                          Binlog_index_row &row)
+{
+  NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData();
+  DBUG_ENTER("ndb_binlog_thread_handle_error");
+
+  int overrun= pOp->isOverrun();
+  if (overrun)
+  {
+    /*
+      ToDo: this error should rather clear the binlog_index...
+      and continue
+    */
+    sql_print_error("NDB Binlog: Overrun in event buffer, "
+                    "this means we have dropped events. Cannot "
+                    "continue binlog for %s", share->key);
+    pOp->clearError();
+    DBUG_RETURN(-1);
+  }
+
+  if (!pOp->isConsistent())
+  {
+    /*
+      ToDo: this error should rather clear the binlog_index...
+      and continue
+    */
+    sql_print_error("NDB Binlog: Not Consistent. Cannot "
+                    "continue binlog for %s. 
Error code: %d" + " Message: %s", share->key, + pOp->getNdbError().code, + pOp->getNdbError().message); + pOp->clearError(); + DBUG_RETURN(-1); + } + sql_print_error("NDB Binlog: unhandled error %d for table %s", + pOp->hasError(), share->key); + pOp->clearError(); + DBUG_RETURN(0); +} + +static int +ndb_binlog_thread_handle_non_data_event(Ndb *ndb, NdbEventOperation *pOp, + Binlog_index_row &row) +{ + NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData(); + NDBEVENT::TableEvent type= pOp->getEventType(); + + /* make sure to flush any pending events as they can be dependent + on one of the tables being changed below + */ + injector_thd->binlog_flush_pending_rows_event(true); + + switch (type) + { + case NDBEVENT::TE_CLUSTER_FAILURE: + if (apply_status_share == share) + { + free_share(&apply_status_share); + apply_status_share= 0; + } + if (ndb_extra_logging) + sql_print_information("NDB Binlog: cluster failure for %s.", share->key); + DBUG_PRINT("info", ("CLUSTER FAILURE EVENT: " + "%s received share: 0x%lx op: %lx share op: %lx " + "op_old: %lx", + share->key, share, pOp, share->op, share->op_old)); + break; + case NDBEVENT::TE_ALTER: + /* ToDo: remove printout */ + if (ndb_extra_logging) + sql_print_information("NDB Binlog: rename table %s%s/%s -> %s.", + share_prefix, share->table->s->db.str, + share->table->s->table_name.str, + share->key); + /* do the rename of the table in the share */ + share->table->s->db.str= share->db; + share->table->s->db.length= strlen(share->db); + share->table->s->table_name.str= share->table_name; + share->table->s->table_name.length= strlen(share->table_name); + goto drop_alter_common; + case NDBEVENT::TE_DROP: + if (apply_status_share == share) + { + free_share(&apply_status_share); + apply_status_share= 0; + } + /* ToDo: remove printout */ + if (ndb_extra_logging) + sql_print_information("NDB Binlog: drop table %s.", share->key); +drop_alter_common: + row.n_schemaops++; + DBUG_PRINT("info", ("TABLE %s EVENT: %s received share: 0x%lx op: %lx " + "share op: %lx op_old: %lx", + type == NDBEVENT::TE_DROP ? "DROP" : "ALTER", + share->key, share, pOp, share->op, share->op_old)); + break; + case NDBEVENT::TE_NODE_FAILURE: + /* fall through */ + case NDBEVENT::TE_SUBSCRIBE: + /* fall through */ + case NDBEVENT::TE_UNSUBSCRIBE: + /* ignore */ + return 0; + default: + sql_print_error("NDB Binlog: unknown non data event %d for %s. " + "Ignoring...", (unsigned) type, share->key); + return 0; + } + + ndb_handle_schema_change(injector_thd, ndb, pOp, share); + return 0; +} + +/* + Handle data events from the storage nodes +*/ +static int +ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp, + Binlog_index_row &row, + injector::transaction &trans) +{ + NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData(); + if (share == apply_status_share) + return 0; + TABLE *table= share->table; + + assert(table != 0); + + dbug_print_table("table", table); + + TABLE_SHARE *table_s= table->s; + uint n_fields= table_s->fields; + MY_BITMAP b; + /* Potential buffer for the bitmap */ + uint32 bitbuf[128 / (sizeof(uint32) * 8)]; + bitmap_init(&b, n_fields <= sizeof(bitbuf) * 8 ? 
bitbuf : NULL,
+              n_fields, false);
+  bitmap_set_all(&b);
+
+  /*
+    The row data is already in table->record[0], as we told the
+    NdbEventOperation to put it there (this saves moving the data
+    about many times)
+  */
+
+  switch(pOp->getEventType())
+  {
+  case NDBEVENT::TE_INSERT:
+    row.n_inserts++;
+    DBUG_PRINT("info", ("INSERT INTO %s", share->key));
+    {
+      ndb_unpack_record(table, share->ndb_value[0], &b, table->record[0]);
+      trans.write_row(::server_id, injector::transaction::table(table, true),
+                      &b, n_fields, table->record[0]);
+    }
+    break;
+  case NDBEVENT::TE_DELETE:
+    row.n_deletes++;
+    DBUG_PRINT("info",("DELETE FROM %s", share->key));
+    {
+      /*
+        table->record[0] contains only the primary key in this case
+        since we do not have an after image
+      */
+      int n;
+      if (table->s->primary_key != MAX_KEY)
+        n= 0; /*
+                use the primary key only, as it saves time and space and
+                it is the only thing needed to log the delete
+              */
+      else
+        n= 1; /*
+                use the before values, since we don't have a primary key
+                and the mysql server does not handle the hidden primary
+                key
+              */
+
+      ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]);
+      print_records(table, table->record[n]);
+      trans.delete_row(::server_id, injector::transaction::table(table, true),
+                       &b, n_fields, table->record[n]);
+    }
+    break;
+  case NDBEVENT::TE_UPDATE:
+    row.n_updates++;
+    DBUG_PRINT("info", ("UPDATE %s", share->key));
+    {
+      ndb_unpack_record(table, share->ndb_value[0],
+                        &b, table->record[0]);
+      print_records(table, table->record[0]);
+      if (table->s->primary_key != MAX_KEY)
+      {
+        /*
+          since the table has a primary key, we can do a write
+          using only the after values
+        */
+        trans.write_row(::server_id, injector::transaction::table(table, true),
+                        &b, n_fields, table->record[0]);// after values
+      }
+      else
+      {
+        /*
+          the mysql server cannot handle the ndb hidden key and
+          therefore needs the before image as well
+        */
+        ndb_unpack_record(table, share->ndb_value[1], &b, table->record[1]);
+        print_records(table, table->record[1]);
+        trans.update_row(::server_id,
+                         injector::transaction::table(table, true),
+                         &b, n_fields,
+                         table->record[1], // before values
+                         table->record[0]);// after values
+      }
+    }
+    break;
+  default:
+    /* We should REALLY never get here. 
*/ + DBUG_PRINT("info", ("default - uh oh, a brain exploded.")); + break; + } + + return 0; +} + +//#define RUN_NDB_BINLOG_TIMER +#ifdef RUN_NDB_BINLOG_TIMER +class Timer +{ +public: + Timer() { start(); } + void start() { gettimeofday(&m_start, 0); } + void stop() { gettimeofday(&m_stop, 0); } + ulong elapsed_ms() + { + return (ulong) + (((longlong) m_stop.tv_sec - (longlong) m_start.tv_sec) * 1000 + + ((longlong) m_stop.tv_usec - + (longlong) m_start.tv_usec + 999) / 1000); + } +private: + struct timeval m_start,m_stop; +}; +#endif + +/**************************************************************** + Injector thread main loop +****************************************************************/ + +pthread_handler_t ndb_binlog_thread_func(void *arg) +{ + THD *thd; /* needs to be first for thread_stack */ + Ndb *ndb= 0; + Thd_ndb *thd_ndb=0; + int ndb_update_binlog_index= 1; + injector *inj= injector::instance(); + + pthread_mutex_lock(&injector_mutex); + /* + Set up the Thread + */ + my_thread_init(); + DBUG_ENTER("ndb_binlog_thread"); + + thd= new THD; /* note that contructor of THD uses DBUG_ */ + THD_CHECK_SENTRY(thd); + + thd->thread_stack= (char*) &thd; /* remember where our stack is */ + if (thd->store_globals()) + { + thd->cleanup(); + delete thd; + ndb_binlog_thread_running= -1; + pthread_mutex_unlock(&injector_mutex); + pthread_cond_signal(&injector_cond); + my_thread_end(); + pthread_exit(0); + DBUG_RETURN(NULL); + } + + thd->init_for_queries(); + thd->command= COM_DAEMON; + thd->system_thread= SYSTEM_THREAD_NDBCLUSTER_BINLOG; + thd->version= refresh_version; + thd->set_time(); + thd->main_security_ctx.host_or_ip= ""; + thd->client_capabilities= 0; + my_net_init(&thd->net, 0); + thd->main_security_ctx.master_access= ~0; + thd->main_security_ctx.priv_user= 0; + + /* + Set up ndb binlog + */ + sql_print_information("Starting MySQL Cluster Binlog Thread"); + + pthread_detach_this_thread(); + thd->real_id= pthread_self(); + pthread_mutex_lock(&LOCK_thread_count); + thd->thread_id= thread_id++; + threads.append(thd); + pthread_mutex_unlock(&LOCK_thread_count); + thd->lex->start_transaction_opt= 0; + + if (!(schema_ndb= new Ndb(g_ndb_cluster_connection, "")) || + schema_ndb->init()) + { + sql_print_error("NDB Binlog: Getting Schema Ndb object failed"); + goto err; + } + + if (!(ndb= new Ndb(g_ndb_cluster_connection, "")) || + ndb->init()) + { + sql_print_error("NDB Binlog: Getting Ndb object failed"); + ndb_binlog_thread_running= -1; + pthread_mutex_unlock(&injector_mutex); + pthread_cond_signal(&injector_cond); + goto err; + } + + /* + Expose global reference to our ndb object. + + Used by both sql client thread and binlog thread to interact + with the storage + pthread_mutex_lock(&injector_mutex); + */ + injector_thd= thd; + injector_ndb= ndb; + ndb_binlog_thread_running= 1; + + /* + We signal the thread that started us that we've finished + starting up. 
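+    (ndbcluster_binlog_start blocks on injector_cond until
+    ndb_binlog_thread_running has been set.)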
+ */ + pthread_mutex_unlock(&injector_mutex); + pthread_cond_signal(&injector_cond); + + thd->proc_info= "Waiting for ndbcluster to start"; + + pthread_mutex_lock(&injector_mutex); + while (!ndbcluster_util_inited) + { + /* ndb not connected yet */ + struct timespec abstime; + set_timespec(abstime, 1); + pthread_cond_timedwait(&injector_cond, &injector_mutex, &abstime); + if (abort_loop) + { + pthread_mutex_unlock(&injector_mutex); + goto err; + } + } + pthread_mutex_unlock(&injector_mutex); + + /* + Main NDB Injector loop + */ + + DBUG_ASSERT(ndbcluster_hton.slot != ~(uint)0); + if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb())) + { + sql_print_error("Could not allocate Thd_ndb object"); + goto err; + } + set_thd_ndb(thd, thd_ndb); + thd_ndb->options|= TNO_NO_LOG_SCHEMA_OP; + thd->query_id= 0; // to keep valgrind quiet + { + static char db[]= ""; + thd->db= db; + open_binlog_index(thd, &binlog_tables, &binlog_index); + if (!apply_status_share) + { + sql_print_error("NDB: Could not get apply status share"); + } + thd->db= db; + } + +#ifdef RUN_NDB_BINLOG_TIMER + Timer main_timer; +#endif + for ( ; !((abort_loop || do_ndbcluster_binlog_close_connection) && + ndb_latest_handled_binlog_epoch >= g_latest_trans_gci); ) + { + +#ifdef RUN_NDB_BINLOG_TIMER + main_timer.stop(); + sql_print_information("main_timer %ld ms", main_timer.elapsed_ms()); + main_timer.start(); +#endif + + /* + now we don't want any events before next gci is complete + */ + thd->proc_info= "Waiting for event from ndbcluster"; + thd->set_time(); + + /* wait for event or 1000 ms */ + Uint64 gci, schema_gci; + int res= ndb->pollEvents(1000, &gci); + int schema_res= schema_ndb->pollEvents(0, &schema_gci); + ndb_latest_received_binlog_epoch= gci; + + while (gci > schema_gci && schema_res >= 0) + schema_res= schema_ndb->pollEvents(10, &schema_gci); + + if ((abort_loop || do_ndbcluster_binlog_close_connection) && + ndb_latest_handled_binlog_epoch >= g_latest_trans_gci) + break; /* Shutting down server */ + + if (binlog_index && binlog_index->s->version < refresh_version) + { + if (binlog_index->s->version < refresh_version) + { + close_thread_tables(thd); + binlog_index= 0; + } + } + + MEM_ROOT **root_ptr= + my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC); + MEM_ROOT *old_root= *root_ptr; + MEM_ROOT mem_root; + init_sql_alloc(&mem_root, 4096, 0); + List<Cluster_replication_schema> schema_list; + *root_ptr= &mem_root; + + if (unlikely(schema_res > 0)) + { + schema_ndb-> + setReportThreshEventGCISlip(ndb_report_thresh_binlog_epoch_slip); + schema_ndb->setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage); + NdbEventOperation *pOp= schema_ndb->nextEvent(); + while (pOp != NULL) + { + if (!pOp->hasError()) + ndb_binlog_thread_handle_schema_event(thd, schema_ndb, pOp, + &schema_list, &mem_root); + else + sql_print_error("NDB: error %lu (%s) on handling " + "binlog schema event", + (ulong) pOp->getNdbError().code, + pOp->getNdbError().message); + pOp= schema_ndb->nextEvent(); + } + } + + if (res > 0) + { + DBUG_PRINT("info", ("pollEvents res: %d", res)); +#ifdef RUN_NDB_BINLOG_TIMER + Timer gci_timer, write_timer; + int event_count= 0; +#endif + thd->proc_info= "Processing events"; + NdbEventOperation *pOp= ndb->nextEvent(); + Binlog_index_row row; + while (pOp != NULL) + { + ndb-> + setReportThreshEventGCISlip(ndb_report_thresh_binlog_epoch_slip); + ndb->setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage); + + assert(pOp->getGCI() <= ndb_latest_received_binlog_epoch); + bzero((char*) &row, sizeof(row)); + 
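+        /*
+          All events belonging to the same GCI (epoch) are grouped into one
+          injector transaction; a row for apply_status carrying
+          (server_id, gci) is written first, so that a slave can track
+          which epoch it has applied.
+        */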
injector::transaction trans= inj->new_trans(thd); + gci= pOp->getGCI(); + if (apply_status_share) + { + TABLE *table= apply_status_share->table; + MY_BITMAP b; + uint32 bitbuf; + DBUG_ASSERT(table->s->fields <= sizeof(bitbuf) * 8); + bitmap_init(&b, &bitbuf, table->s->fields, false); + bitmap_set_all(&b); + table->field[0]->store((longlong)::server_id); + table->field[1]->store((longlong)gci); + trans.write_row(::server_id, + injector::transaction::table(table, true), + &b, table->s->fields, + table->record[0]); + } + else + { + sql_print_error("NDB: Could not get apply status share"); + } +#ifdef RUN_NDB_BINLOG_TIMER + write_timer.start(); +#endif + do + { +#ifdef RUN_NDB_BINLOG_TIMER + event_count++; +#endif + if (pOp->hasError() && + ndb_binlog_thread_handle_error(ndb, pOp, row) < 0) + goto err; + +#ifndef DBUG_OFF + { + NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData(); + DBUG_PRINT("info", + ("EVENT TYPE:%d GCI:%lld last applied: %lld " + "share: 0x%lx", pOp->getEventType(), gci, + ndb_latest_applied_binlog_epoch, share)); + DBUG_ASSERT(share != 0); + } +#endif + if ((unsigned) pOp->getEventType() < + (unsigned) NDBEVENT::TE_FIRST_NON_DATA_EVENT) + ndb_binlog_thread_handle_data_event(ndb, pOp, row, trans); + else + ndb_binlog_thread_handle_non_data_event(ndb, pOp, row); + + pOp= ndb->nextEvent(); + } while (pOp && pOp->getGCI() == gci); + + /* + note! pOp is not referring to an event in the next epoch + or is == 0 + */ +#ifdef RUN_NDB_BINLOG_TIMER + write_timer.stop(); +#endif + + if (row.n_inserts || row.n_updates + || row.n_deletes || row.n_schemaops) + { + injector::transaction::binlog_pos start= trans.start_pos(); + if (int r= trans.commit()) + { + sql_print_error("NDB binlog:" + "Error during COMMIT of GCI. Error: %d", + r); + /* TODO: Further handling? 
*/ + } + row.gci= gci; + row.master_log_file= start.file_name(); + row.master_log_pos= start.file_pos(); + + DBUG_PRINT("info",("COMMIT gci %lld",gci)); + if (ndb_update_binlog_index) + ndb_add_binlog_index(thd, &row); + ndb_latest_applied_binlog_epoch= gci; + } + else + trans.commit(); + ndb_latest_handled_binlog_epoch= gci; +#ifdef RUN_NDB_BINLOG_TIMER + gci_timer.stop(); + sql_print_information("gci %ld event_count %d write time " + "%ld(%d e/s), total time %ld(%d e/s)", + (ulong)gci, event_count, + write_timer.elapsed_ms(), + event_count / write_timer.elapsed_ms(), + gci_timer.elapsed_ms(), + event_count / gci_timer.elapsed_ms()); +#endif + } + } + + { + Cluster_replication_schema *schema; + while ((schema= schema_list.pop())) + { + char *thd_db_save= thd->db; + thd->db= schema->db; + thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query, + schema->query_length, FALSE, + schema->name[0] == 0); + thd->db= thd_db_save; + } + } + free_root(&mem_root, MYF(0)); + *root_ptr= old_root; + ndb_latest_handled_binlog_epoch= ndb_latest_received_binlog_epoch; + } +err: + DBUG_PRINT("info",("Shutting down cluster binlog thread")); + close_thread_tables(thd); + pthread_mutex_lock(&injector_mutex); + /* don't mess with the injector_ndb anymore from other threads */ + injector_ndb= 0; + pthread_mutex_unlock(&injector_mutex); + thd->db= 0; // as not to try to free memory + sql_print_information("Stopping Cluster Binlog"); + + if (apply_status_share) + free_share(&apply_status_share); + if (schema_share) + free_share(&schema_share); + + /* remove all event operations */ + if (ndb) + { + NdbEventOperation *op; + DBUG_PRINT("info",("removing all event operations")); + while ((op= ndb->getEventOperation())) + { + DBUG_PRINT("info",("removing event operation on %s", + op->getEvent()->getName())); + NDB_SHARE *share= (NDB_SHARE*) op->getCustomData(); + free_share(&share); + ndb->dropEventOperation(op); + } + delete ndb; + ndb= 0; + } + + // Placed here to avoid a memory leak; TODO: check if needed + net_end(&thd->net); + delete thd; + + ndb_binlog_thread_running= -1; + (void) pthread_cond_signal(&injector_cond); + + DBUG_PRINT("exit", ("ndb_binlog_thread")); + my_thread_end(); + + pthread_exit(0); + DBUG_RETURN(NULL); +} + +bool +ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print, + enum ha_stat_type stat_type) +{ + char buf[IO_SIZE]; + uint buflen; + ulonglong ndb_latest_epoch= 0; + DBUG_ENTER("ndbcluster_show_status_binlog"); + + pthread_mutex_lock(&injector_mutex); + if (injector_ndb) + { + ndb_latest_epoch= injector_ndb->getLatestGCI(); + pthread_mutex_unlock(&injector_mutex); + + buflen= + snprintf(buf, sizeof(buf), + "latest_epoch=%llu, " + "latest_trans_epoch=%llu, " + "latest_received_binlog_epoch=%llu, " + "latest_handled_binlog_epoch=%llu, " + "latest_applied_binlog_epoch=%llu", + ndb_latest_epoch, + g_latest_trans_gci, + ndb_latest_received_binlog_epoch, + ndb_latest_handled_binlog_epoch, + ndb_latest_applied_binlog_epoch); + if (stat_print(thd, ndbcluster_hton.name, strlen(ndbcluster_hton.name), + "binlog", strlen("binlog"), + buf, buflen)) + DBUG_RETURN(TRUE); + } + else + pthread_mutex_unlock(&injector_mutex); + DBUG_RETURN(FALSE); +} + +#endif /* HAVE_NDB_BINLOG */ diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h new file mode 100644 index 00000000000..5334120b43f --- /dev/null +++ b/sql/ha_ndbcluster_binlog.h @@ -0,0 +1,162 @@ +/* Copyright (C) 2000-2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of 
the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +// Typedefs for long names +typedef NdbDictionary::Object NDBOBJ; +typedef NdbDictionary::Column NDBCOL; +typedef NdbDictionary::Table NDBTAB; +typedef NdbDictionary::Index NDBINDEX; +typedef NdbDictionary::Dictionary NDBDICT; +typedef NdbDictionary::Event NDBEVENT; + +#define IS_TMP_PREFIX(A) (is_prefix(A, tmp_file_prefix) || is_prefix(A, "@0023sql")) + +extern ulong ndb_extra_logging; + +#ifdef HAVE_NDB_BINLOG + +#define INJECTOR_EVENT_LEN 200 + +enum SCHEMA_OP_TYPE +{ + SOT_DROP_TABLE, + SOT_CREATE_TABLE, + SOT_RENAME_TABLE, + SOT_ALTER_TABLE, + SOT_DROP_DB, + SOT_CREATE_DB, + SOT_ALTER_DB, + SOT_CLEAR_SLOCK +}; + +const uint max_ndb_nodes= 64; /* multiple of 32 */ + +extern pthread_t ndb_binlog_thread; +extern pthread_mutex_t injector_mutex; +extern pthread_cond_t injector_cond; + +static const char *ha_ndb_ext=".ndb"; +static const char share_prefix[]= "./"; + +extern unsigned char g_node_id_map[max_ndb_nodes]; +extern handlerton ndbcluster_hton; +extern pthread_t ndb_util_thread; +extern pthread_mutex_t LOCK_ndb_util_thread; +extern pthread_cond_t COND_ndb_util_thread; +extern int ndbcluster_util_inited; +extern pthread_mutex_t ndbcluster_mutex; +extern HASH ndbcluster_open_tables; +extern Ndb_cluster_connection* g_ndb_cluster_connection; +extern long ndb_number_of_storage_nodes; + +/* + Initialize the binlog part of the ndb handlerton +*/ +void ndbcluster_binlog_init_handlerton(); +/* + Initialize the binlog part of the NDB_SHARE +*/ +void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *table); + +int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key, + const char *db, + const char *table_name, + NDB_SHARE *share); +int ndbcluster_create_event(Ndb *ndb, const NDBTAB *table, + const char *event_name, NDB_SHARE *share); +int ndbcluster_create_event_ops(NDB_SHARE *share, + const NDBTAB *ndbtab, + const char *event_name); +int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share, + const char *query, int query_length, + const char *db, const char *table_name, + uint32 ndb_table_id, + uint32 ndb_table_version, + enum SCHEMA_OP_TYPE type); +int ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name, + NDB_SHARE *share); +void ndb_rep_event_name(String *event_name, + const char *db, const char *tbl); + +int ndbcluster_binlog_start(); +pthread_handler_t ndb_binlog_thread_func(void *arg); + +/* + table cluster_replication.apply_status +*/ +void ndbcluster_setup_binlog_table_shares(THD *thd); +extern NDB_SHARE *apply_status_share; +extern NDB_SHARE *schema_share; + +extern THD *injector_thd; +extern int ndb_binlog_thread_running; + +bool +ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print, + enum ha_stat_type stat_type); + +/* + prototypes for ndb handler utility function also needed by + the ndb binlog code +*/ +int ndbcluster_find_all_files(THD *thd); +void ndb_unpack_record(TABLE *table, NdbValue *value, + MY_BITMAP *defined, byte *buf); + +NDB_SHARE 
*ndbcluster_get_share(const char *key, + TABLE *table, + bool create_if_not_exists, + bool have_lock); +NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share); +void ndbcluster_free_share(NDB_SHARE **share, bool have_lock); +void ndbcluster_real_free_share(NDB_SHARE **share); +int handle_trailing_share(NDB_SHARE *share); +inline NDB_SHARE *get_share(const char *key, + TABLE *table, + bool create_if_not_exists= TRUE, + bool have_lock= FALSE) +{ + return ndbcluster_get_share(key, table, create_if_not_exists, have_lock); +} + +inline NDB_SHARE *get_share(NDB_SHARE *share) +{ + return ndbcluster_get_share(share); +} + +inline void free_share(NDB_SHARE **share, bool have_lock= FALSE) +{ + ndbcluster_free_share(share, have_lock); +} + +inline void real_free_share(NDB_SHARE **share) +{ + ndbcluster_real_free_share(share); +} + +inline +Thd_ndb * +get_thd_ndb(THD *thd) { return (Thd_ndb *) thd->ha_data[ndbcluster_hton.slot]; } + +inline +void +set_thd_ndb(THD *thd, Thd_ndb *thd_ndb) { thd->ha_data[ndbcluster_hton.slot]= thd_ndb; } + +Ndb* check_ndb_in_thd(THD* thd); + + +#endif /* HAVE_NDB_BINLOG */ diff --git a/sql/ha_ndbcluster_tables.h b/sql/ha_ndbcluster_tables.h new file mode 100644 index 00000000000..d726fd63e1d --- /dev/null +++ b/sql/ha_ndbcluster_tables.h @@ -0,0 +1,21 @@ +/* Copyright (C) 2000-2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#define NDB_REP_DB "cluster_replication" +#define NDB_REP_TABLE "binlog_index" +#define NDB_APPLY_TABLE "apply_status" +#define NDB_SCHEMA_TABLE "schema" diff --git a/sql/handler.cc b/sql/handler.cc index 2391c54971d..ead22b6b03f 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2411,6 +2411,132 @@ int ha_table_exists_in_engine(THD* thd, const char* db, const char* name) DBUG_RETURN(error); } +#ifdef HAVE_NDB_BINLOG +/* + TODO: change this into a dynamic struct + List<handlerton> does not work as + 1. binlog_end is called when MEM_ROOT is gone + 2. 
cannot work with thd MEM_ROOT as memory should be freed
+*/
+#define MAX_HTON_LIST_ST 63
+struct hton_list_st
+{
+  handlerton *hton[MAX_HTON_LIST_ST];
+  uint sz;
+};
+
+struct binlog_func_st
+{
+  enum_binlog_func fn;
+  void *arg;
+};
+
+/*
+  Collect the handlertons into a list first, to avoid recursive
+  plugin_foreach calls and deadlock
+*/
+static my_bool binlog_func_list(THD *thd, st_plugin_int *plugin, void *arg)
+{
+  hton_list_st *hton_list= (hton_list_st *)arg;
+  handlerton *hton= (handlerton *) plugin->plugin->info;
+  if (hton->state == SHOW_OPTION_YES && hton->binlog_func)
+  {
+    uint sz= hton_list->sz;
+    if (sz == MAX_HTON_LIST_ST-1)
+    {
+      /* list full */
+      return FALSE;
+    }
+    hton_list->hton[sz]= hton;
+    hton_list->sz= sz+1;
+  }
+  return FALSE;
+}
+
+static my_bool binlog_func_foreach(THD *thd, binlog_func_st *bfn)
+{
+  hton_list_st hton_list;
+  hton_list.sz= 0;
+  plugin_foreach(thd, binlog_func_list,
+                 MYSQL_STORAGE_ENGINE_PLUGIN, &hton_list);
+
+  uint i= 0, sz= hton_list.sz;
+  while (i < sz)
+    hton_list.hton[i++]->binlog_func(thd, bfn->fn, bfn->arg);
+  return FALSE;
+}
+
+int ha_reset_logs(THD *thd)
+{
+  binlog_func_st bfn= {BFN_RESET_LOGS, 0};
+  binlog_func_foreach(thd, &bfn);
+  return 0;
+}
+
+void ha_reset_slave(THD* thd)
+{
+  binlog_func_st bfn= {BFN_RESET_SLAVE, 0};
+  binlog_func_foreach(thd, &bfn);
+}
+
+void ha_binlog_wait(THD* thd)
+{
+  binlog_func_st bfn= {BFN_BINLOG_WAIT, 0};
+  binlog_func_foreach(thd, &bfn);
+}
+
+int ha_binlog_end(THD* thd)
+{
+  binlog_func_st bfn= {BFN_BINLOG_END, 0};
+  binlog_func_foreach(thd, &bfn);
+  return 0;
+}
+
+int ha_binlog_index_purge_file(THD *thd, const char *file)
+{
+  binlog_func_st bfn= {BFN_BINLOG_PURGE_FILE, (void *)file};
+  binlog_func_foreach(thd, &bfn);
+  return 0;
+}
+
+struct binlog_log_query_st
+{
+  enum_binlog_command binlog_command;
+  const char *query;
+  uint query_length;
+  const char *db;
+  const char *table_name;
+};
+
+static my_bool binlog_log_query_handlerton(THD *thd,
+                                           st_plugin_int *plugin,
+                                           void *args)
+{
+  struct binlog_log_query_st *b= (struct binlog_log_query_st*)args;
+  handlerton *hton= (handlerton *) plugin->plugin->info;
+  if (hton->state == SHOW_OPTION_YES && hton->binlog_log_query)
+    hton->binlog_log_query(thd,
+                           b->binlog_command,
+                           b->query,
+                           b->query_length,
+                           b->db,
+                           b->table_name);
+  return FALSE;
+}
+
+void ha_binlog_log_query(THD *thd, enum_binlog_command binlog_command,
+                         const char *query, uint query_length,
+                         const char *db, const char *table_name)
+{
+  struct binlog_log_query_st b;
+  b.binlog_command= binlog_command;
+  b.query= query;
+  b.query_length= query_length;
+  b.db= db;
+  b.table_name= table_name;
+  plugin_foreach(thd, binlog_log_query_handlerton,
+                 MYSQL_STORAGE_ENGINE_PLUGIN, &b);
+}
+#endif
 
 /*
   Read the first row of a multi-range set.
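The new handlerton members are dispatched through plugin_foreach() as shown
above: ha_reset_logs(), ha_binlog_wait() and friends simply broadcast a
binlog_func_st to every engine that filled in binlog_func. A sketch of how
an engine could opt in at handlerton-init time (example_hton and its callback
are hypothetical; for NDB this presumably happens in
ndbcluster_binlog_init_handlerton(), declared in ha_ndbcluster_binlog.h above):

    /* hypothetical engine callback; fn values come from enum_binlog_func */
    static int example_binlog_func(THD *thd, enum_binlog_func fn, void *arg)
    {
      switch (fn)
      {
      case BFN_RESET_LOGS:        /* e.g. drop an engine-side binlog index */
        break;
      case BFN_BINLOG_WAIT:       /* e.g. block until engine binlog caught up */
        break;
      case BFN_BINLOG_PURGE_FILE: /* arg is the log file name being purged */
        break;
      default:
        break;
      }
      return 0;
    }

    /* during engine initialization */
    example_hton.binlog_func= example_binlog_func;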
@@ -2832,6 +2958,8 @@ template<class RowsEventT> int binlog_log_row(TABLE* table,
                                               const byte *before_record,
                                               const byte *after_record)
 {
+  if (table->file->is_injective())
+    return 0;
   bool error= 0;
   THD *const thd= current_thd;
 
diff --git a/sql/handler.h b/sql/handler.h
index f6680679a35..1ed19b72331 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -205,6 +205,24 @@ enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED,
                 ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED,
                 ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT };
 
+enum enum_binlog_func {
+  BFN_RESET_LOGS= 1,
+  BFN_RESET_SLAVE= 2,
+  BFN_BINLOG_WAIT= 3,
+  BFN_BINLOG_END= 4,
+  BFN_BINLOG_PURGE_FILE= 5
+};
+
+enum enum_binlog_command {
+  LOGCOM_CREATE_TABLE,
+  LOGCOM_ALTER_TABLE,
+  LOGCOM_RENAME_TABLE,
+  LOGCOM_DROP_TABLE,
+  LOGCOM_CREATE_DB,
+  LOGCOM_ALTER_DB,
+  LOGCOM_DROP_DB
+};
+
 /* struct to hold information about the table that should be created */
 
 /* Bits in used_fields */
@@ -420,7 +438,8 @@ typedef struct
     structure version
   */
   const int interface_version;
-#define MYSQL_HANDLERTON_INTERFACE_VERSION 0x0000
+/* last version change: 0x0001 in 5.1.6 */
+#define MYSQL_HANDLERTON_INTERFACE_VERSION 0x0001
 
 /*
@@ -512,6 +531,15 @@ typedef struct
   bool (*show_status)(THD *thd, stat_print_fn *print, enum ha_stat_type stat);
   int (*alter_tablespace)(THD *thd, st_alter_tablespace *ts_info);
   uint32 flags; /* global handler flags */
+  /*
+    These handlerton functions are not set in the various storage
+    engines' static initialization; they are initialized at handler init.
+    Thus, leave them last in the struct.
+  */
+  int (*binlog_func)(THD *thd, enum_binlog_func fn, void *arg);
+  void (*binlog_log_query)(THD *thd, enum_binlog_command binlog_command,
+                           const char *query, uint query_length,
+                           const char *db, const char *table_name);
 } handlerton;
 
 extern const handlerton default_hton;
@@ -1195,6 +1223,12 @@ public:
   virtual int ha_update_row(const byte * old_data, byte * new_data);
   virtual int ha_delete_row(const byte * buf);
   /*
+    If the handler does its own injection of the rows, this member function
+    should return 'true'.
+  */
+  virtual bool is_injective() const { return false; }
+
+  /*
     SYNOPSIS
       start_bulk_update()
     RETURN
@@ -1705,3 +1739,21 @@ void trans_register_ha(THD *thd, bool all, handlerton *ht);
 int ha_repl_report_sent_binlog(THD *thd, char *log_file_name,
                                my_off_t end_offset);
 int ha_repl_report_replication_stop(THD *thd);
+
+#ifdef HAVE_NDB_BINLOG
+int ha_reset_logs(THD *thd);
+int ha_binlog_index_purge_file(THD *thd, const char *file);
+void ha_reset_slave(THD *thd);
+void ha_binlog_log_query(THD *thd, enum_binlog_command binlog_command,
+                         const char *query, uint query_length,
+                         const char *db, const char *table_name);
+void ha_binlog_wait(THD *thd);
+int ha_binlog_end(THD *thd);
+#else
+#define ha_reset_logs(a) 0
+#define ha_binlog_index_purge_file(a,b) 0
+#define ha_reset_slave(a)
+#define ha_binlog_log_query(a,b,c,d,e,f)
+#define ha_binlog_wait(a)
+#define ha_binlog_end(a) 0
+#endif
diff --git a/sql/log.cc b/sql/log.cc
index fe95419fffd..7232d3a24dd 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -988,6 +988,7 @@ bool MYSQL_LOG::reset_logs(THD* thd)
   enum_log_type save_log_type;
   DBUG_ENTER("reset_logs");
 
+  ha_reset_logs(thd);
   /*
     We need to get both locks to be sure that no one is trying to
     write to the index log file.
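Note the guard added to binlog_log_row() at the top of this hunk group: an
engine whose rows already reach the binlog through the injector reports this
via the new is_injective() virtual, so the generic row-based logging path
skips them and each row is logged exactly once. A hypothetical engine would
opt out like this:

    class ha_example: public handler
    {
      /* rows from this engine reach the binlog via the injector */
      virtual bool is_injective() const { return true; }
      /* ... */
    };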
@@ -1237,6 +1238,9 @@ int MYSQL_LOG::purge_logs(const char *to_log, DBUG_PRINT("info",("purging %s",log_info.log_file_name)); if (!my_delete(log_info.log_file_name, MYF(0)) && decrease_log_space) *decrease_log_space-= file_size; + + ha_binlog_index_purge_file(current_thd, log_info.log_file_name); + if (find_next_log(&log_info, 0) || exit_loop) break; } @@ -1297,6 +1301,9 @@ int MYSQL_LOG::purge_logs_before_date(time_t purge_time) stat_area.st_mtime >= purge_time) break; my_delete(log_info.log_file_name, MYF(0)); + + ha_binlog_index_purge_file(current_thd, log_info.log_file_name); + if (find_next_log(&log_info, 0)) break; } diff --git a/sql/log_event.cc b/sql/log_event.cc index 712fff15774..04a6276d476 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -79,6 +79,20 @@ static void clear_all_errors(THD *thd, struct st_relay_log_info *rli) inline int ignored_error_code(int err_code) { +#ifdef HAVE_NDB_BINLOG + /* + The following error codes are hard-coded and will always be ignored. + */ + switch (err_code) + { + case ER_DB_CREATE_EXISTS: + case ER_DB_DROP_EXISTS: + return 1; + default: + /* Nothing to do */ + break; + } +#endif return ((err_code == ER_SLAVE_IGNORED_TABLE) || (use_slave_mask && bitmap_is_set(&slave_error_mask, err_code))); } @@ -5276,7 +5290,8 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) { slave_print_msg(ERROR_LEVEL, rli, error, "Error in %s event: error during table %s.%s lock", - get_type_str(), table->s->db, table->s->table_name); + get_type_str(), table->s->db.str, + table->s->table_name.str); DBUG_RETURN(error); } /* @@ -5412,7 +5427,12 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) if (error) { /* error has occured during the transaction */ - /* + slave_print_msg(ERROR_LEVEL, rli, error, + "Error in %s event: error during transaction execution " + "on table %s.%s", + get_type_str(), table->s->db.str, + table->s->table_name.str); + /* If one day we honour --skip-slave-errors in row-based replication, and the error should be skipped, then we would clear mappings, rollback, close tables, but the slave SQL thread would not stop and then may @@ -5485,7 +5505,8 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) slave_print_msg(ERROR_LEVEL, rli, error, "Error in %s event: commit of row events failed, " "table `%s`.`%s`", - get_type_str(), table->s->db, table->s->table_name); + get_type_str(), table->s->db.str, + table->s->table_name.str); DBUG_RETURN(error); } @@ -5585,8 +5606,8 @@ void Rows_log_event::pack_info(Protocol *protocol) { char buf[256]; char const *const flagstr= get_flags(STMT_END_F) ? "STMT_END_F" : ""; - char const *const dbnam= m_table->s->db; - char const *const tblnam= m_table->s->table_name; + char const *const dbnam= m_table->s->db.str; + char const *const tblnam= m_table->s->table_name.str; my_size_t bytes= snprintf(buf, sizeof(buf), "%s.%s - %s", dbnam, tblnam, flagstr); protocol->store(buf, bytes, &my_charset_bin); @@ -6105,7 +6126,8 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table) */ thd->lex->sql_command= SQLCOM_REPLACE; - table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); // needed for ndbcluster + table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); // Needed for ndbcluster + table->file->extra(HA_EXTRA_IGNORE_NO_KEY); // Needed for ndbcluster /* TODO: the cluster team (Tomas?) 
says that it's better if the engine knows how many rows are going to be inserted, then it can allocate needed memory @@ -6373,6 +6395,9 @@ static int find_and_fetch_row(TABLE *table, byte *key, byte *record_buf) DBUG_ASSERT(record_buf); + /* We need to retrieve all fields */ + table->file->ha_set_all_bits_in_read_set(); + if (table->s->keys > 0) { int error; diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index b3bc49b31d1..026234caf34 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -538,6 +538,7 @@ enum enum_mysql_completiontype { COMMIT_RELEASE=-1, COMMIT=0, COMMIT_AND_CHAIN=6 }; +bool begin_trans(THD *thd); int end_trans(THD *thd, enum enum_mysql_completiontype completion); Item *negate_expression(THD *thd, Item *expr); @@ -640,6 +641,7 @@ bool table_cache_init(void); void table_cache_free(void); bool table_def_init(void); void table_def_free(void); +void assign_new_table_id(TABLE *table); uint cached_open_tables(void); uint cached_table_definitions(void); void kill_mysql(void); @@ -1041,7 +1043,7 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b); bool remove_table_from_cache(THD *thd, const char *db, const char *table, uint flags); -bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables); +bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables, bool have_lock = FALSE); void copy_field_from_tmp_record(Field *field,int offset); bool fill_record(THD *thd, Field **field, List<Item> &values, bool ignore_errors); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index cff969d4113..cf1a4c4c936 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -417,6 +417,11 @@ my_bool opt_ndb_shm, opt_ndb_optimized_node_selection; ulong opt_ndb_cache_check_time; const char *opt_ndb_mgmd; ulong opt_ndb_nodeid; +ulong ndb_extra_logging; +#ifdef HAVE_NDB_BINLOG +ulong ndb_report_thresh_binlog_epoch_slip; +ulong ndb_report_thresh_binlog_mem_usage; +#endif extern SHOW_VAR ndb_status_variables[]; extern const char *ndb_distribution_names[]; @@ -1134,6 +1139,11 @@ void clean_up(bool print_message) mysql_log.cleanup(); mysql_slow_log.cleanup(); + /* + make sure that handlers finish up + what they have that is dependent on the binlog + */ + ha_binlog_end(current_thd); mysql_bin_log.cleanup(); #ifdef HAVE_REPLICATION @@ -3106,11 +3116,12 @@ with --log-bin instead."); } if (opt_binlog_format_id == BF_UNSPECIFIED) { - /* - We use statement-based by default, but could change this to be row-based - if this is a cluster build (i.e. have_ndbcluster is true)... - */ - opt_binlog_format_id= BF_STMT; +#ifdef HAVE_NDB_BINLOG + if (have_ndbcluster == SHOW_OPTION_YES) + opt_binlog_format_id= BF_ROW; + else +#endif + opt_binlog_format_id= BF_STMT; } #ifdef HAVE_ROW_BASED_REPLICATION if (opt_binlog_format_id == BF_ROW) @@ -4646,6 +4657,9 @@ enum options_mysqld OPT_NDB_DISTRIBUTION, OPT_NDB_INDEX_STAT_ENABLE, OPT_NDB_INDEX_STAT_CACHE_ENTRIES, OPT_NDB_INDEX_STAT_UPDATE_FREQ, + OPT_NDB_EXTRA_LOGGING, + OPT_NDB_REPORT_THRESH_BINLOG_EPOCH_SLIP, + OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE, OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE, OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, @@ -4848,7 +4862,11 @@ Disable with --skip-bdb (will save memory).", "Tell the master the form of binary logging to use: either 'row' for " "row-based binary logging (which automatically turns on " "innodb_locks_unsafe_for_binlog as it is safe in this case), or " - "'statement' for statement-based logging. ", + "'statement' for statement-based logging. 
" +#ifdef HAVE_NDB_BINLOG + "If ndbcluster is enabled, the default will be set to 'row'." +#endif + , #else "Tell the master the form of binary logging to use: this release build " "supports only statement-based binary logging, so only 'statement' is " @@ -5302,6 +5320,29 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &global_system_variables.ndb_force_send, (gptr*) &global_system_variables.ndb_force_send, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb-extra-logging", OPT_NDB_EXTRA_LOGGING, + "Turn on more logging in the error log.", + (gptr*) &ndb_extra_logging, + (gptr*) &ndb_extra_logging, + 0, GET_INT, OPT_ARG, 0, 0, 0, 0, 0, 0}, +#ifdef HAVE_NDB_BINLOG + {"ndb-report-thresh-binlog-epoch-slip", OPT_NDB_REPORT_THRESH_BINLOG_EPOCH_SLIP, + "Threshold on number of epochs to be behind before reporting binlog status. " + "E.g. 3 means that if the difference between what epoch has been received " + "from the storage nodes and what has been applied to the binlog is 3 or more, " + "a status message will be sent to the cluster log.", + (gptr*) &ndb_report_thresh_binlog_epoch_slip, + (gptr*) &ndb_report_thresh_binlog_epoch_slip, + 0, GET_ULONG, REQUIRED_ARG, 3, 0, 256, 0, 0, 0}, + {"ndb-report-thresh-binlog-mem-usage", OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE, + "Threshold on percentage of free memory before reporting binlog status. E.g. " + "10 means that if amount of available memory for receiving binlog data from " + "the storage nodes goes below 10%, " + "a status message will be sent to the cluster log.", + (gptr*) &ndb_report_thresh_binlog_mem_usage, + (gptr*) &ndb_report_thresh_binlog_mem_usage, + 0, GET_ULONG, REQUIRED_ARG, 10, 0, 100, 0, 0, 0}, +#endif {"ndb-use-exact-count", OPT_NDB_USE_EXACT_COUNT, "Use exact records count during query planning and for fast " "select count(*), disable for faster queries.", @@ -7500,6 +7541,14 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), } opt_ndb_distribution_id= (enum ndb_distribution)(id-1); break; + case OPT_NDB_EXTRA_LOGGING: + if (!argument) + ndb_extra_logging++; + else if (argument == disabled_my_option) + ndb_extra_logging= 0L; + else + ndb_extra_logging= atoi(argument); + break; #endif case OPT_INNODB: #ifdef WITH_INNOBASE_STORAGE_ENGINE diff --git a/sql/rpl_injector.cc b/sql/rpl_injector.cc new file mode 100644 index 00000000000..a69dea9a158 --- /dev/null +++ b/sql/rpl_injector.cc @@ -0,0 +1,153 @@ +/* + Copyright (C) 2005 MySQL AB & MySQL Finland AB & TCX DataKonsult AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
+
+#include "mysql_priv.h"
+#include "rpl_injector.h"
+#ifdef HAVE_ROW_BASED_REPLICATION
+
+/*
+  injector::transaction - member definitions
+*/
+
+/* inline since it's called below */
+inline
+injector::transaction::transaction(MYSQL_LOG *log, THD *thd)
+  : m_thd(thd)
+{
+  /*
+    Default initialization of m_start_pos fills it with garbage.
+    We need to fill it in using the code below.
+  */
+  LOG_INFO log_info;
+  log->get_current_log(&log_info);
+  /* !!! binlog_pos does not follow RAII !!! */
+  m_start_pos.m_file_name= my_strdup(log_info.log_file_name, MYF(0));
+  m_start_pos.m_file_pos= log_info.pos;
+
+  begin_trans(m_thd);
+}
+
+injector::transaction::~transaction()
+{
+  /* Needed since my_free expects a 'char*' (instead of 'void*'). */
+  char* const the_memory= const_cast<char*>(m_start_pos.m_file_name);
+
+  /*
+    We set the first character to null just to give all the copies of the
+    start position a (minimal) chance of seeing that the memory is lost.
+    All assuming my_free does not step over the memory, of course.
+  */
+  *the_memory= '\0';
+
+  my_free(the_memory, MYF(0));
+}
+
+int injector::transaction::commit()
+{
+  DBUG_ENTER("injector::transaction::commit()");
+  m_thd->binlog_flush_pending_rows_event(true);
+  end_trans(m_thd, COMMIT);
+  DBUG_RETURN(0);
+}
+
+
+int injector::transaction::write_row(server_id_type sid, table tbl,
+                                     MY_BITMAP const* cols, size_t colcnt,
+                                     record_type record)
+{
+  DBUG_ENTER("injector::transaction::write_row(...)");
+  m_thd->set_server_id(sid);
+  m_thd->binlog_write_row(tbl.get_table(), tbl.is_transactional(),
+                          cols, colcnt, record);
+  DBUG_RETURN(0);
+}
+
+
+int injector::transaction::delete_row(server_id_type sid, table tbl,
+                                      MY_BITMAP const* cols, size_t colcnt,
+                                      record_type record)
+{
+  DBUG_ENTER("injector::transaction::delete_row(...)");
+  m_thd->set_server_id(sid);
+  m_thd->binlog_delete_row(tbl.get_table(), tbl.is_transactional(),
+                           cols, colcnt, record);
+  DBUG_RETURN(0);
+}
+
+
+int injector::transaction::update_row(server_id_type sid, table tbl,
+                                      MY_BITMAP const* cols, size_t colcnt,
+                                      record_type before, record_type after)
+{
+  DBUG_ENTER("injector::transaction::update_row(...)");
+  m_thd->set_server_id(sid);
+  m_thd->binlog_update_row(tbl.get_table(), tbl.is_transactional(),
+                           cols, colcnt, before, after);
+  DBUG_RETURN(0);
+}
+
+
+injector::transaction::binlog_pos injector::transaction::start_pos() const
+{
+  return m_start_pos;
+}
+
+
+/*
+  injector - member definitions
+*/
+
+/* This constructor is called below */
+inline injector::injector()
+{
+}
+
+static injector *s_injector= 0;
+injector *injector::instance()
+{
+  if (s_injector == 0)
+    s_injector= new injector;
+  /* "There can be only one [instance]" */
+  return s_injector;
+}
+
+
+
+injector::transaction injector::new_trans(THD *thd)
+{
+  DBUG_ENTER("injector::new_trans(THD*)");
+  /*
+    Currently, there is no alternative to using 'mysql_bin_log' since that
+    is hardcoded into the way the handler is using the binary log.
+  */
+  DBUG_RETURN(transaction(&mysql_bin_log, thd));
+}
+
+void injector::new_trans(THD *thd, injector::transaction *ptr)
+{
+  DBUG_ENTER("injector::new_trans(THD *, transaction *)");
+  /*
+    Currently, there is no alternative to using 'mysql_bin_log' since that
+    is hardcoded into the way the handler is using the binary log.
+  */
+  transaction trans(&mysql_bin_log, thd);
+  ptr->swap(trans);
+
+  DBUG_VOID_RETURN;
+}
+
+#endif
diff --git a/sql/rpl_injector.h b/sql/rpl_injector.h
new file mode 100644
index 00000000000..32d3fdd1a78
--- /dev/null
+++ b/sql/rpl_injector.h
@@ -0,0 +1,251 @@
+/*
+   Copyright (C) 2005 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
+
+#ifndef INJECTOR_H
+#define INJECTOR_H
+
+/* Pull in 'byte', 'my_off_t', and 'uint32' */
+#include <my_global.h>
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+#include <my_bitmap.h>
+
+/* Forward declarations */
+class handler;
+class MYSQL_LOG;
+class st_table;
+
+typedef st_table TABLE;
+
+/*
+  Injector to inject rows into the MySQL server.
+
+  The injector class is used to notify the MySQL server of new rows that have
+  appeared outside of MySQL control.
+
+  The original purpose of this is to allow clusters---which handle replication
+  inside the cluster through other means---to insert new rows into the binary
+  log.  Note, however, that the injector should be used whenever rows are
+  altered in any manner that is outside of MySQL server visibility and which
+  therefore are not seen by the MySQL server.
+ */
+class injector
+{
+public:
+
+  /*
+    Get an instance of the injector.
+
+    DESCRIPTION
+      The injector is a Singleton, so this static function returns the
+      available instance of the injector.
+
+    RETURN VALUE
+      A pointer to the available injector object.
+  */
+  static injector *instance();
+
+  /*
+    A transaction where rows can be added.
+
+    DESCRIPTION
+      The transaction class satisfies the **CopyConstructible** and
+      **Assignable** requirements.  Note that the transaction is *not*
+      default constructible.
+   */
+  class transaction {
+    friend class injector;
+  public:
+    /* Convenience definitions */
+    typedef byte* record_type;
+    typedef uint32 server_id_type;
+
+    /*
+      Table reference.
+
+      RESPONSIBILITY
+
+        The class contains constructors to handle several forms of
+        references to tables.  The constructors can implicitly be used to
+        construct references from, e.g., strings containing table names.
+
+      EXAMPLE
+
+        The class is intended to be used *by value*.  Please, do not try to
+        construct objects of this type using 'new'; instead construct an
+        object, possibly a temporary object.  For example:
+
+          injector::transaction::table tbl(share->table, true);
+          MY_BITMAP cols;
+          bitmap_init(&cols, NULL, (i + 7) / 8, false);
+          trans.write_row(::server_id, tbl, &cols, i, row_data);
+
+        or
+
+          MY_BITMAP cols;
+          bitmap_init(&cols, NULL, (i + 7) / 8, false);
+          trans.write_row(::server_id,
+                          injector::transaction::table(share->table, true),
+                          &cols, i, row_data);
+
+        This will work, be more efficient, have a greater chance of
+        inlining, and not run the risk of losing pointers.
+
+      COLLABORATION
+
+        injector::transaction
+          Provide a flexible interface to the representation of tables.
+
+    */
+    class table
+    {
+    public:
+      table(TABLE *table, bool is_transactional)
+          : m_table(table), m_is_transactional(is_transactional)
+      {
+      }
+
+      char const *db_name() const { return m_table->s->db.str; }
+      char const *table_name() const { return m_table->s->table_name.str; }
+      TABLE *get_table() const { return m_table; }
+      bool is_transactional() const { return m_is_transactional; }
+
+    private:
+      TABLE *m_table;
+      bool m_is_transactional;
+    };
+
+    /*
+      Binlog position as a structure.
+    */
+    class binlog_pos {
+      friend class transaction;
+    public:
+      char const *file_name() const { return m_file_name; }
+      my_off_t file_pos() const { return m_file_pos; }
+
+    private:
+      char const *m_file_name;
+      my_off_t m_file_pos;
+    };
+
+    transaction() : m_thd(NULL) { }
+    transaction(transaction const&);
+    ~transaction();
+
+    /* Clear the transaction, i.e., make calls to 'good()' return false. */
+    void clear() { m_thd= NULL; }
+
+    /* Is the transaction in a good state? */
+    bool good() const { return m_thd != NULL; }
+
+    /* Default assignment operator: standard implementation */
+    transaction& operator=(transaction t) {
+      swap(t);
+      return *this;
+    }
+
+    /*
+      Add a 'write row' entry to the transaction.
+    */
+    int write_row(server_id_type sid, table tbl,
+                  MY_BITMAP const *cols, size_t colcnt,
+                  record_type record);
+
+    /*
+      Add a 'delete row' entry to the transaction.
+    */
+    int delete_row(server_id_type sid, table tbl,
+                   MY_BITMAP const *cols, size_t colcnt,
+                   record_type record);
+
+    /*
+      Add an 'update row' entry to the transaction.
+    */
+    int update_row(server_id_type sid, table tbl,
+                   MY_BITMAP const *cols, size_t colcnt,
+                   record_type before, record_type after);
+
+    /*
+      Commit a transaction.
+
+      This member function will clean up after a sequence of *_row calls by,
+      for example, releasing resources and unlocking files.
+    */
+    int commit();
+
+    /*
+      Get the position for the start of the transaction.
+
+      Returns the position in the binary log of the first event in this
+      transaction.  If no event is yet written, the position where the event
+      *will* be written is returned.  This position is known, since
+      new_trans() will lock the binary log and prevent any other writes
+      to the binary log.
+    */
+    binlog_pos start_pos() const;
+
+  private:
+    /* Only the injector may construct these objects */
+    transaction(MYSQL_LOG *, THD *);
+
+    void swap(transaction& o) {
+      /* std::swap(m_start_pos, o.m_start_pos); */
+      {
+        binlog_pos const tmp= m_start_pos;
+        m_start_pos= o.m_start_pos;
+        o.m_start_pos= tmp;
+      }
+
+      /* std::swap(m_thd, o.m_thd); */
+      {
+        THD* const tmp= m_thd;
+        m_thd= o.m_thd;
+        o.m_thd= tmp;
+      }
+    }
+
+    binlog_pos m_start_pos;
+    THD *m_thd;
+  };
+
+  /*
+    Create a new transaction.  This member function will prepare for a
+    sequence of *_row calls by, for example, reserving resources and
+    locking files.  There are two overloaded alternatives: one returning a
+    transaction by value and one using placement semantics.  The following
+    two calls are equivalent, with the exception that the latter will
+    overwrite the transaction.
+
+      injector::transaction trans1= inj->new_trans(thd);
+
+      injector::transaction trans2;
+      inj->new_trans(thd, &trans2);
+  */
+  transaction new_trans(THD *);
+  void new_trans(THD *, transaction *);
+
+private:
+  explicit injector();
+  ~injector() { }             /* Nothing needs to be done */
+  injector(injector const&);  /* You're not allowed to copy injector
+                                 instances.
+ */ +}; + +#endif /* HAVE_ROW_BASED_REPLICATION */ +#endif /* INJECTOR_H */ diff --git a/sql/set_var.cc b/sql/set_var.cc index f0b1779efc5..1ccd590171f 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -101,6 +101,11 @@ extern ulong srv_commit_concurrency; /* WITH_NDBCLUSTER_STORAGE_ENGINE */ extern ulong ndb_cache_check_time; +extern ulong ndb_extra_logging; +#ifdef HAVE_NDB_BINLOG +extern ulong ndb_report_thresh_binlog_epoch_slip; +extern ulong ndb_report_thresh_binlog_mem_usage; +#endif @@ -481,6 +486,14 @@ sys_ndb_autoincrement_prefetch_sz("ndb_autoincrement_prefetch_sz", &SV::ndb_autoincrement_prefetch_sz); sys_var_thd_bool sys_ndb_force_send("ndb_force_send", &SV::ndb_force_send); +#ifdef HAVE_NDB_BINLOG +sys_var_long_ptr +sys_ndb_report_thresh_binlog_epoch_slip("ndb_report_thresh_binlog_epoch_slip", + &ndb_report_thresh_binlog_epoch_slip); +sys_var_long_ptr +sys_ndb_report_thresh_binlog_mem_usage("ndb_report_thresh_binlog_mem_usage", + &ndb_report_thresh_binlog_mem_usage); +#endif sys_var_thd_bool sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); sys_var_thd_bool @@ -496,6 +509,8 @@ sys_ndb_index_stat_cache_entries("ndb_index_stat_cache_entries", sys_var_thd_ulong sys_ndb_index_stat_update_freq("ndb_index_stat_update_freq", &SV::ndb_index_stat_update_freq); +sys_var_long_ptr +sys_ndb_extra_logging("ndb_extra_logging", &ndb_extra_logging); /* Time/date/datetime formats */ @@ -847,10 +862,17 @@ SHOW_VAR init_vars[]= { {sys_ndb_autoincrement_prefetch_sz.name, (char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS}, {sys_ndb_cache_check_time.name,(char*) &sys_ndb_cache_check_time, SHOW_SYS}, + {sys_ndb_extra_logging.name,(char*) &sys_ndb_extra_logging, SHOW_SYS}, {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS}, {sys_ndb_index_stat_cache_entries.name, (char*) &sys_ndb_index_stat_cache_entries, SHOW_SYS}, {sys_ndb_index_stat_enable.name, (char*) &sys_ndb_index_stat_enable, SHOW_SYS}, {sys_ndb_index_stat_update_freq.name, (char*) &sys_ndb_index_stat_update_freq, SHOW_SYS}, +#ifdef HAVE_NDB_BINLOG + {sys_ndb_report_thresh_binlog_epoch_slip.name, + (char*) &sys_ndb_report_thresh_binlog_epoch_slip, SHOW_SYS}, + {sys_ndb_report_thresh_binlog_mem_usage.name, + (char*) &sys_ndb_report_thresh_binlog_mem_usage, SHOW_SYS}, +#endif {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, {sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, diff --git a/sql/slave.cc b/sql/slave.cc index 99bddb7b9b0..41a13f2f5c5 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -39,7 +39,7 @@ typedef bool (*CHECK_KILLED_FUNC)(THD*,void*); volatile bool slave_sql_running = 0, slave_io_running = 0; char* slave_load_tmpdir = 0; -MASTER_INFO *active_mi; +MASTER_INFO *active_mi= 0; my_bool replicate_same_server_id; ulonglong relay_log_space_limit = 0; @@ -2885,6 +2885,47 @@ bool st_relay_log_info::cached_charset_compare(char *charset) return 0; } +/* + Check if the current error is of temporary nature of not. + Some errors are temporary in nature, such as + ER_LOCK_DEADLOCK and ER_LOCK_WAIT_TIMEOUT. Ndb also signals + that the error is temporary by pushing a warning with the error code + ER_GET_TEMPORARY_ERRMSG, if the originating error is temporary. 
+*/
+static int has_temporary_error(THD *thd)
+{
+  if (thd->is_fatal_error)
+    return 0;
+
+  /*
+    Temporary error codes:
+    currently, a deadlock detected by InnoDB, or a lock
+    wait timeout (innodb_lock_wait_timeout exceeded)
+  */
+  if (thd->net.last_errno == ER_LOCK_DEADLOCK ||
+      thd->net.last_errno == ER_LOCK_WAIT_TIMEOUT)
+    return 1;
+
+#ifdef HAVE_NDB_BINLOG
+  /*
+    currently, temporary errors set by ndbcluster
+  */
+  List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+  MYSQL_ERROR *err;
+  while ((err= it++))
+  {
+    DBUG_PRINT("info", ("has warning %d %s", err->code, err->msg));
+    switch (err->code)
+    {
+    case ER_GET_TEMPORARY_ERRMSG:
+      return 1;
+    default:
+      break;
+    }
+  }
+#endif
+  return 0;
+}
 
 static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
 {
@@ -3004,6 +3045,7 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
       ev->when = time(NULL);
       ev->thd = thd; // because up to this point, ev->thd == 0
       exec_res = ev->exec_event(rli);
+      DBUG_PRINT("info", ("exec_event result = %d", exec_res));
       DBUG_ASSERT(rli->sql_thd==thd);
       /*
         Format_description_log_event should not be deleted because it will be
@@ -3017,17 +3059,13 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
     }
     if (slave_trans_retries)
     {
-      if (exec_res &&
-          (thd->net.last_errno == ER_LOCK_DEADLOCK ||
-           thd->net.last_errno == ER_LOCK_WAIT_TIMEOUT) &&
-          !thd->is_fatal_error)
+      if (exec_res && has_temporary_error(thd))
       {
         const char *errmsg;
         /*
           We were in a transaction which has been rolled back because of a
-          deadlock (currently, InnoDB deadlock detected by InnoDB) or lock
-          wait timeout (innodb_lock_wait_timeout exceeded); let's seek back to
-          BEGIN log event and retry it all again.
+          temporary error;
+          let's seek back to BEGIN log event and retry it all again.
           We have to not only seek but also
           a) init_master_info(), to seek back to hot relay log's start for later
           (for when we will come back to this hot log after re-processing the
@@ -3539,10 +3577,39 @@ Slave SQL thread aborted. Can't execute init_slave query");
       {
         // do not scare the user if SQL thread was simply killed or stopped
         if (!sql_slave_killed(thd,rli))
+        {
+          /*
+            Retrieve as much information as possible from the thd (error
+            codes and warnings) and print it to the error log, to allow the
+            user to locate the error.
+          */
+          if (thd->net.last_errno != 0)
+          {
+            if (rli->last_slave_errno == 0)
+            {
+              slave_print_msg(ERROR_LEVEL, rli, thd->net.last_errno,
+                              thd->net.last_error ?
+                              thd->net.last_error : "<no message>");
+            }
+            else if (rli->last_slave_errno != thd->net.last_errno)
+            {
+              sql_print_error("Slave (additional info): %s Error_code: %d",
+                              thd->net.last_error ?
+                              thd->net.last_error : "<no message>",
+                              thd->net.last_errno);
+            }
+          }
+
+          /* Print any warnings issued */
+          List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+          MYSQL_ERROR *err;
+          while ((err= it++))
+            sql_print_warning("Slave: %s Error_code: %d", err->msg, err->code);
+
          sql_print_error("\
Error running query, slave SQL thread aborted. Fix the problem, and restart \
the slave SQL thread with \"SLAVE START\". 
We stopped at log \ '%s' position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos, llbuff)); + } goto err; } } diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 74a5848fa0a..85c5a481d47 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -803,13 +803,14 @@ void free_io_cache(TABLE *table) */ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, - TABLE_LIST *tables) + TABLE_LIST *tables, bool have_lock) { bool result=0; DBUG_ENTER("close_cached_tables"); DBUG_ASSERT(thd || (!if_wait_for_refresh && !tables)); - VOID(pthread_mutex_lock(&LOCK_open)); + if (!have_lock) + VOID(pthread_mutex_lock(&LOCK_open)); if (!tables) { refresh_version++; // Force close of open tables @@ -888,7 +889,8 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, for (TABLE *table=thd->open_tables; table ; table= table->next) table->s->version= refresh_version; } - VOID(pthread_mutex_unlock(&LOCK_open)); + if (!have_lock) + VOID(pthread_mutex_unlock(&LOCK_open)); if (if_wait_for_refresh) { pthread_mutex_lock(&thd->mysys_var->mutex); @@ -2383,7 +2385,7 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name) table->s->table_map_id is not ULONG_MAX. */ -static void assign_new_table_id(TABLE *table) +void assign_new_table_id(TABLE *table) { static ulong last_table_id= ULONG_MAX; diff --git a/sql/sql_class.h b/sql/sql_class.h index c56924774ba..cb8c2818a19 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1383,6 +1383,7 @@ public: #define SYSTEM_THREAD_DELAYED_INSERT 1 #define SYSTEM_THREAD_SLAVE_IO 2 #define SYSTEM_THREAD_SLAVE_SQL 4 +#define SYSTEM_THREAD_NDBCLUSTER_BINLOG 8 /* Used to hold information about file and file structure in exchainge diff --git a/sql/sql_db.cc b/sql/sql_db.cc index d91f091174f..fa01f98d723 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -401,6 +401,7 @@ bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, bool silent) { char path[FN_REFLEN+16]; + char tmp_query[FN_REFLEN+16]; long result= 1; int error= 0; MY_STAT stat_info; @@ -486,15 +487,20 @@ bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, if (!thd->query) // Only in replication { - query= path; - query_length= (uint) (strxmov(path,"create database `", db, "`", NullS) - - path); + query= tmp_query; + query_length= (uint) (strxmov(tmp_query,"create database `", + db, "`", NullS) - tmp_query); } else { query= thd->query; query_length= thd->query_length; } + + ha_binlog_log_query(thd, LOGCOM_CREATE_DB, + query, query_length, + db, ""); + if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, query, query_length, 0, @@ -569,6 +575,10 @@ bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) thd->variables.collation_database= thd->db_charset; } + ha_binlog_log_query(thd, LOGCOM_ALTER_DB, + thd->query, thd->query_length, + db, ""); + if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, 0, diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index cb115adaffb..8238496175c 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -79,7 +79,7 @@ const char *command_name[]={ "Connect","Kill","Debug","Ping","Time","Delayed insert","Change user", "Binlog Dump","Table Dump", "Connect Out", "Register Slave", "Prepare", "Execute", "Long Data", "Close stmt", - "Reset stmt", "Set option", "Fetch", + "Reset stmt", "Set option", "Fetch", "Daemon", "Error" // Last command number }; @@ -149,7 +149,7 @@ static bool end_active_trans(THD *thd) DBUG_RETURN(error); } -static bool begin_trans(THD *thd) +bool 
begin_trans(THD *thd) { int error=0; if (unlikely(thd->in_sub_stmt)) @@ -6682,6 +6682,8 @@ void kill_one_thread(THD *thd, ulong id, bool only_kill_query) I_List_iterator<THD> it(threads); while ((tmp=it++)) { + if (tmp->command == COM_DAEMON) + continue; if (tmp->thread_id == id) { pthread_mutex_lock(&tmp->LOCK_delete); // Lock from delete diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index dd70f90b3da..ed056f62fe3 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -972,6 +972,9 @@ int reset_slave(THD *thd, MASTER_INFO* mi) error=1; goto err; } + + ha_reset_slave(thd); + // delete relay logs, clear relay log coordinates if ((error= purge_relay_logs(&mi->rli, thd, 1 /* just reset */, @@ -1316,6 +1319,13 @@ bool mysql_show_binlog_events(THD* thd) Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(TRUE); + /* + Wait for handlers to insert any pending information + into the binlog. For e.g. ndb which updates the binlog asynchronously + this is needed so that the uses sees all its own commands in the binlog + */ + ha_binlog_wait(thd); + if (mysql_bin_log.is_open()) { LEX_MASTER_INFO *lex_mi= &thd->lex->mi; diff --git a/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp b/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp index a0109506af1..d144d814e5f 100644 --- a/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp +++ b/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp @@ -98,32 +98,11 @@ public: NoError = 0, Undefined = 1, NF_FakeErrorREF = 11, - Busy = 701 - }; -#if 0 - enum ErrorCode { - NoError = 0, - Undefined = 1, - UndefinedTCError = 2, - NF_FakeErrorREF = 11, Busy = 701, - NotMaster = 702, - SeizeError = 703, - EventNotFound = 4710, - EventNameTooLong = 4241, - TooManyEvents = 4242, - BadRequestType = 4247, - InvalidName = 4248, - InvalidPrimaryTable = 4249, - InvalidEventType = 4250, - NotUnique = 4251, - AllocationError = 4252, - CreateEventTableFailed = 4253, - InvalidAttributeOrder = 4255, - Temporary = 0x1 << 16 + NotMaster = 702 }; -#endif - STATIC_CONST( SignalLength = 5 ); + STATIC_CONST( SignalLength = 7 ); + STATIC_CONST( SignalLength2 = SignalLength+1 ); union { // user block reference Uint32 senderRef; @@ -139,16 +118,8 @@ public: }; Uint32 m_errorLine; Uint32 m_errorNode; -#if 0 - bool isTemporary() const - { return (errorCode & Temporary) > 0; } - - void setTemporary() - { errorCode |= Temporary; } - - ErrorCode setTemporary(ErrorCode ec) - { return (ErrorCode) (errorCode = ((Uint32) ec | (Uint32)Temporary)); } -#endif + // with SignalLength2 + Uint32 m_masterNodeId; Uint32 getUserRef() const { return m_userRef; } @@ -179,6 +150,12 @@ public: void setErrorNode(Uint32 val) { m_errorNode = val; } + Uint32 getMasterNode() const { + return m_masterNodeId; + } + void setMasterNode(Uint32 val) { + m_masterNodeId = val; + } }; /** @@ -199,6 +176,11 @@ struct CreateEvntReq { // RT_DICT_ABORT = 0xF << 4, // RT_TC = 5 << 8 }; + enum EventFlags { + EF_REPORT_ALL = 0x1 << 16, + EF_REPORT_SUBSCRIBE = 0x2 << 16, + EF_ALL = 0xFFFF << 16 + }; STATIC_CONST( SignalLengthGet = 3 ); STATIC_CONST( SignalLengthCreate = 6+MAXNROFATTRIBUTESINWORDS ); STATIC_CONST( SignalLength = 8+MAXNROFATTRIBUTESINWORDS ); @@ -217,10 +199,9 @@ struct CreateEvntReq { Uint32 m_tableId; // table to event Uint32 m_tableVersion; // table version AttributeMask::Data m_attrListBitmask; - Uint32 m_eventType; // from DictTabInfo::TableType + Uint32 m_eventType; // EventFlags (16 bits) + from DictTabInfo::TableType (16 bits) Uint32 m_eventId; // event table id set by DICT/SUMA Uint32 m_eventKey; // 
event table key set by DICT/SUMA - Uint32 getUserRef() const { return m_userRef; } @@ -268,10 +249,10 @@ struct CreateEvntReq { AttributeMask::assign(m_attrListBitmask.data, val); } Uint32 getEventType() const { - return m_eventType; + return m_eventType & ~EF_ALL; } void setEventType(Uint32 val) { - m_eventType = (Uint32)val; + m_eventType = (m_eventType & EF_ALL) | (~EF_ALL & (Uint32)val); } Uint32 getEventId() const { return m_eventId; @@ -285,6 +266,27 @@ struct CreateEvntReq { void setEventKey(Uint32 val) { m_eventKey = val; } + void clearFlags() { + m_eventType&= ~EF_ALL; + } + Uint32 getReportFlags() const { + return m_eventType & EF_ALL; + } + void setReportFlags(Uint32 val) { + m_eventType = (val & EF_ALL) | (m_eventType & ~EF_ALL); + } + Uint32 getReportAll() const { + return m_eventType & EF_REPORT_ALL ; + } + void setReportAll() { + m_eventType|= EF_REPORT_ALL; + } + Uint32 getReportSubscribe() const { + return m_eventType & EF_REPORT_SUBSCRIBE ; + } + void setReportSubscribe() { + m_eventType|= EF_REPORT_SUBSCRIBE; + } }; /** @@ -377,42 +379,14 @@ struct CreateEvntRef { friend bool printCREATE_EVNT_REF(FILE*, const Uint32*, Uint32, Uint16); STATIC_CONST( SignalLength = 11 ); + STATIC_CONST( SignalLength2 = SignalLength + 1 ); enum ErrorCode { NoError = 0, Undefined = 1, NF_FakeErrorREF = 11, - Busy = 701 - }; -#if 0 - enum ErrorCode { - NoError = 0, - Undefined = 1, - UndefinedTCError = 2, - NF_FakeErrorREF = 11, Busy = 701, - NotMaster = 702, - SeizeError = 703, - TooManyEvents = 4707, - EventNameTooLong = 4708, - EventNameExists = 746, - EventNotFound = 4731, - AttributeNullable = 4246, - BadRequestType = 4247, - InvalidName = 4248, - InvalidPrimaryTable = 4249, - InvalidEventType = 4250, - NotUnique = 4251, - AllocationError = 4252, - CreateEventTableFailed = 4711, - InvalidAttributeOrder = 4255, - Temporary = 0x1 << 16 + NotMaster = 702 }; - bool isTemporary() const; - void setTemporary(); - ErrorCode setTemporary(ErrorCode ec); - static ErrorCode makeTemporary(ErrorCode ec); -#endif - union { Uint32 m_userRef; // user block reference Uint32 senderRef; // user block reference @@ -431,15 +405,8 @@ struct CreateEvntRef { Uint32 errorCode; Uint32 m_errorLine; Uint32 m_errorNode; - -#if 0 - CreateEvntConf* getConf() { - return &m_conf; - } - const CreateEvntConf* getConf() const { - return &m_conf; - } -#endif + // with SignalLength2 + Uint32 m_masterNodeId; Uint32 getUserRef() const { return m_userRef; } @@ -508,17 +475,11 @@ struct CreateEvntRef { void setErrorNode(Uint32 val) { m_errorNode = val; } + Uint32 getMasterNode() const { + return m_masterNodeId; + } + void setMasterNode(Uint32 val) { + m_masterNodeId = val; + } }; -#if 0 -inline bool CreateEvntRef::isTemporary() const -{ return (errorCode & CreateEvntRef::Temporary) > 0; } -inline void CreateEvntRef::setTemporary() -{ errorCode |= CreateEvntRef::Temporary; } -inline CreateEvntRef::ErrorCode CreateEvntRef::setTemporary(ErrorCode ec) -{ return (CreateEvntRef::ErrorCode) - (errorCode = ((Uint32) ec | (Uint32)CreateEvntRef::Temporary)); } -inline CreateEvntRef::ErrorCode CreateEvntRef::makeTemporary(ErrorCode ec) -{ return (CreateEvntRef::ErrorCode) - ( (Uint32) ec | (Uint32)CreateEvntRef::Temporary ); } -#endif #endif diff --git a/storage/ndb/include/kernel/signaldata/CreateTrig.hpp b/storage/ndb/include/kernel/signaldata/CreateTrig.hpp index 62627256dcf..8fc1e9d48be 100644 --- a/storage/ndb/include/kernel/signaldata/CreateTrig.hpp +++ b/storage/ndb/include/kernel/signaldata/CreateTrig.hpp @@ -146,6 +146,12 @@ 
public: void setMonitorAllAttributes(bool val) { BitmaskImpl::setField(1, &m_triggerInfo, 25, 1, val); } + bool getReportAllMonitoredAttributes() const { + return BitmaskImpl::getField(1, &m_triggerInfo, 26, 1); + } + void setReportAllMonitoredAttributes(bool val) { + BitmaskImpl::setField(1, &m_triggerInfo, 26, 1, val); + } Uint32 getOnline() const { return m_online; } diff --git a/storage/ndb/include/kernel/signaldata/DihFragCount.hpp b/storage/ndb/include/kernel/signaldata/DihFragCount.hpp new file mode 100644 index 00000000000..55e3c7bef4c --- /dev/null +++ b/storage/ndb/include/kernel/signaldata/DihFragCount.hpp @@ -0,0 +1,67 @@ +/* Copyright (C) 2006 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef DIH_FRAG_COUNT_HPP +#define DIH_FRAG_COUNT_HPP + +#include "SignalData.hpp" + +/** + * DihFragCountReq + */ +class DihFragCountReq { + +public: + STATIC_CONST( SignalLength = 3 ); + STATIC_CONST( RetryInterval = 5 ); + Uint32 m_connectionData; + Uint32 m_tableRef; + Uint32 m_senderData; +}; + +/** + * DihFragCountConf + */ +class DihFragCountConf { + +public: + STATIC_CONST( SignalLength = 5 ); + Uint32 m_connectionData; + Uint32 m_tableRef; + Uint32 m_senderData; + Uint32 m_fragmentCount; + Uint32 m_noOfBackups; +}; + +/** + * DihFragCountRef + */ +class DihFragCountRef { + +public: + enum ErrorCode { + ErroneousState = 0, + ErroneousTableState = 1 + }; + STATIC_CONST( SignalLength = 5 ); + Uint32 m_connectionData; + Uint32 m_tableRef; + Uint32 m_senderData; + Uint32 m_error; + Uint32 m_tableStatus; // Dbdih::TabRecord::tabStatus +}; + +#endif diff --git a/storage/ndb/include/kernel/signaldata/SumaImpl.hpp b/storage/ndb/include/kernel/signaldata/SumaImpl.hpp index 930d1c7c079..561c382a80d 100644 --- a/storage/ndb/include/kernel/signaldata/SumaImpl.hpp +++ b/storage/ndb/include/kernel/signaldata/SumaImpl.hpp @@ -39,7 +39,9 @@ struct SubCreateReq { RemoveFlags = 0xff, GetFlags = 0xff << 16, AddTableFlag = 0x1 << 16, - RestartFlag = 0x2 << 16 + RestartFlag = 0x2 << 16, + ReportAll = 0x4 << 16, + ReportSubscribe= 0x8 << 16 }; Uint32 senderRef; @@ -115,7 +117,8 @@ struct SubStartRef { enum ErrorCode { Undefined = 1, NF_FakeErrorREF = 11, - Busy = 701 + Busy = 701, + NotMaster = 702 }; STATIC_CONST( SignalLength = 7 ); @@ -130,7 +133,10 @@ struct SubStartRef { // do not change the order here! 
Uint32 errorCode; // with SignalLength2 - Uint32 subscriberRef; + union { + Uint32 subscriberRef; + Uint32 m_masterNodeId; + }; }; struct SubStartConf { @@ -181,10 +187,12 @@ struct SubStopRef { enum ErrorCode { Undefined = 1, NF_FakeErrorREF = 11, - Busy = 701 + Busy = 701, + NotMaster = 702 }; STATIC_CONST( SignalLength = 8 ); + STATIC_CONST( SignalLength2 = SignalLength+1 ); Uint32 senderRef; Uint32 senderData; @@ -194,6 +202,8 @@ struct SubStopRef { Uint32 subscriberData; Uint32 subscriberRef; Uint32 errorCode; + // with SignalLength2 + Uint32 m_masterNodeId; }; struct SubStopConf { @@ -304,7 +314,7 @@ struct SubTableData { Uint32 tableId; Uint8 operation; Uint8 req_nodeid; - Uint8 not_used2; + Uint8 ndbd_nodeid; Uint8 not_used3; Uint32 logType; }; diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp index 7ba906b24af..2599a391318 100644 --- a/storage/ndb/include/ndbapi/NdbDictionary.hpp +++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp @@ -1036,6 +1036,9 @@ public: TE_GCP_COMPLETE=1<<7, ///< GCP is complete TE_CLUSTER_FAILURE=1<<8, ///< Cluster is unavailable TE_STOP =1<<9, ///< Stop of event operation + TE_NODE_FAILURE=1<<10, ///< Node failed + TE_SUBSCRIBE =1<<11, ///< Node subscribes + TE_UNSUBSCRIBE =1<<12, ///< Node unsubscribes TE_ALL=0xFFFF ///< Any/all event on table (not relevant when ///< events are received) }; @@ -1052,7 +1055,10 @@ public: _TE_GCP_COMPLETE=7, _TE_CLUSTER_FAILURE=8, _TE_STOP=9, - _TE_NUL=10 // internal (INS o DEL within same GCI) + _TE_NODE_FAILURE=10, + _TE_SUBSCRIBE=11, + _TE_UNSUBSCRIBE=12, + _TE_NUL=13 // internal (INS o DEL within same GCI) }; #endif /** @@ -1081,6 +1087,15 @@ public: }; /** + * Specifies reporting options for table events + */ + enum EventReport { + ER_UPDATED = 0, + ER_ALL = 1, + ER_SUBSCRIBE = 2 + }; + + /** * Constructor * @param name Name of event */ @@ -1135,6 +1150,14 @@ public: * Get durability of the event */ EventDurability getDurability() const; + /** + * Set report option of the event + */ + void setReport(EventReport); + /** + * Get report option of the event + */ + EventReport getReport() const; #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL void addColumn(const Column &c); #endif diff --git a/storage/ndb/include/ndbapi/NdbError.hpp b/storage/ndb/include/ndbapi/NdbError.hpp index da322897dc6..83f24b36313 100644 --- a/storage/ndb/include/ndbapi/NdbError.hpp +++ b/storage/ndb/include/ndbapi/NdbError.hpp @@ -173,7 +173,12 @@ struct NdbError { /** * Schema object already exists */ - SchemaObjectExists = ndberror_cl_schema_object_already_exists + SchemaObjectExists = ndberror_cl_schema_object_already_exists, + + /** + * Request sent to non master + */ + InternalTemporary = ndberror_cl_internal_temporary }; /** diff --git a/storage/ndb/include/ndbapi/NdbEventOperation.hpp b/storage/ndb/include/ndbapi/NdbEventOperation.hpp index 0572147410b..4419982be52 100644 --- a/storage/ndb/include/ndbapi/NdbEventOperation.hpp +++ b/storage/ndb/include/ndbapi/NdbEventOperation.hpp @@ -210,6 +210,7 @@ public: int hasError() const; int getReqNodeId() const; + int getNdbdNodeId() const; #endif #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL diff --git a/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp index d5c051b7d2a..2ecfb7ab9ad 100644 --- a/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp +++ b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp @@ -17,6 +17,7 @@ #ifndef CLUSTER_CONNECTION_HPP #define CLUSTER_CONNECTION_HPP +#include 
<ndb_types.h> class Ndb_cluster_connection_node_iter { diff --git a/storage/ndb/include/ndbapi/ndberror.h b/storage/ndb/include/ndbapi/ndberror.h index 0088bbdcc63..cf03eb9da34 100644 --- a/storage/ndb/include/ndbapi/ndberror.h +++ b/storage/ndb/include/ndbapi/ndberror.h @@ -50,7 +50,8 @@ typedef enum ndberror_cl_unknown_error_code = 14, ndberror_cl_node_shutdown = 15, ndberror_cl_configuration = 16, - ndberror_cl_schema_object_already_exists = 17 + ndberror_cl_schema_object_already_exists = 17, + ndberror_cl_internal_temporary = 18 } ndberror_classification_enum; diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp index 04aee6cc34e..5c79277521c 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp @@ -25,6 +25,7 @@ #include <signaldata/NodeFailRep.hpp> #include <signaldata/ReadNodesConf.hpp> +#include <signaldata/DihFragCount.hpp> #include <signaldata/ScanFrag.hpp> #include <signaldata/GetTabInfo.hpp> @@ -1302,6 +1303,7 @@ Backup::sendCreateTrig(Signal* signal, for (int i=0; i < 3; i++) { req->setTriggerEvent(triggerEventValues[i]); + req->setReportAllMonitoredAttributes(false); BaseString::snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i], ptr.p->backupId, tabPtr.p->tableId); w.reset(); @@ -2955,10 +2957,12 @@ next: } ndbrequire(ptr.p->tables.first(tabPtr)); - signal->theData[0] = RNIL; - signal->theData[1] = tabPtr.p->tableId; - signal->theData[2] = ptr.i; - sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, 3, JBB); + DihFragCountReq * const req = (DihFragCountReq*)signal->getDataPtrSend(); + req->m_connectionData = RNIL; + req->m_tableRef = tabPtr.p->tableId; + req->m_senderData = ptr.i; + sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, + DihFragCountReq::SignalLength, JBB); return; }//if @@ -3131,11 +3135,11 @@ void Backup::execDI_FCOUNTCONF(Signal* signal) { jamEntry(); - - const Uint32 userPtr = signal->theData[0]; - const Uint32 fragCount = signal->theData[1]; - const Uint32 tableId = signal->theData[2]; - const Uint32 senderData = signal->theData[3]; + DihFragCountConf * const conf = (DihFragCountConf*)signal->getDataPtr(); + const Uint32 userPtr = conf->m_connectionData; + const Uint32 fragCount = conf->m_fragmentCount; + const Uint32 tableId = conf->m_tableRef; + const Uint32 senderData = conf->m_senderData; ndbrequire(userPtr == RNIL && signal->length() == 5); @@ -3162,10 +3166,12 @@ Backup::execDI_FCOUNTCONF(Signal* signal) */ if(ptr.p->tables.next(tabPtr)) { jam(); - signal->theData[0] = RNIL; - signal->theData[1] = tabPtr.p->tableId; - signal->theData[2] = ptr.i; - sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, 3, JBB); + DihFragCountReq * const req = (DihFragCountReq*)signal->getDataPtrSend(); + req->m_connectionData = RNIL; + req->m_tableRef = tabPtr.p->tableId; + req->m_senderData = ptr.i; + sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, + DihFragCountReq::SignalLength, JBB); return; }//if diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 791a4ed439d..2ebf06a0219 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -456,6 +456,11 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w, Uint32 count = 2 + data[0] * data[1]; w.add(DictTabInfo::FragmentDataLen, 2*count); w.add(DictTabInfo::FragmentData, data, 2*count); + ndbrequire(count > 0); + } + else + { + ndbrequire(false); } } @@ -2006,9 +2011,9 @@ void 
Dbdict::execREAD_CONFIG_REQ(Signal* signal) c_opCreateTable.setSize(8); c_opDropTable.setSize(8); c_opCreateIndex.setSize(8); - c_opCreateEvent.setSize(8); - c_opSubEvent.setSize(8); - c_opDropEvent.setSize(8); + c_opCreateEvent.setSize(2); + c_opSubEvent.setSize(2); + c_opDropEvent.setSize(2); c_opSignalUtil.setSize(8); c_opDropIndex.setSize(8); c_opAlterIndex.setSize(8); @@ -7078,6 +7083,20 @@ void Dbdict::execGET_TABINFOREQ(Signal* signal) sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined); return; }//if + + if (DictTabInfo::isTable(objEntry->m_tableType) || + DictTabInfo::isIndex(objEntry->m_tableType)) + { + jam(); + TableRecordPtr tabPtr; + c_tableRecordPool.getPtr(tabPtr, obj_id); + if (tabPtr.p->tabState != TableRecord::DEFINED) + { + jam(); + sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined); + return; + } + } c_retrieveRecord.busyState = true; c_retrieveRecord.blockRef = req->senderRef; @@ -8655,6 +8674,23 @@ Dbdict::execCREATE_EVNT_REQ(Signal* signal) const CreateEvntReq::RequestType requestType = req->getRequestType(); const Uint32 requestFlag = req->getRequestFlag(); + if (refToBlock(signal->senderBlockRef()) != DBDICT && + getOwnNodeId() != c_masterNodeId) + { + jam(); + releaseSections(signal); + + CreateEvntRef * ref = (CreateEvntRef *)signal->getDataPtrSend(); + ref->setUserRef(reference()); + ref->setErrorCode(CreateEvntRef::NotMaster); + ref->setErrorLine(__LINE__); + ref->setErrorNode(reference()); + ref->setMasterNode(c_masterNodeId); + sendSignal(signal->senderBlockRef(), GSN_CREATE_EVNT_REF, signal, + CreateEvntRef::SignalLength2, JBB); + return; + } + OpCreateEventPtr evntRecPtr; // Seize a Create Event record if (!c_opCreateEvent.seize(evntRecPtr)) { @@ -8946,7 +8982,8 @@ Dbdict::createEventUTIL_PREPARE(Signal* signal, break; case CreateEvntReq::RT_USER_CREATE: { - evntRecPtr.p->m_eventRec.EVENT_TYPE = evntRecPtr.p->m_request.getEventType(); + evntRecPtr.p->m_eventRec.EVENT_TYPE = + evntRecPtr.p->m_request.getEventType() | evntRecPtr.p->m_request.getReportFlags(); evntRecPtr.p->m_eventRec.TABLEID = evntRecPtr.p->m_request.getTableId(); evntRecPtr.p->m_eventRec.TABLEVERSION=evntRecPtr.p->m_request.getTableVersion(); AttributeMask m = evntRecPtr.p->m_request.getAttrListBitmask(); @@ -9161,6 +9198,7 @@ void Dbdict::createEventUTIL_EXECUTE(Signal *signal, parseReadEventSys(signal, evntRecPtr.p->m_eventRec); evntRec->m_request.setEventType(evntRecPtr.p->m_eventRec.EVENT_TYPE); + evntRec->m_request.setReportFlags(evntRecPtr.p->m_eventRec.EVENT_TYPE); evntRec->m_request.setTableId(evntRecPtr.p->m_eventRec.TABLEID); evntRec->m_request.setTableVersion(evntRecPtr.p->m_eventRec.TABLEVERSION); evntRec->m_request.setAttrListBitmask(*(AttributeMask*) @@ -9392,6 +9430,7 @@ void Dbdict::execCREATE_EVNT_CONF(Signal* signal) void Dbdict::createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPtr){ + DBUG_ENTER("Dbdict::createEvent_RT_DICT_AFTER_GET"); jam(); evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef()); @@ -9412,6 +9451,10 @@ Dbdict::createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPt sumaReq->subscriptionId = evntRecPtr.p->m_request.getEventId(); sumaReq->subscriptionKey = evntRecPtr.p->m_request.getEventKey(); sumaReq->subscriptionType = SubCreateReq::TableEvent; + if (evntRecPtr.p->m_request.getReportAll()) + sumaReq->subscriptionType|= SubCreateReq::ReportAll; + if (evntRecPtr.p->m_request.getReportSubscribe()) + sumaReq->subscriptionType|= SubCreateReq::ReportSubscribe; sumaReq->tableId = 
evntRecPtr.p->m_request.getTableId(); #ifdef EVENT_PH2_DEBUG @@ -9420,6 +9463,7 @@ Dbdict::createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPt sendSignal(SUMA_REF, GSN_SUB_CREATE_REQ, signal, SubCreateReq::SignalLength, JBB); + DBUG_VOID_RETURN; } void Dbdict::execSUB_CREATE_REF(Signal* signal) @@ -9582,6 +9626,20 @@ void Dbdict::execSUB_START_REQ(Signal* signal) Uint32 origSenderRef = signal->senderBlockRef(); + if (refToBlock(origSenderRef) != DBDICT && + getOwnNodeId() != c_masterNodeId) + { + /* + * Coordinator but not master + */ + SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend(); + ref->senderRef = reference(); + ref->errorCode = SubStartRef::NotMaster; + ref->m_masterNodeId = c_masterNodeId; + sendSignal(origSenderRef, GSN_SUB_START_REF, signal, + SubStartRef::SignalLength2, JBB); + return; + } OpSubEventPtr subbPtr; if (!c_opSubEvent.seize(subbPtr)) { SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend(); @@ -9761,6 +9819,9 @@ void Dbdict::completeSubStartReq(Signal* signal, #ifdef EVENT_DEBUG ndbout_c("SUB_START_REF"); #endif + SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend(); + ref->senderRef = reference(); + ref->errorCode = subbPtr.p->m_errorCode; sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF, signal, SubStartRef::SignalLength, JBB); if (subbPtr.p->m_reqTracker.hasConf()) { @@ -9789,6 +9850,20 @@ void Dbdict::execSUB_STOP_REQ(Signal* signal) Uint32 origSenderRef = signal->senderBlockRef(); + if (refToBlock(origSenderRef) != DBDICT && + getOwnNodeId() != c_masterNodeId) + { + /* + * Coordinator but not master + */ + SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend(); + ref->senderRef = reference(); + ref->errorCode = SubStopRef::NotMaster; + ref->m_masterNodeId = c_masterNodeId; + sendSignal(origSenderRef, GSN_SUB_STOP_REF, signal, + SubStopRef::SignalLength2, JBB); + return; + } OpSubEventPtr subbPtr; if (!c_opSubEvent.seize(subbPtr)) { SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend(); @@ -9982,6 +10057,23 @@ Dbdict::execDROP_EVNT_REQ(Signal* signal) const Uint32 senderRef = signal->senderBlockRef(); OpDropEventPtr evntRecPtr; + if (refToBlock(senderRef) != DBDICT && + getOwnNodeId() != c_masterNodeId) + { + jam(); + releaseSections(signal); + + DropEvntRef * ref = (DropEvntRef *)signal->getDataPtrSend(); + ref->setUserRef(reference()); + ref->setErrorCode(DropEvntRef::NotMaster); + ref->setErrorLine(__LINE__); + ref->setErrorNode(reference()); + ref->setMasterNode(c_masterNodeId); + sendSignal(senderRef, GSN_DROP_EVNT_REF, signal, + DropEvntRef::SignalLength2, JBB); + return; + } + // Seize a Create Event record if (!c_opDropEvent.seize(evntRecPtr)) { // Failed to allocate event record @@ -10825,6 +10917,7 @@ Dbdict::alterIndex_toCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr) if (indexPtr.p->isHashIndex()) { req->setTriggerType(TriggerType::SECONDARY_INDEX); req->setMonitorReplicas(false); + req->setReportAllMonitoredAttributes(true); // insert if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) req->setTriggerId(indexPtr.p->insertTriggerId); @@ -10867,6 +10960,7 @@ Dbdict::alterIndex_toCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr) req->setTriggerType(TriggerType::ORDERED_INDEX); req->setTriggerActionTime(TriggerActionTime::TA_CUSTOM); req->setMonitorReplicas(true); + req->setReportAllMonitoredAttributes(true); // one trigger for 5 events (insert, update, delete, commit, abort) if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) req->setTriggerId(indexPtr.p->customTriggerId); @@ -11383,6 
+11477,7 @@ Dbdict::buildIndex_toCreateConstr(Signal* signal, OpBuildIndexPtr opPtr) req->setTriggerEvent(TriggerEvent::TE_UPDATE); req->setMonitorReplicas(false); req->setMonitorAllAttributes(false); + req->setReportAllMonitoredAttributes(true); req->setOnline(true); // alter online after create req->setReceiverRef(0); // no receiver, REF-ed by TUP req->getAttributeMask().clear(); @@ -11942,6 +12037,7 @@ Dbdict::createTrigger_slaveCreate(Signal* signal, OpCreateTriggerPtr opPtr) triggerPtr.p->triggerEvent = req->getTriggerEvent(); triggerPtr.p->monitorReplicas = req->getMonitorReplicas(); triggerPtr.p->monitorAllAttributes = req->getMonitorAllAttributes(); + triggerPtr.p->reportAllMonitoredAttributes = req->getReportAllMonitoredAttributes(); triggerPtr.p->attributeMask = req->getAttributeMask(); triggerPtr.p->triggerState = TriggerRecord::TS_OFFLINE; // add to hash table @@ -12774,6 +12870,7 @@ Dbdict::alterTrigger_toCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr) req->setTriggerEvent(triggerPtr.p->triggerEvent); req->setMonitorReplicas(triggerPtr.p->monitorReplicas); req->setMonitorAllAttributes(triggerPtr.p->monitorAllAttributes); + req->setReportAllMonitoredAttributes(triggerPtr.p->reportAllMonitoredAttributes); req->setOnline(true); req->setReceiverRef(opPtr.p->m_request.getReceiverRef()); BlockReference blockRef = 0; diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index f2b0210288a..7ad5e0d8b49 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -414,7 +414,10 @@ public: /** Monitor all, the trigger monitors changes of all attributes in table */ bool monitorAllAttributes; - + + /** Report all, the trigger reports changes of all monitored attributes */ + bool reportAllMonitoredAttributes; + /** * Attribute mask, defines what attributes are to be monitored. * Can be seen as a compact representation of SQL column name list.
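As a usage sketch of the report options wired through CreateEvntReq and the trigger records above: with the NdbDictionary::Event::setReport() API added in the NdbDictionary.hpp hunk, an NDB API client could request that subscribe/unsubscribe reports be delivered to all subscribers roughly as follows. This is a minimal sketch and not part of the change set; the event name is a hypothetical placeholder and error handling is omitted.

  #include <NdbApi.hpp>

  // Minimal sketch assuming the new setReport()/EventReport API above.
  void create_reporting_event(NdbDictionary::Dictionary *dict,
                              const NdbDictionary::Table &tab)
  {
    NdbDictionary::Event ev("REPL$db/tab");       // hypothetical event name
    ev.setTable(tab);
    ev.addTableEvent(NdbDictionary::Event::TE_ALL);
    // ER_SUBSCRIBE is forwarded as EF_REPORT_SUBSCRIBE in CreateEvntReq,
    // so subscribers also see _TE_SUBSCRIBE/_TE_UNSUBSCRIBE reports
    ev.setReport(NdbDictionary::Event::ER_SUBSCRIBE);
    for (int i = 0; i < tab.getNoOfColumns(); i++)
      ev.addEventColumn(i);                       // monitor every column
    dict->createEvent(ev);                        // error handling omitted
  }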
@@ -1580,16 +1583,15 @@ private: RequestTracker m_reqTracker; // state info CreateEvntReq::RequestType m_requestType; - Uint32 m_requestFlag; // error info Uint32 m_errorCode; Uint32 m_errorLine; - Uint32 m_errorNode; + Uint32 m_errorNode; /* also used to store master node id + in case of NotMaster */ // ctor OpCreateEvent() { memset(&m_request, 0, sizeof(m_request)); m_requestType = CreateEvntReq::RT_UNDEFINED; - m_requestFlag = 0; m_errorCode = CreateEvntRef::NoError; m_errorLine = 0; m_errorNode = 0; @@ -1600,7 +1602,6 @@ private: m_errorLine = 0; m_errorNode = 0; m_requestType = req->getRequestType(); - m_requestFlag = req->getRequestFlag(); } bool hasError() { return m_errorCode != CreateEvntRef::NoError; diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp index fb8372a6c9c..18b0929b017 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp @@ -67,12 +67,6 @@ // Error Codes for Add Table // -------------------------------------- #define ZREPLERROR1 306 -#define ZNOTIMPLEMENTED 307 -#define ZTABLEINSTALLED 310 -// -------------------------------------- -// Error Codes for Scan Table -// -------------------------------------- -#define ZERRONOUSSTATE 308 // -------------------------------------- // Crash Codes diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 61b60cbc294..2938d1197aa 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -67,6 +67,7 @@ #include <signaldata/CreateFragmentation.hpp> #include <signaldata/LqhFrag.hpp> #include <signaldata/FsOpenReq.hpp> +#include <signaldata/DihFragCount.hpp> #include <DebuggerNames.hpp> #include <EventLogger.hpp> @@ -7342,39 +7343,66 @@ void Dbdih::execDIVERIFYREQ(Signal* signal) void Dbdih::execDI_FCOUNTREQ(Signal* signal) { + DihFragCountReq * const req = (DihFragCountReq*)signal->getDataPtr(); ConnectRecordPtr connectPtr; TabRecordPtr tabPtr; + const BlockReference senderRef = signal->senderBlockRef(); + const Uint32 senderData = req->m_senderData; jamEntry(); - connectPtr.i = signal->theData[0]; - tabPtr.i = signal->theData[1]; + connectPtr.i = req->m_connectionData; + tabPtr.i = req->m_tableRef; ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE); + if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) + { + DihFragCountRef* ref = (DihFragCountRef*)signal->getDataPtrSend(); + //connectPtr.i == RNIL -> question without connect record + if(connectPtr.i == RNIL) + ref->m_connectionData = RNIL; + else + ref->m_connectionData = connectPtr.p->userpointer; + ref->m_tableRef = tabPtr.i; + ref->m_senderData = senderData; + ref->m_error = DihFragCountRef::ErroneousTableState; + ref->m_tableStatus = tabPtr.p->tabStatus; + sendSignal(senderRef, GSN_DI_FCOUNTREF, signal, + DihFragCountRef::SignalLength, JBB); + return; + } if(connectPtr.i != RNIL){ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord); if (connectPtr.p->connectState == ConnectRecord::INUSE) { jam(); - signal->theData[0] = connectPtr.p->userpointer; - signal->theData[1] = tabPtr.p->totalfragments; - sendSignal(connectPtr.p->userblockref, GSN_DI_FCOUNTCONF, signal,2, JBB); + DihFragCountConf* conf = (DihFragCountConf*)signal->getDataPtrSend(); + conf->m_connectionData = connectPtr.p->userpointer; + conf->m_tableRef = tabPtr.i; + conf->m_senderData = senderData; + conf->m_fragmentCount = 
tabPtr.p->totalfragments; + conf->m_noOfBackups = tabPtr.p->noOfBackups; + sendSignal(connectPtr.p->userblockref, GSN_DI_FCOUNTCONF, signal, + DihFragCountConf::SignalLength, JBB); return; }//if - signal->theData[0] = connectPtr.p->userpointer; - signal->theData[1] = ZERRONOUSSTATE; - sendSignal(connectPtr.p->userblockref, GSN_DI_FCOUNTREF, signal, 2, JBB); + DihFragCountRef* ref = (DihFragCountRef*)signal->getDataPtrSend(); + ref->m_connectionData = connectPtr.p->userpointer; + ref->m_tableRef = tabPtr.i; + ref->m_senderData = senderData; + ref->m_error = DihFragCountRef::ErroneousTableState; + ref->m_tableStatus = tabPtr.p->tabStatus; + sendSignal(connectPtr.p->userblockref, GSN_DI_FCOUNTREF, signal, + DihFragCountRef::SignalLength, JBB); return; }//if - + DihFragCountConf* conf = (DihFragCountConf*)signal->getDataPtrSend(); //connectPtr.i == RNIL -> question without connect record - const Uint32 senderData = signal->theData[2]; - const BlockReference senderRef = signal->senderBlockRef(); - signal->theData[0] = RNIL; - signal->theData[1] = tabPtr.p->totalfragments; - signal->theData[2] = tabPtr.i; - signal->theData[3] = senderData; - signal->theData[4] = tabPtr.p->noOfBackups; - sendSignal(senderRef, GSN_DI_FCOUNTCONF, signal, 5, JBB); + conf->m_connectionData = RNIL; + conf->m_tableRef = tabPtr.i; + conf->m_senderData = senderData; + conf->m_fragmentCount = tabPtr.p->totalfragments; + conf->m_noOfBackups = tabPtr.p->noOfBackups; + sendSignal(senderRef, GSN_DI_FCOUNTCONF, signal, + DihFragCountConf::SignalLength, JBB); }//Dbdih::execDI_FCOUNTREQ() void Dbdih::execDIGETPRIMREQ(Signal* signal) diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index b6c3ae82144..f7888a8059d 100644 --- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -38,6 +38,7 @@ #include <signaldata/TcContinueB.hpp> #include <signaldata/TcKeyFailConf.hpp> #include <signaldata/AbortAll.hpp> +#include <signaldata/DihFragCount.hpp> #include <signaldata/ScanFrag.hpp> #include <signaldata/ScanTab.hpp> #include <signaldata/PrepDropTab.hpp> @@ -8886,9 +8887,11 @@ void Dbtc::diFcountReqLab(Signal* signal, ScanRecordPtr scanptr) * THE FIRST STEP TO RECEIVE IS SUCCESSFULLY COMPLETED. * WE MUST FIRST GET THE NUMBER OF FRAGMENTS IN THE TABLE. 
***************************************************/ - signal->theData[0] = tcConnectptr.p->dihConnectptr; - signal->theData[1] = scanptr.p->scanTableref; - sendSignal(cdihblockref, GSN_DI_FCOUNTREQ, signal, 2, JBB); + DihFragCountReq * const req = (DihFragCountReq*)signal->getDataPtrSend(); + req->m_connectionData = tcConnectptr.p->dihConnectptr; + req->m_tableRef = scanptr.p->scanTableref; + sendSignal(cdihblockref, GSN_DI_FCOUNTREQ, signal, + DihFragCountReq::SignalLength, JBB); } else { @@ -8899,17 +8902,18 @@ void Dbtc::diFcountReqLab(Signal* signal, ScanRecordPtr scanptr) UintR TerrorIndicator = signal->theData[0]; jamEntry(); if (TerrorIndicator != 0) { - signal->theData[0] = tcConnectptr.i; - //signal->theData[1] Contains error + DihFragCountRef * const ref = (DihFragCountRef*)signal->getDataPtr(); + ref->m_connectionData = tcConnectptr.i; + ref->m_error = signal->theData[1]; execDI_FCOUNTREF(signal); return; } UintR Tdata1 = signal->theData[1]; scanptr.p->scanNextFragId = Tdata1; - - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = 1; // Frag count + DihFragCountConf * const conf = (DihFragCountConf*)signal->getDataPtr(); + conf->m_connectionData = tcConnectptr.i; + conf->m_fragmentCount = 1; // Frag count execDI_FCOUNTCONF(signal); } return; @@ -8927,8 +8931,9 @@ void Dbtc::diFcountReqLab(Signal* signal, ScanRecordPtr scanptr) void Dbtc::execDI_FCOUNTCONF(Signal* signal) { jamEntry(); - tcConnectptr.i = signal->theData[0]; - Uint32 tfragCount = signal->theData[1]; + DihFragCountConf * const conf = (DihFragCountConf*)signal->getDataPtr(); + tcConnectptr.i = conf->m_connectionData; + Uint32 tfragCount = conf->m_fragmentCount; ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); apiConnectptr.i = tcConnectptr.p->apiConnect; ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); @@ -9011,9 +9016,10 @@ void Dbtc::execDI_FCOUNTCONF(Signal* signal) void Dbtc::execDI_FCOUNTREF(Signal* signal) { jamEntry(); - tcConnectptr.i = signal->theData[0]; + DihFragCountRef * const ref = (DihFragCountRef*)signal->getDataPtr(); + tcConnectptr.i = ref->m_connectionData; ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - const Uint32 errCode = signal->theData[1]; + const Uint32 errCode = ref->m_error; apiConnectptr.i = tcConnectptr.p->apiConnect; ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); ScanRecordPtr scanptr; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp index 7d496349e1d..080c9cbb589 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp @@ -266,6 +266,7 @@ Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req) ljam(); tptr.p->sendBeforeValues = false; } + /* tptr.p->sendOnlyChangedAttributes = false; if (((tptr.p->triggerType == TriggerType::SUBSCRIPTION) || (tptr.p->triggerType == TriggerType::SUBSCRIPTION_BEFORE)) && @@ -273,7 +274,8 @@ Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req) ljam(); tptr.p->sendOnlyChangedAttributes = true; } - + */ + tptr.p->sendOnlyChangedAttributes = !req->getReportAllMonitoredAttributes(); // Set monitor all tptr.p->monitorAllAttributes = req->getMonitorAllAttributes(); tptr.p->monitorReplicas = req->getMonitorReplicas(); diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index 43eb3a965b3..44ff6e97110 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ 
b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -43,11 +43,13 @@ #include <signaldata/CreateTab.hpp> #include <signaldata/DropTab.hpp> #include <signaldata/AlterTab.hpp> +#include <signaldata/DihFragCount.hpp> #include <ndbapi/NdbDictionary.hpp> #include <DebuggerNames.hpp> #include <../dbtup/Dbtup.hpp> +#include <../dbdih/Dbdih.hpp> //#define HANDOVER_DEBUG //#define NODEFAIL_DEBUG @@ -1023,6 +1025,10 @@ Suma::execSUB_CREATE_REQ(Signal* signal) const Uint32 flags = req.subscriptionType & SubCreateReq::GetFlags; const bool addTableFlag = (flags & SubCreateReq::AddTableFlag) != 0; const bool restartFlag = (flags & SubCreateReq::RestartFlag) != 0; + const Uint32 reportAll = (flags & SubCreateReq::ReportAll) ? + Subscription::REPORT_ALL : 0; + const Uint32 reportSubscribe = (flags & SubCreateReq::ReportSubscribe) ? + Subscription::REPORT_SUBSCRIBE : 0; const Uint32 tableId = req.tableId; Subscription key; @@ -1071,11 +1077,13 @@ Suma::execSUB_CREATE_REQ(Signal* signal) subPtr.p->m_subscriptionId = subId; subPtr.p->m_subscriptionKey = subKey; subPtr.p->m_subscriptionType = type; + subPtr.p->m_options = reportSubscribe | reportAll; subPtr.p->m_tableId = tableId; subPtr.p->m_table_ptrI = RNIL; subPtr.p->m_state = Subscription::DEFINED; subPtr.p->n_subscribers = 0; + fprintf(stderr, "table %d options %x\n", subPtr.p->m_tableId, subPtr.p->m_options); DBUG_PRINT("info",("Added: key.m_subscriptionId: %u, key.m_subscriptionKey: %u", key.m_subscriptionId, key.m_subscriptionKey)); @@ -1167,6 +1175,8 @@ Suma::execSUB_SYNC_REQ(Signal* signal) TablePtr tabPtr; initTable(signal,subPtr.p->m_tableId,tabPtr,syncPtr); tabPtr.p->n_subscribers++; + if (subPtr.p->m_options & Subscription::REPORT_ALL) + tabPtr.p->m_reportAll = true; DBUG_PRINT("info",("Suma::Table[%u]::n_subscribers: %u", tabPtr.p->m_tableId, tabPtr.p->n_subscribers)); DBUG_VOID_RETURN; @@ -1532,10 +1542,12 @@ Suma::execGET_TABINFO_CONF(Signal* signal){ * We need to gather fragment info */ jam(); - signal->theData[0] = RNIL; - signal->theData[1] = tableId; - signal->theData[2] = tabPtr.i; - sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, 3, JBB); + DihFragCountReq* req = (DihFragCountReq*)signal->getDataPtrSend(); + req->m_connectionData = RNIL; + req->m_tableRef = tableId; + req->m_senderData = tabPtr.i; + sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, + DihFragCountReq::SignalLength, JBB); } bool @@ -1681,20 +1693,52 @@ ToDo handle this } void +Suma::execDI_FCOUNTREF(Signal* signal) +{ + jamEntry(); + DBUG_ENTER("Suma::execDI_FCOUNTREF"); + DihFragCountRef * const ref = (DihFragCountRef*)signal->getDataPtr(); + switch ((DihFragCountRef::ErrorCode) ref->m_error) + { + case DihFragCountRef::ErroneousTableState: + jam(); + if (ref->m_tableStatus == Dbdih::TabRecord::TS_CREATING) + { + const Uint32 tableId = ref->m_senderData; + const Uint32 tabPtr_i = ref->m_tableRef; + DihFragCountReq * const req = (DihFragCountReq*)signal->getDataPtrSend(); + + req->m_connectionData = RNIL; + req->m_tableRef = tabPtr_i; + req->m_senderData = tableId; + sendSignalWithDelay(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, + DihFragCountReq::SignalLength, + DihFragCountReq::RetryInterval); + DBUG_VOID_RETURN; + } + ndbrequire(false); + default: + ndbrequire(false); + } + + DBUG_VOID_RETURN; +} + +void Suma::execDI_FCOUNTCONF(Signal* signal) { jamEntry(); DBUG_ENTER("Suma::execDI_FCOUNTCONF"); ndbassert(signal->getNoOfSections() == 0); - - const Uint32 userPtr = signal->theData[0]; - const Uint32 fragCount = signal->theData[1]; - const Uint32 tableId = signal->theData[2]; + 
DihFragCountConf * const conf = (DihFragCountConf*)signal->getDataPtr(); + const Uint32 userPtr = conf->m_connectionData; + const Uint32 fragCount = conf->m_fragmentCount; + const Uint32 tableId = conf->m_tableRef; ndbrequire(userPtr == RNIL && signal->length() == 5); TablePtr tabPtr; - tabPtr.i= signal->theData[3]; + tabPtr.i= conf->m_senderData; ndbrequire((tabPtr.p= c_tablePool.getPtr(tabPtr.i)) != 0); ndbrequire(tabPtr.p->m_tableId == tableId); @@ -2156,6 +2200,8 @@ Suma::execSUB_START_REQ(Signal* signal){ jam(); initTable(signal,subPtr.p->m_tableId,tabPtr,subbPtr); tabPtr.p->n_subscribers++; + if (subPtr.p->m_options & Subscription::REPORT_ALL) + tabPtr.p->m_reportAll = true; DBUG_PRINT("info",("Suma::Table[%u]::n_subscribers: %u", tabPtr.p->m_tableId, tabPtr.p->n_subscribers)); DBUG_VOID_RETURN; @@ -2198,6 +2244,10 @@ Suma::sendSubStartComplete(Signal* signal, subPtr.p->m_subscriptionId,subPtr.p->m_subscriptionKey)); sendSignal(subPtr.p->m_senderRef, GSN_SUB_START_CONF, signal, SubStartConf::SignalLength, JBB); + + reportAllSubscribers(signal, NdbDictionary::Event::_TE_SUBSCRIBE, + subPtr, subbPtr); + DBUG_VOID_RETURN; } @@ -2470,9 +2520,60 @@ Suma::sendSubStopComplete(Signal* signal, SubscriberPtr subbPtr) DBUG_PRINT("info",("c_subscriberPool size: %d free: %d", c_subscriberPool.getSize(), c_subscriberPool.getNoOfFree())); + + reportAllSubscribers(signal, NdbDictionary::Event::_TE_UNSUBSCRIBE, + subPtr, subbPtr); + DBUG_VOID_RETURN; } +// report new started subscriber to all other subscribers +void +Suma::reportAllSubscribers(Signal *signal, + NdbDictionary::Event::_TableEvent table_event, + SubscriptionPtr subPtr, + SubscriberPtr subbPtr) +{ + if (!(subPtr.p->m_options & Subscription::REPORT_SUBSCRIBE)) + { + return; + } + if (subPtr.p->n_subscribers == 0) + { + ndbrequire(table_event != NdbDictionary::Event::_TE_SUBSCRIBE); + return; + } + + SubTableData * data = (SubTableData*)signal->getDataPtrSend(); + data->gci = m_last_complete_gci + 1; + data->tableId = subPtr.p->m_tableId; + data->operation = table_event; + data->logType = 0; + data->ndbd_nodeid = refToNode(reference()); + + TablePtr tabPtr; + c_tables.getPtr(tabPtr, subPtr.p->m_table_ptrI); + LocalDLList<Subscriber> subbs(c_subscriberPool, tabPtr.p->c_subscribers); + SubscriberPtr i_subbPtr; + for(subbs.first(i_subbPtr); !i_subbPtr.isNull(); subbs.next(i_subbPtr)) + { + if (i_subbPtr.p->m_subPtrI == subPtr.i) + { + data->req_nodeid = refToNode(subbPtr.p->m_senderRef); + data->senderData = i_subbPtr.p->m_senderData; + sendSignal(i_subbPtr.p->m_senderRef, GSN_SUB_TABLE_DATA, signal, + SubTableData::SignalLength, JBB); + if (i_subbPtr.i != subbPtr.i) + { + data->req_nodeid = refToNode(i_subbPtr.p->m_senderRef); + data->senderData = subbPtr.p->m_senderData; + sendSignal(subbPtr.p->m_senderRef, GSN_SUB_TABLE_DATA, signal, + SubTableData::SignalLength, JBB); + } + } + } +} + void Suma::sendSubStopRef(Signal* signal, Uint32 errCode) { @@ -2526,6 +2627,7 @@ Suma::Table::setupTrigger(Signal* signal, req->setTriggerEvent((TriggerEvent::Value)j); req->setTableId(m_tableId); req->setAttributeMask(attrMask); + req->setReportAllMonitoredAttributes(m_reportAll); suma.sendSignal(DBTUP_REF, GSN_CREATE_TRIG_REQ, signal, CreateTrigReq::SignalLength, JBB); ret= 1; diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.hpp b/storage/ndb/src/kernel/blocks/suma/Suma.hpp index 70c63b1363d..913e16df8f2 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp @@ -33,6 +33,7 @@ #include 
<signaldata/UtilSequence.hpp> #include <signaldata/SumaImpl.hpp> +#include <ndbapi/NdbDictionary.hpp> class Suma : public SimulatedBlock { BLOCK_DEFINES(Suma); @@ -147,6 +148,50 @@ public: /** * Subscriptions */ + + struct Subscription { + Uint32 m_senderRef; + Uint32 m_senderData; + Uint32 m_subscriptionId; + Uint32 m_subscriptionKey; + Uint32 m_subscriptionType; + Uint16 m_options; + + enum Options { + REPORT_ALL = 0x1, + REPORT_SUBSCRIBE = 0x2 + }; + + enum State { + UNDEFINED, + LOCKED, + DEFINED, + DROPPED + }; + State m_state; + Uint32 n_subscribers; + + Uint32 nextHash; + union { Uint32 prevHash; Uint32 nextPool; }; + + Uint32 hashValue() const { + return m_subscriptionId + m_subscriptionKey; + } + + bool equal(const Subscription & s) const { + return + m_subscriptionId == s.m_subscriptionId && + m_subscriptionKey == s.m_subscriptionKey; + } + /** + * The following holds the tables included + * in the subscription. + */ + Uint32 m_tableId; + Uint32 m_table_ptrI; + }; + typedef Ptr<Subscription> SubscriptionPtr; + class Table; friend class Table; typedef Ptr<Table> TablePtr; @@ -229,6 +274,7 @@ public: SubscriberPtr m_drop_subbPtr; Uint32 n_subscribers; + bool m_reportAll; bool parseTable(SegmentedSectionPtr ptr, Suma &suma); /** @@ -283,43 +329,6 @@ public: } }; - struct Subscription { - Uint32 m_senderRef; - Uint32 m_senderData; - Uint32 m_subscriptionId; - Uint32 m_subscriptionKey; - Uint32 m_subscriptionType; - - enum State { - UNDEFINED, - LOCKED, - DEFINED, - DROPPED - }; - State m_state; - Uint32 n_subscribers; - - Uint32 nextHash; - union { Uint32 prevHash; Uint32 nextPool; }; - - Uint32 hashValue() const { - return m_subscriptionId + m_subscriptionKey; - } - - bool equal(const Subscription & s) const { - return - m_subscriptionId == s.m_subscriptionId && - m_subscriptionKey == s.m_subscriptionKey; - } - /** - * The following holds the tables included - * in the subscription. 
- */ - Uint32 m_tableId; - Uint32 m_table_ptrI; - }; - typedef Ptr<Subscription> SubscriptionPtr; - /** * */ @@ -368,6 +377,11 @@ public: void completeSubRemove(SubscriptionPtr subPtr); + void reportAllSubscribers(Signal *signal, + NdbDictionary::Event::_TableEvent table_event, + SubscriptionPtr subPtr, + SubscriberPtr subbPtr); + Uint32 getFirstGCI(Signal* signal); /** diff --git a/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp b/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp index 8b8652b682a..b46266166cd 100644 --- a/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp +++ b/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp @@ -92,6 +92,7 @@ Suma::Suma(const Configuration & conf) : * Dih interface */ addRecSignal(GSN_DI_FCOUNTCONF, &Suma::execDI_FCOUNTCONF); + addRecSignal(GSN_DI_FCOUNTREF, &Suma::execDI_FCOUNTREF); addRecSignal(GSN_DIGETPRIMCONF, &Suma::execDIGETPRIMCONF); /** diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp index 8bfedc2f96f..994d5da4afc 100644 --- a/storage/ndb/src/ndbapi/Ndb.cpp +++ b/storage/ndb/src/ndbapi/Ndb.cpp @@ -1310,7 +1310,12 @@ void Ndb::setReportThreshEventGCISlip(unsigned thresh) void Ndb::setReportThreshEventFreeMem(unsigned thresh) { - theEventBuffer->m_free_thresh= thresh; + if (theEventBuffer->m_free_thresh != thresh) + { + theEventBuffer->m_free_thresh= thresh; + theEventBuffer->m_min_free_thresh= thresh; + theEventBuffer->m_max_free_thresh= 100; + } } #ifdef VM_TRACE diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp index 3c42e232846..19069d2a16d 100644 --- a/storage/ndb/src/ndbapi/NdbDictionary.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp @@ -760,6 +760,18 @@ NdbDictionary::Event::getDurability() const } void +NdbDictionary::Event::setReport(EventReport r) +{ + m_impl.setReport(r); +} + +NdbDictionary::Event::EventReport +NdbDictionary::Event::getReport() const +{ + return m_impl.getReport(); +} + +void NdbDictionary::Event::addColumn(const Column & c){ NdbColumnImpl* col = new NdbColumnImpl; (* col) = NdbColumnImpl::getImpl(c); diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 507fc3979c7..3bb1e5838f0 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -460,10 +460,13 @@ NdbTableImpl::assign(const NdbTableImpl& org) { m_internalName.assign(org.m_internalName); updateMysqlName(); - m_externalName.assign(org.m_externalName); - m_newExternalName.assign(org.m_newExternalName); + // If the name has been explicitly set, use that name + // otherwise use the fetched name + if (!org.m_newExternalName.empty()) + m_externalName.assign(org.m_newExternalName); + else + m_externalName.assign(org.m_externalName); m_frm.assign(org.m_frm.get_data(), org.m_frm.length()); - m_newFrm.assign(org.m_newFrm.get_data(), org.m_newFrm.length()); m_ng.assign(org.m_ng.get_data(), org.m_ng.length()); m_fragmentType = org.m_fragmentType; m_fragmentCount = org.m_fragmentCount; @@ -795,6 +798,7 @@ void NdbEventImpl::init() mi_type= 0; m_dur= NdbDictionary::Event::ED_UNDEFINED; m_tableImpl= NULL; + m_rep= NdbDictionary::Event::ER_UPDATED; } NdbEventImpl::~NdbEventImpl() @@ -850,6 +854,18 @@ NdbEventImpl::getDurability() const return m_dur; } +void +NdbEventImpl::setReport(NdbDictionary::Event::EventReport r) +{ + m_rep = r; +} + +NdbDictionary::Event::EventReport +NdbEventImpl::getReport() const +{ + return m_rep; +} + int NdbEventImpl::getNoOfEventColumns() const { return 
m_attrIds.size() + m_columns.size(); @@ -1257,13 +1273,13 @@ NdbDictInterface::dictSignal(NdbApiSignal* sig, if(m_error.code && errcodes) { - for(int j = 0; errcodes[j] ; j++){ + int j; + for(j = 0; errcodes[j] ; j++){ if(m_error.code == errcodes[j]){ - m_error.code = 0; break; } } - if(!m_error.code) // Accepted error code + if(errcodes[j]) // Accepted error code continue; } break; @@ -1644,6 +1660,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, * ret = impl; + DBUG_ASSERT(impl->m_fragmentCount > 0); DBUG_RETURN(0); } @@ -1654,6 +1671,9 @@ int NdbDictionaryImpl::createTable(NdbTableImpl &t) { DBUG_ENTER("NdbDictionaryImpl::createTable"); + // If a new name has not been set, use the copied name + if (t.m_newExternalName.empty()) + t.m_newExternalName.assign(t.m_externalName); if (m_receiver.createTable(m_ndb, t) != 0) { DBUG_RETURN(-1); } @@ -2748,6 +2768,11 @@ NdbDictInterface::createEvent(class Ndb & ndb, req->setTableVersion(evnt.m_tableVersion); req->setAttrListBitmask(evnt.m_attrListBitmask); req->setEventType(evnt.mi_type); + req->clearFlags(); + if (evnt.m_rep & NdbDictionary::Event::ER_ALL) + req->setReportAll(); + if (evnt.m_rep & NdbDictionary::Event::ER_SUBSCRIBE) + req->setReportSubscribe(); } UtilBufferWriter w(m_buffer); @@ -2772,12 +2797,11 @@ NdbDictInterface::createEvent(class Ndb & ndb, ptr[0].p = (Uint32*)m_buffer.get_data(); ptr[0].sz = (m_buffer.length()+3) >> 2; - int errCodes[] = { CreateEvntRef::Busy, 0 }; int ret = dictSignal(&tSignal,ptr, 1, 0, // master WAIT_CREATE_INDX_REQ, WAITFOR_RESPONSE_TIMEOUT, 100, - errCodes, 0); + 0, -1); if (ret) { DBUG_RETURN(ret); } @@ -2842,12 +2866,11 @@ NdbDictInterface::executeSubscribeEvent(class Ndb & ndb, "subscriberData=%d",req->subscriptionId, req->subscriptionKey,req->subscriberData)); - int errCodes[] = { SubStartRef::Busy, 0 }; DBUG_RETURN(dictSignal(&tSignal,NULL,0, 0 /*use masternode id*/, WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/, -1, 100, - errCodes, 0)); + 0, -1)); } int @@ -2881,12 +2904,11 @@ NdbDictInterface::stopSubscribeEvent(class Ndb & ndb, "subscriberData=%d",req->subscriptionId, req->subscriptionKey,req->subscriberData)); - int errCodes[] = { SubStopRef::Busy, 0 }; DBUG_RETURN(dictSignal(&tSignal,NULL,0, 0 /*use masternode id*/, WAIT_CREATE_INDX_REQ /*WAIT_SUB_STOP__REQ*/, -1, 100, - errCodes, 0)); + 0, -1)); } NdbEventImpl * @@ -3024,6 +3046,8 @@ NdbDictInterface::execCREATE_EVNT_REF(NdbApiSignal * signal, m_error.code= ref->getErrorCode(); DBUG_PRINT("error",("error=%d,line=%d,node=%d",ref->getErrorCode(), ref->getErrorLine(),ref->getErrorNode())); + if (m_error.code == CreateEvntRef::NotMaster) + m_masterNodeId = ref->getMasterNode(); m_waiter.signal(NO_WAIT); DBUG_VOID_RETURN; } @@ -3061,6 +3085,8 @@ NdbDictInterface::execSUB_STOP_REF(NdbApiSignal * signal, DBUG_PRINT("error",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d,error=%d", subscriptionId,subscriptionKey,subscriberData,m_error.code)); + if (m_error.code == SubStopRef::NotMaster) + m_masterNodeId = subStopRef->m_masterNodeId; m_waiter.signal(NO_WAIT); DBUG_VOID_RETURN; } @@ -3109,6 +3135,8 @@ NdbDictInterface::execSUB_START_REF(NdbApiSignal * signal, const SubStartRef * const subStartRef= CAST_CONSTPTR(SubStartRef, signal->getDataPtr()); m_error.code= subStartRef->errorCode; + if (m_error.code == SubStartRef::NotMaster) + m_masterNodeId = subStartRef->m_masterNodeId; m_waiter.signal(NO_WAIT); DBUG_VOID_RETURN; } @@ -3149,12 +3177,11 @@ NdbDictInterface::dropEvent(const NdbEventImpl &evnt) ptr[0].p =
(Uint32*)m_buffer.get_data(); ptr[0].sz = (m_buffer.length()+3) >> 2; - //TODO return dictSignal(&tSignal,ptr, 1, 0 /*use masternode id*/, WAIT_CREATE_INDX_REQ, -1, 100, - 0, 0); + 0, -1); } void @@ -3177,7 +3204,8 @@ NdbDictInterface::execDROP_EVNT_REF(NdbApiSignal * signal, DBUG_PRINT("info",("ErrorCode=%u Errorline=%u ErrorNode=%u", ref->getErrorCode(), ref->getErrorLine(), ref->getErrorNode())); - + if (m_error.code == DropEvntRef::NotMaster) + m_masterNodeId = ref->getMasterNode(); m_waiter.signal(NO_WAIT); DBUG_VOID_RETURN; } diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp index cd6b9199250..c69172cd489 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -251,6 +251,8 @@ public: void addTableEvent(const NdbDictionary::Event::TableEvent t); void setDurability(NdbDictionary::Event::EventDurability d); NdbDictionary::Event::EventDurability getDurability() const; + void setReport(NdbDictionary::Event::EventReport r); + NdbDictionary::Event::EventReport getReport() const; void addEventColumn(const NdbColumnImpl &c); int getNoOfEventColumns() const; @@ -268,7 +270,7 @@ public: BaseString m_name; Uint32 mi_type; NdbDictionary::Event::EventDurability m_dur; - + NdbDictionary::Event::EventReport m_rep; NdbTableImpl *m_tableImpl; BaseString m_tableName; diff --git a/storage/ndb/src/ndbapi/NdbEventOperation.cpp b/storage/ndb/src/ndbapi/NdbEventOperation.cpp index dd317f2911b..78a848323b8 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperation.cpp @@ -162,6 +162,11 @@ int NdbEventOperation::getReqNodeId() const return m_impl.m_data_item->sdata->req_nodeid; } +int NdbEventOperation::getNdbdNodeId() const +{ + return m_impl.m_data_item->sdata->ndbd_nodeid; +} + /* * Private members */ diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 78ca0dffc4f..8d8f4c21b70 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -270,6 +270,7 @@ NdbEventOperationImpl::execute() m_state= EO_ERROR; mi_type= 0; m_magic_number= 0; + m_error.code= myDict->getNdbError().code; m_ndb->theEventBuffer->remove_op(); m_ndb->theEventBuffer->add_drop_unlock(); DBUG_RETURN(r); @@ -672,7 +673,7 @@ NdbEventBuffer::pollEvents(int aMillisecondNumber, Uint64 *latestGCI) NdbMutex_Lock(m_mutex); NdbEventOperationImpl *ev_op= move_data(); - if (unlikely(ev_op == 0)) + if (unlikely(ev_op == 0 && aMillisecondNumber)) { NdbCondition_WaitTimeout(p_cond, m_mutex, aMillisecondNumber); ev_op= move_data(); @@ -1015,6 +1016,33 @@ NdbEventBuffer::complete_outof_order_gcis() } void +NdbEventBuffer::report_node_failure(Uint32 node_id) +{ + DBUG_ENTER("NdbEventBuffer::report_node_failure"); + SubTableData data; + LinearSectionPtr ptr[3]; + bzero(&data, sizeof(data)); + bzero(ptr, sizeof(ptr)); + + data.tableId = ~0; + data.operation = NdbDictionary::Event::_TE_NODE_FAILURE; + data.req_nodeid = (Uint8)node_id; + data.ndbd_nodeid = (Uint8)node_id; + data.logType = SubTableData::LOG; + /** + * Insert this event for each operation + */ + NdbEventOperation* op= 0; + while((op = m_ndb->getEventOperation(op))) + { + NdbEventOperationImpl* impl= &op->m_impl; + data.senderData = impl->m_oid; + insertDataL(impl, &data, ptr); + } + DBUG_VOID_RETURN; +} + +void NdbEventBuffer::completeClusterFailed() { DBUG_ENTER("NdbEventBuffer::completeClusterFailed"); @@ -1648,7 
+1676,7 @@ NdbEventBuffer::reportStatus() m_min_free_thresh= m_free_thresh; m_max_free_thresh= 100; goto send_report; - } + } if (latest_gci-apply_gci >= m_gci_slip_thresh) { goto send_report; diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp index a04b5fecb34..2b5f5812efe 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp @@ -254,7 +254,7 @@ public: void execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep); void complete_outof_order_gcis(); - void reportClusterFailed(NdbEventOperationImpl *op); + void report_node_failure(Uint32 node_id); void completeClusterFailed(); // used by user thread diff --git a/storage/ndb/src/ndbapi/Ndbif.cpp b/storage/ndb/src/ndbapi/Ndbif.cpp index 6104a97b213..e45a03771b9 100644 --- a/storage/ndb/src/ndbapi/Ndbif.cpp +++ b/storage/ndb/src/ndbapi/Ndbif.cpp @@ -253,12 +253,17 @@ Ndb::report_node_failure(Uint32 node_id) void Ndb::report_node_failure_completed(Uint32 node_id) { - if (theEventBuffer && - !TransporterFacade::instance()->theClusterMgr->isClusterAlive()) + if (theEventBuffer) { - // cluster is unavailable, + // node failed // eventOperations in the ndb object should be notified - theEventBuffer->completeClusterFailed(); + theEventBuffer->report_node_failure(node_id); + if(!TransporterFacade::instance()->theClusterMgr->isClusterAlive()) + { + // cluster is unavailable, + // eventOperations in the ndb object should be notified + theEventBuffer->completeClusterFailed(); + } } abortTransactionsAfterNodeFailure(node_id); diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c index 8103d1d179e..c6a25ced3d3 100644 --- a/storage/ndb/src/ndbapi/ndberror.c +++ b/storage/ndb/src/ndbapi/ndberror.c @@ -59,6 +59,8 @@ typedef struct ErrorBundle { #define OE ndberror_cl_schema_object_already_exists +#define IT ndberror_cl_internal_temporary + /* default mysql error code for unmapped codes */ #define DMEC -1 @@ -208,6 +210,7 @@ ErrorBundle ErrorCodes[] = { /** * OverloadError */ + { 701, DMEC, OL, "System busy with other schema operation" }, { 410, DMEC, OL, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" }, { 677, DMEC, OL, "Index UNDO buffers overloaded (increase UndoIndexBuffer)" }, { 891, DMEC, OL, "Data UNDO buffers overloaded (increase UndoDataBuffer)" }, @@ -215,6 +218,10 @@ ErrorBundle ErrorCodes[] = { { 4006, DMEC, OL, "Connect failure - out of connection objects (increase MaxNoOfConcurrentTransactions)" }, + /* + * Internal Temporary + */ + { 702, DMEC, IT, "Request to non-master" }, /** * Internal errors @@ -240,7 +247,6 @@ ErrorBundle ErrorCodes[] = { { 290, DMEC, IE, "Corrupt key in TC, unable to xfrm" }, { 631, DMEC, IE, "631" }, { 632, DMEC, IE, "632" }, - { 702, DMEC, IE, "Request to non-master" }, { 706, DMEC, IE, "Inconsistency during table creation" }, { 809, DMEC, IE, "809" }, { 812, DMEC, IE, "812" }, @@ -335,7 +341,6 @@ ErrorBundle ErrorCodes[] = { /** * SchemaError */ - { 701, DMEC, SE, "System busy with other schema operation" }, { 703, DMEC, SE, "Invalid table format" }, { 704, DMEC, SE, "Attribute name too long" }, { 705, DMEC, SE, "Table name too long" }, @@ -629,6 +634,7 @@ ErrorStatusClassification StatusClassificationMapping[] = { { ST_T, OL, "Overload error"}, { ST_T, TO, "Timeout expired"}, { ST_T, NS, "Node shutdown"}, + { ST_T, IT, "Internal temporary"}, { ST_U , UR, "Unknown result error"}, { 
ST_U , UE, "Unknown error code"}, diff --git a/storage/ndb/tools/restore/consumer.cpp b/storage/ndb/tools/restore/consumer.cpp index b130c4998d5..008eed724ee 100644 --- a/storage/ndb/tools/restore/consumer.cpp +++ b/storage/ndb/tools/restore/consumer.cpp @@ -16,6 +16,8 @@ #include "consumer.hpp" +const char *Ndb_apply_table= NDB_APPLY_TABLE; + #ifdef USE_MYSQL int BackupConsumer::create_table_string(const TableS & table, diff --git a/storage/ndb/tools/restore/consumer.hpp b/storage/ndb/tools/restore/consumer.hpp index 9cfbd3ca592..f40492a76a1 100644 --- a/storage/ndb/tools/restore/consumer.hpp +++ b/storage/ndb/tools/restore/consumer.hpp @@ -19,6 +19,9 @@ #include "Restore.hpp" +#include "../../../../sql/ha_ndbcluster_tables.h" +extern const char *Ndb_apply_table; + class BackupConsumer { public: virtual ~BackupConsumer() { } @@ -32,6 +35,7 @@ public: virtual void logEntry(const LogEntry &){} virtual void endOfLogEntrys(){} virtual bool finalize_table(const TableS &){return true;} + virtual bool update_apply_status(const RestoreMetaData &metaData){return true;} }; #endif diff --git a/storage/ndb/tools/restore/consumer_printer.cpp b/storage/ndb/tools/restore/consumer_printer.cpp index 0aa5b521d29..36b2bf29a64 100644 --- a/storage/ndb/tools/restore/consumer_printer.cpp +++ b/storage/ndb/tools/restore/consumer_printer.cpp @@ -53,3 +53,11 @@ BackupPrinter::endOfLogEntrys() << " to stdout." << endl; } } +bool +BackupPrinter::update_apply_status(const RestoreMetaData &metaData) +{ + if (m_print) + { + } + return true; +} diff --git a/storage/ndb/tools/restore/consumer_printer.hpp b/storage/ndb/tools/restore/consumer_printer.hpp index e47bc56f874..2433a8511aa 100644 --- a/storage/ndb/tools/restore/consumer_printer.hpp +++ b/storage/ndb/tools/restore/consumer_printer.hpp @@ -41,6 +41,7 @@ public: virtual void logEntry(const LogEntry &); virtual void endOfTuples() {}; virtual void endOfLogEntrys(); + virtual bool update_apply_status(const RestoreMetaData &metaData); bool m_print; bool m_print_log; bool m_print_data; diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp index fd9daf2856d..2fc7b193199 100644 --- a/storage/ndb/tools/restore/consumer_restore.cpp +++ b/storage/ndb/tools/restore/consumer_restore.cpp @@ -32,7 +32,7 @@ BackupRestore::init() { release(); - if (!m_restore && !m_restore_meta) + if (!m_restore && !m_restore_meta && !m_restore_epoch) return true; m_cluster_connection = new Ndb_cluster_connection(g_connect_string); @@ -288,6 +288,61 @@ BackupRestore::object(Uint32 type, const void * ptr) } bool +BackupRestore::update_apply_status(const RestoreMetaData &metaData) +{ + if (!m_restore_epoch) + return true; + + bool result= false; + + m_ndb->setDatabaseName(NDB_REP_DB); + m_ndb->setSchemaName("def"); + + NdbDictionary::Dictionary *dict= m_ndb->getDictionary(); + const NdbDictionary::Table *ndbtab= dict->getTable(Ndb_apply_table); + if (!ndbtab) + { + err << Ndb_apply_table << ": " + << dict->getNdbError() << endl; + return false; + } + Uint32 server_id= 0; + Uint64 epoch= metaData.getStopGCP(); + NdbTransaction * trans= m_ndb->startTransaction(); + if (!trans) + { + err << Ndb_apply_table << ": " + << m_ndb->getNdbError() << endl; + return false; + } + NdbOperation * op= trans->getNdbOperation(ndbtab); + if (!op) + { + err << Ndb_apply_table << ": " + << trans->getNdbError() << endl; + goto err; + } + if (op->writeTuple() || + op->equal(0u, (const char *)&server_id, sizeof(server_id)) || + op->setValue(1u, (const char *)&epoch, 
sizeof(epoch))) + { + err << Ndb_apply_table << ": " + << op->getNdbError() << endl; + goto err; + } + if (trans->execute(NdbTransaction::Commit)) + { + err << Ndb_apply_table << ": " + << trans->getNdbError() << endl; + goto err; + } + result= true; +err: + m_ndb->closeTransaction(trans); + return result; +} + +bool BackupRestore::table(const TableS & table){ if (!m_restore && !m_restore_meta) return true; @@ -344,7 +399,43 @@ BackupRestore::table(const TableS & table){ err << "Unable to find table: " << split[2].c_str() << endl; return false; } - if(m_restore_meta){ + if(m_restore_meta) + { + if (tab->getFrmData()) + { + // a MySQL Server table is restored, thus an event should be created + BaseString event_name("REPL$"); + event_name.append(split[0].c_str()); + event_name.append("/"); + event_name.append(split[2].c_str()); + + NdbDictionary::Event my_event(event_name.c_str()); + my_event.setTable(*tab); + my_event.addTableEvent(NdbDictionary::Event::TE_ALL); + + // add all columns to the event + for(int a= 0; a < tab->getNoOfColumns(); a++) + { + my_event.addEventColumn(a); + } + + while ( dict->createEvent(my_event) ) // Add event to database + { + if (dict->getNdbError().classification == NdbError::SchemaObjectExists) + { + info << "Event for table " << table.getTableName() + << " already exists, removing.\n"; + if (!dict->dropEvent(my_event.getName())) + continue; + } + err << "Create table event for " << table.getTableName() << " failed: " + << dict->getNdbError() << endl; + dict->dropTable(split[2].c_str()); + return false; + } + info << "Successfully restored table event " << event_name << endl ; + } + m_ndb->setAutoIncrementValue(tab, ~(Uint64)0, false); } const NdbDictionary::Table* null = 0; diff --git a/storage/ndb/tools/restore/consumer_restore.hpp b/storage/ndb/tools/restore/consumer_restore.hpp index 385a792b4ca..73aabdb3acf 100644 --- a/storage/ndb/tools/restore/consumer_restore.hpp +++ b/storage/ndb/tools/restore/consumer_restore.hpp @@ -40,6 +40,7 @@ public: m_restore = false; m_restore_meta = false; m_no_restore_disk = false; + m_restore_epoch = false; m_parallelism = parallelism; m_callback = 0; m_free_callback = 0; @@ -63,12 +64,14 @@ public: virtual void logEntry(const LogEntry &); virtual void endOfLogEntrys(); virtual bool finalize_table(const TableS &); + virtual bool update_apply_status(const RestoreMetaData &metaData); void connectToMysql(); Ndb * m_ndb; Ndb_cluster_connection * m_cluster_connection; bool m_restore; bool m_restore_meta; bool m_no_restore_disk; + bool m_restore_epoch; Uint32 m_logCount; Uint32 m_dataCount; diff --git a/storage/ndb/tools/restore/restore_main.cpp b/storage/ndb/tools/restore/restore_main.cpp index ee934d6ccda..4ca26c7c683 100644 --- a/storage/ndb/tools/restore/restore_main.cpp +++ b/storage/ndb/tools/restore/restore_main.cpp @@ -42,6 +42,7 @@ NDB_STD_OPTS_VARS; /** * print and restore flags */ +static bool ga_restore_epoch = false; static bool ga_restore = false; static bool ga_print = false; static int _print = 0; @@ -76,6 +77,12 @@ static struct my_option my_long_options[] = "Dont restore disk objects (tablespace/logfilegroups etc)", (gptr*) &_no_restore_disk, (gptr*) &_no_restore_disk, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "restore_epoch", 'e', + "Restore epoch info into the status table. Convenient on a MySQL Cluster " + "replication slave, for starting replication. The row in " + NDB_REP_DB "." 
NDB_APPLY_TABLE " with id 0 will be updated/inserted.", + (gptr*) &ga_restore_epoch, (gptr*) &ga_restore_epoch, 0, + GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "parallelism", 'p', "No of parallel transactions during restore of data." "(parallelism can be 1 to 1024)", @@ -93,6 +100,9 @@ static struct my_option my_long_options[] = { "print_log", 259, "Print log to stdout", (gptr*) &_print_log, (gptr*) &_print_log, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "backup_path", 260, "Path to backup files", + (gptr*) &ga_backupPath, (gptr*) &ga_backupPath, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "dont_ignore_systab_0", 'f', "Experimental. Do not ignore system table during restore.", (gptr*) &ga_dont_ignore_systab_0, (gptr*) &ga_dont_ignore_systab_0, 0, @@ -197,6 +207,11 @@ readArguments(int *pargc, char*** pargv) restore->m_no_restore_disk = true; } + if (ga_restore_epoch) + { + restore->m_restore_epoch = true; + } + { BackupConsumer * c = printer; g_consumers.push_back(c); @@ -229,7 +244,9 @@ checkSysTable(const char *tableName) (strcmp(tableName, "SYSTAB_0") != 0 && strcmp(tableName, "NDB$EVENTS_0") != 0 && strcmp(tableName, "sys/def/SYSTAB_0") != 0 && - strcmp(tableName, "sys/def/NDB$EVENTS_0") != 0); + strcmp(tableName, "sys/def/NDB$EVENTS_0") != 0 && + strcmp(tableName, NDB_REP_DB "/def/" NDB_APPLY_TABLE) != 0 && + strcmp(tableName, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE) != 0); } static void @@ -444,6 +461,16 @@ main(int argc, char** argv) } } } + if (ga_restore_epoch) + { + for (i= 0; i < g_consumers.size(); i++) + if (!g_consumers[i]->update_apply_status(metaData)) + { + ndbout_c("Restore: Failed to restore epoch"); + return -1; + } + } + clearConsumers(); return NDBT_ProgramExit(NDBT_OK); } // main
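One practical consequence of the ndberror.c reclassification earlier in this patch: error 702 "Request to non-master" is now a temporary error (ndberror_cl_internal_temporary, surfaced to clients as NdbError::InternalTemporary), so API code can retry instead of failing outright. A hypothetical sketch of such a retry loop follows, using only the public NdbError fields; the helper name and retry policy are assumptions, not part of this change.

  #include <NdbApi.hpp>
  #include <unistd.h>

  // Sketch: retry a dictionary request that may land on a non-master
  // DICT node (error 702, classified InternalTemporary by this change).
  static int create_event_with_retry(NdbDictionary::Dictionary *dict,
                                     const NdbDictionary::Event &ev)
  {
    for (int attempt = 0; attempt < 10; attempt++)
    {
      if (dict->createEvent(ev) == 0)
        return 0;                                 // success
      const NdbError &error = dict->getNdbError();
      if (error.classification != NdbError::InternalTemporary &&
          error.status != NdbError::TemporaryError)
        return -1;                                // permanent error, give up
      sleep(1);                                   // temporary error, retry
    }
    return -1;
  }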