| author | unknown <tsmith/tim@siva.hindu.god> | 2006-07-15 00:33:24 -0600 |
| --- | --- | --- |
| committer | unknown <tsmith/tim@siva.hindu.god> | 2006-07-15 00:33:24 -0600 |
| commit | 690101ad882c603a00981e6e5be58f61888d8ad0 (patch) | |
| tree | a5e74a70e567b375faa67837da8854bcf895758f | |
| parent | d6e3a9ddcb6a877feb65a98953b3ec9e6426be47 (diff) | |
| parent | 906cd90d936e384af54c1f4e75ebb56bd2f6b148 (diff) | |
| download | mariadb-git-690101ad882c603a00981e6e5be58f61888d8ad0.tar.gz | |
Merge bk-internal.mysql.com:/home/bk/mysql-5.1
into siva.hindu.god:/usr/home/tim/m/bk/merge-51
(which is mysql-5.1-new-maint team tree)
include/my_base.h:
  Auto merged
mysql-test/extra/rpl_tests/rpl_loaddata.test:
  Auto merged
mysql-test/lib/mtr_cases.pl:
  Auto merged
mysql-test/r/rpl_loaddata.result:
  Auto merged
mysql-test/r/sp-security.result:
  Auto merged
mysql-test/t/sp-security.test:
  Auto merged
mysql-test/lib/mtr_report.pl:
  merge of mysql-5.1 -> mysql-5.1-new-maint
mysql-test/mysql-test-run.pl:
  merge of mysql-5.1 -> mysql-5.1-new-maint
128 files changed, 5572 insertions(+), 2136 deletions(-)
diff --git a/.bzrignore b/.bzrignore index 2fdd5b3e68f..777700a202f 100644 --- a/.bzrignore +++ b/.bzrignore @@ -1093,6 +1093,7 @@ scripts/mysql_secure_installation scripts/mysql_setpermission scripts/mysql_tableinfo scripts/mysql_upgrade +scripts/mysql_upgrade_shell scripts/mysql_zap scripts/mysqlaccess scripts/mysqlbug diff --git a/configure.in b/configure.in index 1306a3cc74d..fe5206e3f42 100644 --- a/configure.in +++ b/configure.in @@ -358,18 +358,18 @@ fi AC_SUBST(LD_VERSION_SCRIPT) # Avoid bug in fcntl on some versions of linux -AC_MSG_CHECKING("if we should use 'skip-locking' as default for $target_os") +AC_MSG_CHECKING([if we should use 'skip-external-locking' as default for $target_os]) # Any variation of Linux if expr "$target_os" : "[[Ll]]inux.*" > /dev/null then - MYSQLD_DEFAULT_SWITCHES="--skip-locking" + MYSQLD_DEFAULT_SWITCHES="--skip-external-locking" TARGET_LINUX="true" - AC_MSG_RESULT("yes") + AC_MSG_RESULT([yes]) AC_DEFINE([TARGET_OS_LINUX], [1], [Whether we build for Linux]) else MYSQLD_DEFAULT_SWITCHES="" TARGET_LINUX="false" - AC_MSG_RESULT("no") + AC_MSG_RESULT([no]) fi AC_SUBST(MYSQLD_DEFAULT_SWITCHES) AC_SUBST(TARGET_LINUX) diff --git a/include/my_base.h b/include/my_base.h index 4bcfb03affb..d75df093a11 100644 --- a/include/my_base.h +++ b/include/my_base.h @@ -155,7 +155,16 @@ enum ha_extra_function { Mark the table as a log table. For some handlers (e.g. CSV) this results in a special locking for the table. */ - HA_EXTRA_MARK_AS_LOG_TABLE + HA_EXTRA_MARK_AS_LOG_TABLE, + /* + Informs handler that write_row() which tries to insert new row into the + table and encounters some already existing row with same primary/unique + key can replace old row with new row instead of reporting error (basically + it informs handler that we do REPLACE instead of simple INSERT). + Off by default. 
+ */ + HA_EXTRA_WRITE_CAN_REPLACE, + HA_EXTRA_WRITE_CANNOT_REPLACE }; /* The following is parameter to ha_panic() */ diff --git a/include/my_global.h b/include/my_global.h index 21b2251af1d..539a2ea644f 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -1159,8 +1159,8 @@ do { doubleget_union _tmp; \ #define doublestore(T,V) do { *((long *) T) = ((doubleget_union *)&V)->m[0]; \ *(((long *) T)+1) = ((doubleget_union *)&V)->m[1]; \ } while (0) -#define float4get(V,M) do { *((long *) &(V)) = *((long*) (M)); } while(0) -#define float8get(V,M) doubleget((V),(M)) +#define float4get(V,M) do { *((float *) &(V)) = *((float*) (M)); } while(0) +#define float8get(V,M) doubleget((V),(M)) #define float4store(V,M) memcpy((byte*) V,(byte*) (&M),sizeof(float)) #define floatstore(T,V) memcpy((byte*)(T), (byte*)(&V),sizeof(float)) #define floatget(V,M) memcpy((byte*) &V,(byte*) (M),sizeof(float)) diff --git a/include/my_handler.h b/include/my_handler.h index d531e0fb3e1..61665090853 100644 --- a/include/my_handler.h +++ b/include/my_handler.h @@ -18,7 +18,6 @@ #ifndef _my_handler_h #define _my_handler_h -#include "my_global.h" #include "my_base.h" #include "m_ctype.h" #include "myisampack.h" diff --git a/include/my_sys.h b/include/my_sys.h index f2b08e2b372..4b31f6bcd2b 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -587,7 +587,7 @@ extern gptr _my_memdup(const byte *from,uint length, const char *sFile, uint uLine,myf MyFlag); extern my_string _my_strdup(const char *from, const char *sFile, uint uLine, myf MyFlag); -extern char *_my_strndup(const byte *from, uint length, +extern char *_my_strndup(const char *from, uint length, const char *sFile, uint uLine, myf MyFlag); diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index 225b3926aa7..2a7850afe95 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -2385,10 +2385,9 @@ static void net_store_datetime(NET *net, MYSQL_TIME *tm) static void store_param_date(NET *net, MYSQL_BIND *param) { - MYSQL_TIME *tm= (MYSQL_TIME *) param->buffer; - tm->hour= tm->minute= tm->second= 0; - tm->second_part= 0; - net_store_datetime(net, tm); + MYSQL_TIME tm= *((MYSQL_TIME *) param->buffer); + tm.hour= tm.minute= tm.second= tm.second_part= 0; + net_store_datetime(net, &tm); } static void store_param_datetime(NET *net, MYSQL_BIND *param) diff --git a/mysql-test/extra/binlog_tests/binlog.test b/mysql-test/extra/binlog_tests/binlog.test index 6f7990893f0..993b3fbf634 100644 --- a/mysql-test/extra/binlog_tests/binlog.test +++ b/mysql-test/extra/binlog_tests/binlog.test @@ -49,3 +49,35 @@ show binlog events in 'master-bin.000001' from 102; --replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events in 'master-bin.000002' from 102; + +# Test of a too big SET INSERT_ID: see if the truncated value goes +# into binlog (right), or the too big value (wrong); we look at the +# binlog further down with SHOW BINLOG EVENTS. +reset master; +create table t1 (id tinyint auto_increment primary key); +set insert_id=128; +insert into t1 values(null); +select * from t1; +drop table t1; + +# Test of binlogging of INSERT_ID with INSERT DELAYED +create table t1 (a int not null auto_increment, primary key (a)) engine=myisam; +# First, avoid BUG#20627: +set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1; +# Verify that only one INSERT_ID event is binlogged. 
+insert delayed into t1 values (207); + +# We use sleeps between statements, that's the only way to get a +# repeatable binlog in a normal test run and under Valgrind. +# It may be that the "binlog missing rows" of BUG#20821 shows up +# here. +sleep 2; +insert delayed into t1 values (null); +sleep 2; +insert delayed into t1 values (300); +sleep 2; # time for the delayed queries to reach disk +select * from t1; +--replace_column 2 # 5 # +--replace_regex /table_id: [0-9]+/table_id: #/ +show binlog events from 102; +drop table t1; diff --git a/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test b/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test index b5052620f91..bdc49573ae5 100644 --- a/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test +++ b/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test @@ -29,7 +29,7 @@ insert into t1 values(1); insert into t2 select * from t1; commit; ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; @@ -43,7 +43,7 @@ insert into t2 select * from t1; # should say some changes to non-transact1onal tables couldn't be rolled back rollback; ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; @@ -59,7 +59,7 @@ insert into t2 select * from t1; rollback to savepoint my_savepoint; commit; ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; @@ -77,7 +77,7 @@ insert into t1 values(7); commit; select a from t1 order by a; # check that savepoints work :) ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; @@ -99,7 +99,7 @@ connection con2; # so SHOW BINLOG EVENTS may come before con1 does the loggin. To be sure that # logging has been done, we use a user lock. 
select get_lock("a",10); ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; @@ -111,7 +111,7 @@ reset master; insert into t1 values(9); insert into t2 select * from t1; ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; @@ -124,13 +124,13 @@ reset master; insert into t1 values(10); # first make t1 non-empty begin; insert into t2 select * from t1; ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; insert into t1 values(11); commit; ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; @@ -149,7 +149,7 @@ insert into t1 values(12); insert into t2 select * from t1; commit; ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; @@ -162,7 +162,7 @@ insert into t1 values(13); insert into t2 select * from t1; rollback; ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; @@ -178,7 +178,7 @@ insert into t2 select * from t1; rollback to savepoint my_savepoint; commit; ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; @@ -196,7 +196,7 @@ insert into t1 values(18); commit; select a from t1 order by a; # check that savepoints work :) ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; @@ -257,7 +257,7 @@ insert into t2 values (3); disconnect con2; connection con3; select get_lock("lock1",60); ---replace_column 5 # +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; do release_lock("lock1"); @@ -324,6 +324,7 @@ CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * ROLLBACK; SELECT * from t2; DROP TABLE t1,t2; +--replace_column 2 # 5 # --replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\// show binlog events from 102; diff --git a/mysql-test/extra/rpl_tests/rpl_auto_increment.test b/mysql-test/extra/rpl_tests/rpl_auto_increment.test index 42f8fcfc5fb..dbae317e8ab 100644 --- a/mysql-test/extra/rpl_tests/rpl_auto_increment.test +++ b/mysql-test/extra/rpl_tests/rpl_auto_increment.test @@ -104,9 +104,47 @@ select * from t1; sync_slave_with_master; select * from t1; -connection master; +# Test for BUG#20524 "auto_increment_* not observed when inserting +# a too large value". When an autogenerated value was bigger than the +# maximum possible value of the field, it was truncated to that max +# possible value, without being "rounded down" to still honour +# auto_increment_* variables. 
+ +connection master; drop table t1; +create table t1 (a tinyint not null auto_increment primary key) engine=myisam; +insert into t1 values(103); +set auto_increment_increment=11; +set auto_increment_offset=4; +insert into t1 values(null); +insert into t1 values(null); +--error 1062 +insert into t1 values(null); +select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t1 order by a; + +# same but with a larger value +create table t2 (a tinyint unsigned not null auto_increment primary key) engine=myisam; +set auto_increment_increment=10; +set auto_increment_offset=1; +set insert_id=1000; +insert into t2 values(null); +select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t2 order by a; + +# An offset so big that even first value does not fit +create table t3 like t1; +set auto_increment_increment=1000; +set auto_increment_offset=700; +insert into t3 values(null); +select * from t3 order by a; +sync_slave_with_master; +select * from t1 order by a; +select * from t2 order by a; +select * from t3 order by a; + +connection master; + +drop table t1,t2,t3; # End cleanup sync_slave_with_master; diff --git a/mysql-test/extra/rpl_tests/rpl_insert_id.test b/mysql-test/extra/rpl_tests/rpl_insert_id.test index 68e39c54381..652ff8881c5 100644 --- a/mysql-test/extra/rpl_tests/rpl_insert_id.test +++ b/mysql-test/extra/rpl_tests/rpl_insert_id.test @@ -144,6 +144,23 @@ insert into t1 (last_id) values (bug15728()); # This should be exactly one greater than in the previous call. select last_insert_id(); +# BUG#20339 - stored procedure using LAST_INSERT_ID() does not +# replicate statement-based +--disable_warnings +drop procedure if exists foo; +--enable_warnings +delimiter |; +create procedure foo() +begin + declare res int; + insert into t2 (last_id) values (bug15728()); + insert into t1 (last_id) values (bug15728()); +end| +delimiter ;| +call foo(); + +select * from t1; +select * from t2; save_master_pos; connection slave; sync_with_master; @@ -153,8 +170,107 @@ connection master; drop function bug15728; drop function bug15728_insert; -drop table t1, t2; +drop table t1; +drop procedure foo; + +# test of BUG#20188 REPLACE or ON DUPLICATE KEY UPDATE in +# auto_increment breaks binlog + +create table t1 (n int primary key auto_increment not null, +b int, unique(b)); + +# First, test that we do not call restore_auto_increment() too early +# in write_record(): +set sql_log_bin=0; +insert into t1 values(null,100); +replace into t1 values(null,50),(null,100),(null,150); +select * from t1 order by n; +truncate table t1; +set sql_log_bin=1; + +insert into t1 values(null,100); +select * from t1 order by n; +sync_slave_with_master; +# make slave's table autoinc counter bigger +insert into t1 values(null,200),(null,300); +delete from t1 where b <> 100; +# check that slave's table content is identical to master +select * from t1 order by n; +# only the auto_inc counter differs. + +connection master; +replace into t1 values(null,100),(null,350); +select * from t1 order by n; +sync_slave_with_master; +select * from t1 order by n; + +# Same test as for REPLACE, but for ON DUPLICATE KEY UPDATE + +# We first check that if we update a row using a value larger than the +# table's counter, the counter for next row is bigger than the +# after-value of the updated row. 
+connection master; +insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000; +select * from t1 order by n; +sync_slave_with_master; +select * from t1 order by n; + +# and now test for the bug: +connection master; +drop table t1; +create table t1 (n int primary key auto_increment not null, +b int, unique(b)); +insert into t1 values(null,100); +select * from t1 order by n; +sync_slave_with_master; +insert into t1 values(null,200),(null,300); +delete from t1 where b <> 100; +select * from t1 order by n; + +connection master; +insert into t1 values(null,100),(null,350) on duplicate key update n=2; +select * from t1 order by n; +sync_slave_with_master; +select * from t1 order by n; + +connection master; +drop table t1; # End of 5.0 tests +# Test for BUG#20341 "stored function inserting into one +# auto_increment puts bad data in slave" + +truncate table t2; +create table t1 (id tinyint primary key); # no auto_increment + +delimiter |; +create function insid() returns int +begin + insert into t2 (last_id) values (0); + return 0; +end| +delimiter ;| +set sql_log_bin=0; +insert into t2 (id) values(1),(2),(3); +delete from t2; +set sql_log_bin=1; +#inside SELECT, then inside INSERT +select insid(); +set sql_log_bin=0; +insert into t2 (id) values(5),(6),(7); +delete from t2 where id>=5; +set sql_log_bin=1; +insert into t1 select insid(); +select * from t1; +select * from t2; + +sync_slave_with_master; +select * from t1; +select * from t2; + +connection master; +drop table t1, t2; +drop function insid; + sync_slave_with_master; diff --git a/mysql-test/extra/rpl_tests/rpl_loaddata.test b/mysql-test/extra/rpl_tests/rpl_loaddata.test index 2c23777dcba..08e89c20973 100644 --- a/mysql-test/extra/rpl_tests/rpl_loaddata.test +++ b/mysql-test/extra/rpl_tests/rpl_loaddata.test @@ -20,8 +20,11 @@ connection slave; reset master; connection master; +select last_insert_id(); create table t1(a int not null auto_increment, b int, primary key(a) ); load data infile '../std_data_ln/rpl_loaddata.dat' into table t1; +# verify that LAST_INSERT_ID() is set by LOAD DATA INFILE +select last_insert_id(); create temporary table t2 (day date,id int(9),category enum('a','b','c'),name varchar(60)); load data infile '../std_data_ln/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' starting by '>' ignore 1 lines; diff --git a/mysql-test/lib/mtr_cases.pl b/mysql-test/lib/mtr_cases.pl index 9d5ddfd526e..b0ba27e6736 100644 --- a/mysql-test/lib/mtr_cases.pl +++ b/mysql-test/lib/mtr_cases.pl @@ -40,6 +40,23 @@ sub collect_test_cases ($) { opendir(TESTDIR, $testdir) or mtr_error("Can't open dir \"$testdir\": $!"); + # ---------------------------------------------------------------------- + # Disable some tests listed in disabled.def + # ---------------------------------------------------------------------- + my %disabled; + if ( open(DISABLED, "$testdir/disabled.def" ) ) + { + while ( <DISABLED> ) + { + chomp; + if ( /^\s*(\S+)\s*:\s*(.*?)\s*$/ ) + { + $disabled{$1}= $2; + } + } + close DISABLED; + } + if ( @::opt_cases ) { foreach my $tname ( @::opt_cases ) { # Run in specified order, no sort @@ -103,30 +120,13 @@ sub collect_test_cases ($) { } } - collect_one_test_case($testdir,$resdir,$tname,$elem,$cases,{}, + collect_one_test_case($testdir,$resdir,$tname,$elem,$cases,\%disabled, $component_id); } closedir TESTDIR; } else { - # ---------------------------------------------------------------------- - # Disable some tests listed 
in disabled.def - # ---------------------------------------------------------------------- - my %disabled; - if ( ! $::opt_ignore_disabled_def and open(DISABLED, "$testdir/disabled.def" ) ) - { - while ( <DISABLED> ) - { - chomp; - if ( /^\s*(\S+)\s*:\s*(.*?)\s*$/ ) - { - $disabled{$1}= $2; - } - } - close DISABLED; - } - foreach my $elem ( sort readdir(TESTDIR) ) { my $component_id= undef; my $tname= undef; @@ -418,20 +418,35 @@ sub collect_one_test_case($$$$$$$) { } # FIXME why this late? + my $marked_as_disabled= 0; if ( $disabled->{$tname} ) { - $tinfo->{'skip'}= 1; - $tinfo->{'disable'}= 1; # Sub type of 'skip' - $tinfo->{'comment'}= $disabled->{$tname} if $disabled->{$tname}; + $marked_as_disabled= 1; + $tinfo->{'comment'}= $disabled->{$tname}; } if ( -f $disabled_file ) { - $tinfo->{'skip'}= 1; - $tinfo->{'disable'}= 1; # Sub type of 'skip' + $marked_as_disabled= 1; $tinfo->{'comment'}= mtr_fromfile($disabled_file); } + # If test was marked as disabled, either opt_enable_disabled is off and then + # we skip this test, or it is on and then we run this test but warn + + if ( $marked_as_disabled ) + { + if ( $::opt_enable_disabled ) + { + $tinfo->{'dont_skip_though_disabled'}= 1; + } + else + { + $tinfo->{'skip'}= 1; + $tinfo->{'disable'}= 1; # Sub type of 'skip' + } + } + if ( $component_id eq 'im' ) { if ( $::glob_use_embedded_server ) diff --git a/mysql-test/lib/mtr_report.pl b/mysql-test/lib/mtr_report.pl index e418af57d3a..6e3796133f2 100644 --- a/mysql-test/lib/mtr_report.pl +++ b/mysql-test/lib/mtr_report.pl @@ -10,6 +10,7 @@ sub mtr_report_test_name($); sub mtr_report_test_passed($); sub mtr_report_test_failed($); sub mtr_report_test_skipped($); +sub mtr_report_test_not_skipped_though_disabled($); sub mtr_show_failed_diff ($); sub mtr_report_stats ($); @@ -101,6 +102,23 @@ sub mtr_report_test_skipped ($) { } } +sub mtr_report_tests_not_skipped_though_disabled ($) { + my $tests= shift; + + if ( $::opt_enable_disabled ) + { + my @disabled_tests= grep {$_->{'dont_skip_though_disabled'}} @$tests; + if ( @disabled_tests ) + { + print "\nTest(s) which will be run though they are marked as disabled:\n"; + foreach my $tinfo ( sort {$a->{'name'} cmp $b->{'name'}} @disabled_tests ) + { + printf " %-20s : %s\n", $tinfo->{'name'}, $tinfo->{'comment'}; + } + } + } +} + sub mtr_report_test_passed ($) { my $tinfo= shift; diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index cce8decc271..850b2eb7ff3 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -216,6 +216,7 @@ our $opt_extern; our $opt_fast; our $opt_force; our $opt_reorder; +our $opt_enable_disabled; our $opt_gcov; our $opt_gcov_err; @@ -286,7 +287,7 @@ our $opt_user_test; our $opt_valgrind= 0; our $opt_valgrind_mysqld= 0; our $opt_valgrind_mysqltest= 0; -our $default_valgrind_options= "-v --show-reachable=yes"; +our $default_valgrind_options= "--show-reachable=yes"; our $opt_valgrind_options; our $opt_valgrind_path; @@ -657,6 +658,7 @@ sub command_line_setup () { 'fast' => \$opt_fast, 'netware' => \$opt_netware, 'reorder' => \$opt_reorder, + 'enable-disabled' => \$opt_enable_disabled, 'script-debug' => \$opt_script_debug, 'verbose' => \$opt_verbose, 'sleep=i' => \$opt_sleep, @@ -1914,12 +1916,12 @@ sub run_suite () { mtr_print_thick_line(); - mtr_report("Finding Tests in the '$suite' suite"); - mtr_timer_start($glob_timers,"suite", 60 * $opt_suite_timeout); mtr_report("Starting Tests in the '$suite' suite"); + mtr_report_tests_not_skipped_though_disabled($tests); + 
mtr_print_header(); foreach my $tinfo ( @$tests ) @@ -3612,9 +3614,16 @@ sub run_check_testcase ($$) { } +sub generate_cmdline_mysqldump ($) { + my($info) = @_; + return + "$exe_mysqldump --no-defaults -uroot " . + "--port=$info->[0]->{'port'} " . + "--socket=$info->[0]->{'path_sock'} --password="; +} + sub run_mysqltest ($) { my $tinfo= shift; - my $cmdline_mysqlcheck= "$exe_mysqlcheck --no-defaults -uroot " . "--port=$master->[0]->{'port'} " . "--socket=$master->[0]->{'path_sock'} --password="; @@ -3624,17 +3633,15 @@ sub run_mysqltest ($) { " --debug=d:t:A,$opt_vardir_trace/log/mysqlcheck.trace"; } - my $cmdline_mysqldump= "$exe_mysqldump --no-defaults -uroot " . - "--port=$master->[0]->{'port'} " . - "--socket=$master->[0]->{'path_sock'} --password="; - - my $cmdline_mysqldumpslave= "$exe_mysqldump --no-defaults -uroot " . - "--socket=$slave->[0]->{'path_sock'} --password="; + my $cmdline_mysqldump= generate_cmdline_mysqldump $master; + my $cmdline_mysqldumpslave= generate_cmdline_mysqldump $slave; if ( $opt_debug ) { $cmdline_mysqldump .= - " --debug=d:t:A,$opt_vardir_trace/log/mysqldump.trace"; + " --debug=d:t:A,$opt_vardir_trace/log/mysqldump-master.trace"; + $cmdline_mysqldumpslave .= + " --debug=d:t:A,$opt_vardir_trace/log/mysqldump-slave.trace"; } my $cmdline_mysqlslap; @@ -3691,6 +3698,12 @@ sub run_mysqltest ($) { "--port=$master->[0]->{'port'} " . "--socket=$master->[0]->{'path_sock'}"; + if ( $opt_debug ) + { + $cmdline_mysql_client_test .= + " --debug=d:t:A,$opt_vardir_trace/log/mysql_client_test.trace"; + } + if ( $glob_use_embedded_server ) { $cmdline_mysql_client_test.= diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index 4cbb1bece9c..b74965b706e 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -2178,12 +2178,7 @@ then USE_NDBCLUSTER_OPT= fi -# Do not automagically start daemons if we are in gdb or running only one -# test case - if [ -z "$DO_GDB" ] && [ -z "$DO_DDD" ] - then - mysql_start - fi + mysql_start $ECHO "Loading Standard Test Databases" mysql_loadstd fi diff --git a/mysql-test/r/auto_increment.result b/mysql-test/r/auto_increment.result index 10f26c40553..9ab955da57c 100644 --- a/mysql-test/r/auto_increment.result +++ b/mysql-test/r/auto_increment.result @@ -143,7 +143,7 @@ explain extended select last_insert_id(); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache last_insert_id() AS `last_insert_id()` +Note 1003 select last_insert_id() AS `last_insert_id()` insert into t1 set i = 254; ERROR 23000: Duplicate entry '254' for key 'PRIMARY' select last_insert_id(); @@ -153,7 +153,7 @@ insert into t1 set i = null; ERROR 23000: Duplicate entry '255' for key 'PRIMARY' select last_insert_id(); last_insert_id() -0 +255 drop table t1; create table t1 (i tinyint unsigned not null auto_increment, key (i)); insert into t1 set i = 254; @@ -181,7 +181,7 @@ insert into t1 values (NULL, 10); ERROR 23000: Duplicate entry '10' for key 'b' select last_insert_id(); last_insert_id() -0 +2 drop table t1; create table t1(a int auto_increment,b int null,primary key(a)); SET SQL_MODE=NO_AUTO_VALUE_ON_ZERO; @@ -446,3 +446,57 @@ INSERT INTO t1 VALUES(1, 1); ALTER TABLE t1 CHANGE t1 t1 INT(10) auto_increment; ERROR 23000: ALTER TABLE causes auto_increment resequencing, resulting in duplicate entry '1' for key 'PRIMARY' DROP TABLE t1; +CREATE TABLE `t2` ( +`k` int(11) NOT NULL auto_increment, +`a` int(11) default NULL, 
+`c` int(11) default NULL, +PRIMARY KEY (`k`), +UNIQUE KEY `idx_1` (`a`) +) ENGINE=InnoDB; +insert into t2 ( a ) values ( 6 ) on duplicate key update c = +ifnull( c, +0 ) + 1; +insert into t2 ( a ) values ( 7 ) on duplicate key update c = +ifnull( c, +0 ) + 1; +select last_insert_id(); +last_insert_id() +2 +select * from t2; +k a c +1 6 NULL +2 7 NULL +insert into t2 ( a ) values ( 6 ) on duplicate key update c = +ifnull( c, +0 ) + 1; +select last_insert_id(); +last_insert_id() +1 +select * from t2; +k a c +1 6 1 +2 7 NULL +insert ignore into t2 values (null,6,1),(10,8,1); +select last_insert_id(); +last_insert_id() +1 +insert ignore into t2 values (null,6,1),(null,8,1),(null,15,1),(null,20,1); +select last_insert_id(); +last_insert_id() +11 +select * from t2; +k a c +1 6 1 +2 7 NULL +10 8 1 +11 15 1 +12 20 1 +drop table t2; +create table t1 (a int primary key auto_increment, b int, c int, d timestamp default current_timestamp, unique(b),unique(c)); +insert into t1 values(null,1,1,now()); +insert into t1 values(null,0,0,null); +replace into t1 values(null,1,0,null); +select last_insert_id(); +last_insert_id() +3 +drop table t1; diff --git a/mysql-test/r/binlog_row_binlog.result b/mysql-test/r/binlog_row_binlog.result index 6cb086109b4..17c1d171b5c 100644 --- a/mysql-test/r/binlog_row_binlog.result +++ b/mysql-test/r/binlog_row_binlog.result @@ -235,3 +235,37 @@ master-bin.000001 # Rotate 1 # master-bin.000002;pos=4 show binlog events in 'master-bin.000002' from 102; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000002 # Query 1 # use `test`; drop table t1 +reset master; +create table t1 (id tinyint auto_increment primary key); +set insert_id=128; +insert into t1 values(null); +Warnings: +Warning 1264 Out of range value for column 'id' at row 1 +select * from t1; +id +127 +drop table t1; +create table t1 (a int not null auto_increment, primary key (a)) engine=myisam; +set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1; +insert delayed into t1 values (207); +insert delayed into t1 values (null); +insert delayed into t1 values (300); +select * from t1; +a +207 +208 +300 +show binlog events from 102; +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Query 1 # use `test`; create table t1 (id tinyint auto_increment primary key) +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; drop table t1 +master-bin.000001 # Query 1 # use `test`; create table t1 (a int not null auto_increment, primary key (a)) engine=myisam +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +drop table t1; diff --git a/mysql-test/r/binlog_row_mix_innodb_myisam.result b/mysql-test/r/binlog_row_mix_innodb_myisam.result index 32c21a01f27..ae66f98739d 100644 --- a/mysql-test/r/binlog_row_mix_innodb_myisam.result +++ b/mysql-test/r/binlog_row_mix_innodb_myisam.result @@ -8,12 +8,12 @@ insert into t2 select * from t1; commit; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Table_map 1 # table_id: # (test.t1) -master-bin.000001 209 
Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 243 Table_map 1 # table_id: # (test.t2) -master-bin.000001 282 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 316 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ delete from t1; delete from t2; reset master; @@ -25,12 +25,12 @@ Warnings: Warning 1196 Some non-transactional changed tables couldn't be rolled back show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Table_map 1 # table_id: # (test.t1) -master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 243 Table_map 1 # table_id: # (test.t2) -master-bin.000001 282 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 316 Query 1 # use `test`; ROLLBACK +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; ROLLBACK delete from t1; delete from t2; reset master; @@ -45,16 +45,16 @@ Warning 1196 Some non-transactional changed tables couldn't be rolled back commit; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Table_map 1 # table_id: # (test.t1) -master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 243 Query 1 # use `test`; savepoint my_savepoint -master-bin.000001 328 Table_map 1 # table_id: # (test.t1) -master-bin.000001 367 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 401 Table_map 1 # table_id: # (test.t2) -master-bin.000001 440 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 479 Query 1 # use `test`; rollback to savepoint my_savepoint -master-bin.000001 576 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; savepoint my_savepoint +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; rollback to savepoint my_savepoint +master-bin.000001 # Xid 1 # COMMIT /* xid= */ delete from t1; delete from t2; reset master; @@ -74,18 +74,18 @@ a 7 show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Table_map 1 # table_id: # (test.t1) -master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 243 Query 1 # use `test`; savepoint my_savepoint -master-bin.000001 328 Table_map 1 # table_id: # (test.t1) -master-bin.000001 367 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 401 Table_map 1 # table_id: # (test.t2) 
-master-bin.000001 440 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 479 Query 1 # use `test`; rollback to savepoint my_savepoint -master-bin.000001 576 Table_map 1 # table_id: # (test.t1) -master-bin.000001 615 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 649 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; savepoint my_savepoint +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; rollback to savepoint my_savepoint +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ delete from t1; delete from t2; reset master; @@ -100,12 +100,12 @@ get_lock("a",10) 1 show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Table_map 1 # table_id: # (test.t1) -master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 243 Table_map 1 # table_id: # (test.t2) -master-bin.000001 282 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 316 Query 1 # use `test`; ROLLBACK +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; ROLLBACK delete from t1; delete from t2; reset master; @@ -113,11 +113,11 @@ insert into t1 values(9); insert into t2 select * from t1; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Table_map 1 # table_id: # (test.t1) -master-bin.000001 141 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 175 Xid 1 # COMMIT /* xid= */ -master-bin.000001 202 Table_map 1 # table_id: # (test.t2) -master-bin.000001 241 Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F delete from t1; delete from t2; reset master; @@ -126,24 +126,24 @@ begin; insert into t2 select * from t1; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Table_map 1 # table_id: # (test.t1) -master-bin.000001 141 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 175 Xid 1 # COMMIT /* xid= */ -master-bin.000001 202 Table_map 1 # table_id: # (test.t2) -master-bin.000001 241 Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F insert into t1 values(11); commit; 
show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Table_map 1 # table_id: # (test.t1) -master-bin.000001 141 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 175 Xid 1 # COMMIT /* xid= */ -master-bin.000001 202 Table_map 1 # table_id: # (test.t2) -master-bin.000001 241 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 275 Query 1 # use `test`; BEGIN -master-bin.000001 343 Table_map 1 # table_id: # (test.t1) -master-bin.000001 382 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 416 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ alter table t2 engine=INNODB; delete from t1; delete from t2; @@ -154,12 +154,12 @@ insert into t2 select * from t1; commit; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Table_map 1 # table_id: # (test.t1) -master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 243 Table_map 1 # table_id: # (test.t2) -master-bin.000001 282 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 316 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ delete from t1; delete from t2; reset master; @@ -181,10 +181,10 @@ rollback to savepoint my_savepoint; commit; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Table_map 1 # table_id: # (test.t1) -master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 243 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ delete from t1; delete from t2; reset master; @@ -202,12 +202,12 @@ a 18 show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Table_map 1 # table_id: # (test.t1) -master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 243 Table_map 1 # table_id: # (test.t1) -master-bin.000001 282 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 316 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ delete from t1; delete from t2; alter table 
t2 engine=MyISAM; @@ -254,28 +254,28 @@ get_lock("lock1",60) 1 show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Table_map 1 # table_id: # (test.t1) -master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 243 Table_map 1 # table_id: # (test.t1) -master-bin.000001 282 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 316 Xid 1 # COMMIT /* xid= */ -master-bin.000001 343 Table_map 1 # table_id: # (test.t1) -master-bin.000001 382 Delete_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 421 Xid 1 # COMMIT /* xid= */ -master-bin.000001 448 Query 1 # use `test`; alter table t2 engine=MyISAM -master-bin.000001 539 Table_map 1 # table_id: # (test.t1) -master-bin.000001 578 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 612 Xid 1 # COMMIT /* xid= */ -master-bin.000001 639 Table_map 1 # table_id: # (test.t2) -master-bin.000001 678 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 712 Query 1 # use `test`; drop table t1,t2 -master-bin.000001 791 Query 1 # use `test`; create table t0 (n int) -master-bin.000001 877 Table_map 1 # table_id: # (test.t0) -master-bin.000001 916 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 950 Table_map 1 # table_id: # (test.t0) -master-bin.000001 989 Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 1023 Query 1 # use `test`; create table t2 (n int) engine=innodb +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Delete_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; alter table t2 engine=MyISAM +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; drop table t1,t2 +master-bin.000001 # Query 1 # use `test`; create table t0 (n int) +master-bin.000001 # Table_map 1 # table_id: # (test.t0) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t0) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; create table t2 (n int) engine=innodb do release_lock("lock1"); drop table t0,t2; set autocommit=0; @@ -357,83 +357,55 @@ a b DROP TABLE t1,t2; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Table_map 1 142 table_id: # (test.t1) -master-bin.000001 142 Write_rows 1 189 table_id: # flags: STMT_END_F -master-bin.000001 189 Query 1 257 use `test`; BEGIN -master-bin.000001 257 Query 1 182 use `test`; CREATE TABLE `t2` ( +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; CREATE TABLE `t2` ( `a` int(11) NOT NULL DEFAULT 
'0', `b` int(11) DEFAULT NULL, PRIMARY KEY (`a`) ) ENGINE=InnoDB -master-bin.000001 439 Table_map 1 222 table_id: # (test.t2) -master-bin.000001 479 Write_rows 1 260 table_id: # flags: STMT_END_F -master-bin.000001 517 Xid 1 544 COMMIT /* xid= */ -master-bin.000001 544 Query 1 630 use `test`; DROP TABLE if exists t2 -master-bin.000001 630 Table_map 1 670 table_id: # (test.t1) -master-bin.000001 670 Write_rows 1 708 table_id: # flags: STMT_END_F -master-bin.000001 708 Query 1 776 use `test`; BEGIN -master-bin.000001 776 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` ( +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; DROP TABLE if exists t2 +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; DROP TABLE IF EXISTS t2 +master-bin.000001 # Query 1 # use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; CREATE TABLE `t2` ( `a` int(11) NOT NULL DEFAULT '0', `b` int(11) DEFAULT NULL, PRIMARY KEY (`a`) ) ENGINE=InnoDB -master-bin.000001 968 Query 1 1039 use `test`; ROLLBACK -master-bin.000001 1039 Query 1 1125 use `test`; DROP TABLE IF EXISTS t2 -master-bin.000001 1125 Query 1 1249 use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb -master-bin.000001 1249 Table_map 1 1289 table_id: # (test.t1) -master-bin.000001 1289 Write_rows 1 1327 table_id: # flags: STMT_END_F -master-bin.000001 1327 Query 1 1395 use `test`; BEGIN -master-bin.000001 1395 Query 1 182 use `test`; CREATE TABLE `t2` ( - `a` int(11) NOT NULL DEFAULT '0', - `b` int(11) DEFAULT NULL, - PRIMARY KEY (`a`) -) ENGINE=InnoDB -master-bin.000001 1577 Table_map 1 222 table_id: # (test.t2) -master-bin.000001 1617 Write_rows 1 260 table_id: # flags: STMT_END_F -master-bin.000001 1655 Xid 1 1682 COMMIT /* xid= */ -master-bin.000001 1682 Query 1 80 use `test`; TRUNCATE table t2 -master-bin.000001 1762 Xid 1 1789 COMMIT /* xid= */ -master-bin.000001 1789 Table_map 1 1829 table_id: # (test.t1) -master-bin.000001 1829 Write_rows 1 1867 table_id: # flags: STMT_END_F -master-bin.000001 1867 Query 1 1935 use `test`; BEGIN -master-bin.000001 1935 Table_map 1 40 table_id: # (test.t2) -master-bin.000001 1975 Write_rows 1 78 table_id: # flags: STMT_END_F -master-bin.000001 2013 Xid 1 2040 COMMIT /* xid= */ -master-bin.000001 2040 Query 1 2116 use `test`; DROP TABLE t2 -master-bin.000001 2116 Table_map 1 2156 table_id: # (test.t1) -master-bin.000001 2156 Write_rows 1 2194 table_id: # flags: STMT_END_F -master-bin.000001 2194 Table_map 1 2234 table_id: # (test.t1) -master-bin.000001 2234 Write_rows 1 2272 table_id: # flags: STMT_END_F -master-bin.000001 2272 Table_map 1 2312 table_id: # (test.t1) -master-bin.000001 2312 Write_rows 1 2350 table_id: # flags: STMT_END_F -master-bin.000001 2350 Query 1 2418 use `test`; BEGIN -master-bin.000001 2418 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` ( - `a` int(11) NOT NULL DEFAULT '0', - `b` int(11) DEFAULT NULL, - PRIMARY KEY (`a`) -) ENGINE=InnoDB -master-bin.000001 2610 Xid 1 2637 COMMIT /* xid= */ -master-bin.000001 2637 Table_map 1 2677 table_id: # (test.t1) -master-bin.000001 
2677 Write_rows 1 2715 table_id: # flags: STMT_END_F -master-bin.000001 2715 Query 1 2783 use `test`; BEGIN -master-bin.000001 2783 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` ( - `a` int(11) NOT NULL DEFAULT '0', - `b` int(11) DEFAULT NULL, - PRIMARY KEY (`a`) -) ENGINE=InnoDB -master-bin.000001 2975 Query 1 3046 use `test`; ROLLBACK -master-bin.000001 3046 Query 1 80 use `test`; TRUNCATE table t2 -master-bin.000001 3126 Xid 1 3153 COMMIT /* xid= */ -master-bin.000001 3153 Table_map 1 3193 table_id: # (test.t1) -master-bin.000001 3193 Write_rows 1 3231 table_id: # flags: STMT_END_F -master-bin.000001 3231 Query 1 3299 use `test`; BEGIN -master-bin.000001 3299 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` ( - `a` int(11) NOT NULL DEFAULT '0', - `b` int(11) DEFAULT NULL, - PRIMARY KEY (`a`) -) ENGINE=InnoDB -master-bin.000001 3491 Xid 1 3518 COMMIT /* xid= */ -master-bin.000001 3518 Query 1 3622 use `test`; DROP TABLE `t1` /* generated by server */ +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; TRUNCATE table t2 +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Table_map 1 # table_id: # (test.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; DROP TABLE t2 +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; TRUNCATE table t2 +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Table_map 1 # table_id: # (test.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `test`; DROP TABLE `t1` /* generated by server */ reset master; create table t1 (a int) engine=innodb; create table t2 (a int) engine=myisam; diff --git a/mysql-test/r/binlog_stm_binlog.result b/mysql-test/r/binlog_stm_binlog.result index f9180b69ab6..4e23db4828f 100644 --- a/mysql-test/r/binlog_stm_binlog.result +++ b/mysql-test/r/binlog_stm_binlog.result @@ -145,3 +145,35 @@ master-bin.000001 # Rotate 1 # master-bin.000002;pos=4 show binlog events in 'master-bin.000002' from 102; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000002 # Query 1 # use `test`; drop table t1 +reset master; +create table t1 (id tinyint auto_increment primary key); +set insert_id=128; +insert into t1 values(null); +Warnings: +Warning 1264 Out of range value for column 'id' at row 1 +select * from t1; +id +127 +drop table t1; +create table t1 (a int not null auto_increment, primary key (a)) engine=myisam; +set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1; +insert delayed into t1 values (207); +insert delayed into t1 values (null); +insert delayed into t1 values (300); +select * from t1; +a +207 +208 +300 
+show binlog events from 102; +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Query 1 # use `test`; create table t1 (id tinyint auto_increment primary key) +master-bin.000001 # Intvar 1 # INSERT_ID=127 +master-bin.000001 # Query 1 # use `test`; insert into t1 values(null) +master-bin.000001 # Query 1 # use `test`; drop table t1 +master-bin.000001 # Query 1 # use `test`; create table t1 (a int not null auto_increment, primary key (a)) engine=myisam +master-bin.000001 # Query 1 # use `test`; insert delayed into t1 values (207) +master-bin.000001 # Intvar 1 # INSERT_ID=208 +master-bin.000001 # Query 1 # use `test`; insert delayed into t1 values (null) +master-bin.000001 # Query 1 # use `test`; insert delayed into t1 values (300) +drop table t1; diff --git a/mysql-test/r/binlog_stm_mix_innodb_myisam.result b/mysql-test/r/binlog_stm_mix_innodb_myisam.result index e836cae0b15..95b6eb953a2 100644 --- a/mysql-test/r/binlog_stm_mix_innodb_myisam.result +++ b/mysql-test/r/binlog_stm_mix_innodb_myisam.result @@ -8,10 +8,10 @@ insert into t2 select * from t1; commit; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Query 1 # use `test`; insert into t1 values(1) -master-bin.000001 257 Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 351 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; insert into t1 values(1) +master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 +master-bin.000001 # Xid 1 # COMMIT /* xid= */ delete from t1; delete from t2; reset master; @@ -23,10 +23,10 @@ Warnings: Warning 1196 Some non-transactional changed tables couldn't be rolled back show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Query 1 # use `test`; insert into t1 values(2) -master-bin.000001 257 Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 351 Query 1 # use `test`; ROLLBACK +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; insert into t1 values(2) +master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query 1 # use `test`; ROLLBACK delete from t1; delete from t2; reset master; @@ -41,13 +41,13 @@ Warning 1196 Some non-transactional changed tables couldn't be rolled back commit; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Query 1 # use `test`; insert into t1 values(3) -master-bin.000001 257 Query 1 # use `test`; savepoint my_savepoint -master-bin.000001 342 Query 1 # use `test`; insert into t1 values(4) -master-bin.000001 429 Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 523 Query 1 # use `test`; rollback to savepoint my_savepoint -master-bin.000001 620 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; insert into t1 values(3) +master-bin.000001 # Query 1 # use `test`; savepoint my_savepoint +master-bin.000001 # Query 1 # use `test`; insert into t1 values(4) +master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query 1 # use `test`; rollback to savepoint my_savepoint +master-bin.000001 # Xid 1 # COMMIT /* xid= */ delete from t1; 
delete from t2; reset master; @@ -67,14 +67,14 @@ a 7 show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Query 1 # use `test`; insert into t1 values(5) -master-bin.000001 257 Query 1 # use `test`; savepoint my_savepoint -master-bin.000001 342 Query 1 # use `test`; insert into t1 values(6) -master-bin.000001 429 Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 523 Query 1 # use `test`; rollback to savepoint my_savepoint -master-bin.000001 620 Query 1 # use `test`; insert into t1 values(7) -master-bin.000001 707 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; insert into t1 values(5) +master-bin.000001 # Query 1 # use `test`; savepoint my_savepoint +master-bin.000001 # Query 1 # use `test`; insert into t1 values(6) +master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query 1 # use `test`; rollback to savepoint my_savepoint +master-bin.000001 # Query 1 # use `test`; insert into t1 values(7) +master-bin.000001 # Xid 1 # COMMIT /* xid= */ delete from t1; delete from t2; reset master; @@ -89,10 +89,10 @@ get_lock("a",10) 1 show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Query 1 # use `test`; insert into t1 values(8) -master-bin.000001 257 Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 351 Query 1 # use `test`; ROLLBACK +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; insert into t1 values(8) +master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query 1 # use `test`; ROLLBACK delete from t1; delete from t2; reset master; @@ -100,9 +100,9 @@ insert into t1 values(9); insert into t2 select * from t1; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; insert into t1 values(9) -master-bin.000001 189 Xid 1 # COMMIT /* xid= */ -master-bin.000001 216 Query 1 # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query 1 # use `test`; insert into t1 values(9) +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 delete from t1; delete from t2; reset master; @@ -111,19 +111,19 @@ begin; insert into t2 select * from t1; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; insert into t1 values(10) -master-bin.000001 190 Xid 1 # COMMIT /* xid= */ -master-bin.000001 217 Query 1 # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query 1 # use `test`; insert into t1 values(10) +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 insert into t1 values(11); commit; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; insert into t1 values(10) -master-bin.000001 190 Xid 1 # COMMIT /* xid= */ -master-bin.000001 217 Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 311 Query 1 # use `test`; BEGIN -master-bin.000001 379 Query 1 # use `test`; insert into t1 values(11) -master-bin.000001 467 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; insert into 
t1 values(10) +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; insert into t1 values(11) +master-bin.000001 # Xid 1 # COMMIT /* xid= */ alter table t2 engine=INNODB; delete from t1; delete from t2; @@ -134,10 +134,10 @@ insert into t2 select * from t1; commit; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Query 1 # use `test`; insert into t1 values(12) -master-bin.000001 258 Query 1 # use `test`; insert into t2 select * from t1 -master-bin.000001 352 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; insert into t1 values(12) +master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1 +master-bin.000001 # Xid 1 # COMMIT /* xid= */ delete from t1; delete from t2; reset master; @@ -159,9 +159,9 @@ rollback to savepoint my_savepoint; commit; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Query 1 # use `test`; insert into t1 values(14) -master-bin.000001 258 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; insert into t1 values(14) +master-bin.000001 # Xid 1 # COMMIT /* xid= */ delete from t1; delete from t2; reset master; @@ -179,10 +179,10 @@ a 18 show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Query 1 # use `test`; insert into t1 values(16) -master-bin.000001 258 Query 1 # use `test`; insert into t1 values(18) -master-bin.000001 346 Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; insert into t1 values(16) +master-bin.000001 # Query 1 # use `test`; insert into t1 values(18) +master-bin.000001 # Xid 1 # COMMIT /* xid= */ delete from t1; delete from t2; alter table t2 engine=MyISAM; @@ -229,29 +229,29 @@ get_lock("lock1",60) 1 show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 # use `test`; BEGIN -master-bin.000001 170 Query 1 # use `test`; insert into t1 values(16) -master-bin.000001 258 Query 1 # use `test`; insert into t1 values(18) -master-bin.000001 346 Xid 1 # COMMIT /* xid= */ -master-bin.000001 373 Query 1 # use `test`; delete from t1 -master-bin.000001 450 Xid 1 # COMMIT /* xid= */ -master-bin.000001 477 Query 1 # use `test`; delete from t2 -master-bin.000001 554 Xid 1 # COMMIT /* xid= */ -master-bin.000001 581 Query 1 # use `test`; alter table t2 engine=MyISAM -master-bin.000001 672 Query 1 # use `test`; insert into t1 values (1) -master-bin.000001 760 Xid 1 # COMMIT /* xid= */ -master-bin.000001 787 Query 1 # use `test`; insert into t2 values (20) -master-bin.000001 876 Query 1 # use `test`; drop table t1,t2 -master-bin.000001 955 Query 1 # use `test`; create temporary table ti (a int) engine=innodb -master-bin.000001 1065 Query 1 # use `test`; insert into ti values(1) -master-bin.000001 1152 Xid 1 # COMMIT /* xid= */ -master-bin.000001 1179 Query 1 # use `test`; create temporary table t1 (a int) engine=myisam -master-bin.000001 1289 Query 1 # use `test`; insert t1 values (1) -master-bin.000001 1372 Query 1 # use `test`; create 
table t0 (n int) -master-bin.000001 1458 Query 1 # use `test`; insert t0 select * from t1 -master-bin.000001 1547 Query 1 # use `test`; insert into t0 select GET_LOCK("lock1",null) -master-bin.000001 1654 Query 1 # use `test`; create table t2 (n int) engine=innodb -master-bin.000001 1754 Query 1 # use `test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `test`.`t1`,`test`.`ti` +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; insert into t1 values(16) +master-bin.000001 # Query 1 # use `test`; insert into t1 values(18) +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; delete from t1 +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; delete from t2 +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; alter table t2 engine=MyISAM +master-bin.000001 # Query 1 # use `test`; insert into t1 values (1) +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; insert into t2 values (20) +master-bin.000001 # Query 1 # use `test`; drop table t1,t2 +master-bin.000001 # Query 1 # use `test`; create temporary table ti (a int) engine=innodb +master-bin.000001 # Query 1 # use `test`; insert into ti values(1) +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; create temporary table t1 (a int) engine=myisam +master-bin.000001 # Query 1 # use `test`; insert t1 values (1) +master-bin.000001 # Query 1 # use `test`; create table t0 (n int) +master-bin.000001 # Query 1 # use `test`; insert t0 select * from t1 +master-bin.000001 # Query 1 # use `test`; insert into t0 select GET_LOCK("lock1",null) +master-bin.000001 # Query 1 # use `test`; create table t2 (n int) engine=innodb +master-bin.000001 # Query 1 # use `test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `test`.`t1`,`test`.`ti` do release_lock("lock1"); drop table t0,t2; set autocommit=0; @@ -333,28 +333,28 @@ a b DROP TABLE t1,t2; show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 102 Query 1 198 use `test`; INSERT INTO t1 values (1,1),(1,2) -master-bin.000001 198 Query 1 284 use `test`; DROP TABLE if exists t2 -master-bin.000001 284 Query 1 374 use `test`; INSERT INTO t1 values (3,3) -master-bin.000001 374 Query 1 460 use `test`; DROP TABLE IF EXISTS t2 -master-bin.000001 460 Query 1 584 use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb -master-bin.000001 584 Query 1 674 use `test`; INSERT INTO t1 VALUES (4,4) -master-bin.000001 674 Query 1 80 use `test`; TRUNCATE table t2 -master-bin.000001 754 Xid 1 781 COMMIT /* xid= */ -master-bin.000001 781 Query 1 871 use `test`; INSERT INTO t1 VALUES (5,5) -master-bin.000001 871 Query 1 947 use `test`; DROP TABLE t2 -master-bin.000001 947 Query 1 1037 use `test`; INSERT INTO t1 values (6,6) -master-bin.000001 1037 Query 1 1171 use `test`; CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb -master-bin.000001 1171 Query 1 1261 use `test`; INSERT INTO t1 values (7,7) -master-bin.000001 1261 Query 1 1351 use `test`; INSERT INTO t1 values (8,8) -master-bin.000001 1351 Query 1 1441 use `test`; INSERT INTO t1 values (9,9) -master-bin.000001 1441 Query 1 80 use `test`; TRUNCATE table t2 -master-bin.000001 1521 Xid 1 1548 COMMIT /* xid= */ -master-bin.000001 1548 Query 1 1640 use `test`; INSERT INTO t1 values (10,10) -master-bin.000001 1640 Query 1 1708 use `test`; BEGIN -master-bin.000001 1708 Query 1 94 use 
`test`; INSERT INTO t2 values (100,100) -master-bin.000001 1802 Xid 1 1829 COMMIT /* xid= */ -master-bin.000001 1829 Query 1 1908 use `test`; DROP TABLE t1,t2 +master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (1,1),(1,2) +master-bin.000001 # Query 1 # use `test`; DROP TABLE if exists t2 +master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (3,3) +master-bin.000001 # Query 1 # use `test`; DROP TABLE IF EXISTS t2 +master-bin.000001 # Query 1 # use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb +master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 VALUES (4,4) +master-bin.000001 # Query 1 # use `test`; TRUNCATE table t2 +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 VALUES (5,5) +master-bin.000001 # Query 1 # use `test`; DROP TABLE t2 +master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (6,6) +master-bin.000001 # Query 1 # use `test`; CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb +master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (7,7) +master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (8,8) +master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (9,9) +master-bin.000001 # Query 1 # use `test`; TRUNCATE table t2 +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (10,10) +master-bin.000001 # Query 1 # use `test`; BEGIN +master-bin.000001 # Query 1 # use `test`; INSERT INTO t2 values (100,100) +master-bin.000001 # Xid 1 # COMMIT /* xid= */ +master-bin.000001 # Query 1 # use `test`; DROP TABLE t1,t2 reset master; create table t1 (a int) engine=innodb; create table t2 (a int) engine=myisam; diff --git a/mysql-test/r/ctype_ucs2_def.result b/mysql-test/r/ctype_ucs2_def.result index 897dbac251c..2f9dc4ae616 100644 --- a/mysql-test/r/ctype_ucs2_def.result +++ b/mysql-test/r/ctype_ucs2_def.result @@ -1,3 +1,6 @@ +show variables like 'collation_server'; +Variable_name Value +collation_server ucs2_unicode_ci show variables like "%character_set_ser%"; Variable_name Value character_set_server ucs2 diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result index 5f5cd94c216..1b9d02a64d0 100644 --- a/mysql-test/r/federated.result +++ b/mysql-test/r/federated.result @@ -1558,6 +1558,8 @@ id 3 4 5 +DROP TABLE federated.t1; +DROP TABLE federated.t1; DROP TABLE IF EXISTS federated.bug_17377_table; CREATE TABLE federated.bug_17377_table ( `fld_cid` bigint(20) NOT NULL auto_increment, @@ -1601,7 +1603,120 @@ fld_cid fld_name fld_parentid fld_delt 5 Torkel 0 0 DROP TABLE federated.t1; DROP TABLE federated.bug_17377_table; -DROP TABLE federated.t1; +drop table if exists federated.t1; +create table federated.t1 (a int, b int, c int); +drop table if exists federated.t1; +drop table if exists federated.t2; +create table federated.t1 (a int, b int, c int) engine=federated connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; +create trigger federated.t1_bi before insert on federated.t1 for each row set new.c= new.a * new.b; +create table federated.t2 (a int, b int); +insert into federated.t2 values (13, 17), (19, 23); +insert into federated.t1 (a, b) values (1, 2), (3, 5), (7, 11); +select * from federated.t1 order by a; +a b c +1 2 2 +3 5 15 +7 11 77 +delete from federated.t1; +insert into federated.t1 (a, b) select * from federated.t2; +select * from federated.t1 order by a; +a b c +13 17 221 +19 23 437 +delete from federated.t1; +load data infile 
'../std_data_ln/loaddata5.dat' into table federated.t1 fields terminated by '' enclosed by '' ignore 1 lines (a, b); +select * from federated.t1 order by a; +a b c +3 4 12 +5 6 30 +drop tables federated.t1, federated.t2; +drop table federated.t1; +create table federated.t1 (i1 int, i2 int, i3 int); +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)); +create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2'; +insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); +insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); +select * from federated.t1 order by i1; +i1 i2 i3 +1 5 10 +2 2 2 +3 7 12 +4 5 2 +9 10 15 +select * from federated.t2; +id c1 c2 +9 abc def +5 opq lmn +2 test t t test +update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id; +select * from federated.t1 order by i1; +i1 i2 i3 +1 5 10 +2 15 2 +3 7 12 +4 5 2 +9 15 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t ppc +5 opq lmn +9 abc ppc +delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id; +select * from federated.t1 order by i1; +i1 i2 i3 +2 15 2 +3 7 12 +9 15 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t ppc +9 abc ppc +drop table federated.t1, federated.t2; +drop table federated.t1, federated.t2; +create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1)); +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id)); +create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; +create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2'; +insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); +insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); +select * from federated.t1 order by i1; +i1 i2 i3 +1 5 10 +2 2 2 +3 7 12 +4 5 2 +9 10 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t t test +5 opq lmn +9 abc def +update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id; +select * from federated.t1 order by i1; +i1 i2 i3 +1 5 10 +2 15 2 +3 7 12 +4 5 2 +9 15 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t ppc +5 opq lmn +9 abc ppc +delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id; +select * from federated.t1 order by i1; +i1 i2 i3 +2 15 2 +3 7 12 +9 15 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t ppc +9 abc ppc +drop table federated.t1, federated.t2; +drop table federated.t1, federated.t2; DROP TABLE IF EXISTS federated.t1; DROP DATABASE IF EXISTS federated; DROP TABLE IF EXISTS federated.t1; diff --git a/mysql-test/r/func_compress.result b/mysql-test/r/func_compress.result index 8d6fa9927ce..e3d31566741 100644 --- a/mysql-test/r/func_compress.result +++ b/mysql-test/r/func_compress.result @@ -11,7 +11,7 @@ explain extended select uncompress(compress(@test_compress_string)); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select 
sql_no_cache uncompress(compress((@test_compress_string))) AS `uncompress(compress(@test_compress_string))` +Note 1003 select uncompress(compress((@test_compress_string))) AS `uncompress(compress(@test_compress_string))` select uncompressed_length(compress(@test_compress_string))=length(@test_compress_string); uncompressed_length(compress(@test_compress_string))=length(@test_compress_string) 1 @@ -19,7 +19,7 @@ explain extended select uncompressed_length(compress(@test_compress_string))=len id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache (uncompressed_length(compress((@test_compress_string))) = length((@test_compress_string))) AS `uncompressed_length(compress(@test_compress_string))=length(@test_compress_string)` +Note 1003 select (uncompressed_length(compress((@test_compress_string))) = length((@test_compress_string))) AS `uncompressed_length(compress(@test_compress_string))=length(@test_compress_string)` select uncompressed_length(compress(@test_compress_string)); uncompressed_length(compress(@test_compress_string)) 117 diff --git a/mysql-test/r/func_math.result b/mysql-test/r/func_math.result index 96755932ca4..71df9e437ec 100644 --- a/mysql-test/r/func_math.result +++ b/mysql-test/r/func_math.result @@ -90,7 +90,7 @@ explain extended select rand(999999),rand(); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache rand(999999) AS `rand(999999)`,rand() AS `rand()` +Note 1003 select rand(999999) AS `rand(999999)`,rand() AS `rand()` select pi(),format(sin(pi()/2),6),format(cos(pi()/2),6),format(abs(tan(pi())),6),format(cot(1),6),format(asin(1),6),format(acos(0),6),format(atan(1),6); pi() format(sin(pi()/2),6) format(cos(pi()/2),6) format(abs(tan(pi())),6) format(cot(1),6) format(asin(1),6) format(acos(0),6) format(atan(1),6) 3.141593 1.000000 0.000000 0.000000 0.642093 1.570796 1.570796 0.785398 diff --git a/mysql-test/r/func_system.result b/mysql-test/r/func_system.result index e186afdfafa..2035f4219e0 100644 --- a/mysql-test/r/func_system.result +++ b/mysql-test/r/func_system.result @@ -41,7 +41,7 @@ explain extended select database(), user(); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache database() AS `database()`,user() AS `user()` +Note 1003 select database() AS `database()`,user() AS `user()` create table t1 (version char(60)) select database(), user(), version() as 'version'; show create table t1; Table Create Table diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index 5760ccdaa95..5b5bdc8fc35 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -729,7 +729,7 @@ explain extended select period_add("9602",-12),period_diff(199505,"9404"),from_d id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache period_add(_latin1'9602',-(12)) AS `period_add("9602",-12)`,period_diff(199505,_latin1'9404') AS `period_diff(199505,"9404")`,from_days(to_days(_latin1'960101')) AS `from_days(to_days("960101"))`,dayofmonth(_latin1'1997-01-02') AS `dayofmonth("1997-01-02")`,month(_latin1'1997-01-02') AS `month("1997-01-02")`,monthname(_latin1'1972-03-04') AS 
`monthname("1972-03-04")`,dayofyear(_latin1'0000-00-00') AS `dayofyear("0000-00-00")`,hour(_latin1'1997-03-03 23:03:22') AS `HOUR("1997-03-03 23:03:22")`,minute(_latin1'23:03:22') AS `MINUTE("23:03:22")`,second(230322) AS `SECOND(230322)`,quarter(980303) AS `QUARTER(980303)`,week(_latin1'1998-03-03',0) AS `WEEK("1998-03-03")`,yearweek(_latin1'2000-01-01',1) AS `yearweek("2000-01-01",1)`,week(19950101,1) AS `week(19950101,1)`,year(_latin1'98-02-03') AS `year("98-02-03")`,(weekday(curdate()) - weekday(now())) AS `weekday(curdate())-weekday(now())`,dayname(_latin1'1962-03-03') AS `dayname("1962-03-03")`,unix_timestamp() AS `unix_timestamp()`,sec_to_time((time_to_sec(_latin1'0:30:47') / 6.21)) AS `sec_to_time(time_to_sec("0:30:47")/6.21)`,curtime() AS `curtime()`,utc_time() AS `utc_time()`,curdate() AS `curdate()`,utc_date() AS `utc_date()`,utc_timestamp() AS `utc_timestamp()`,date_format(_latin1'1997-01-02 03:04:05',_latin1'%M %W %D %Y %y %m %d %h %i %s %w') AS `date_format("1997-01-02 03:04:05", "%M %W %D %Y %y %m %d %h %i %s %w")`,from_unixtime(unix_timestamp(_latin1'1994-03-02 10:11:12')) AS `from_unixtime(unix_timestamp("1994-03-02 10:11:12"))`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `"1997-12-31 23:59:59" + INTERVAL 1 SECOND`,(_latin1'1998-01-01 00:00:00' - interval 1 second) AS `"1998-01-01 00:00:00" - INTERVAL 1 SECOND`,(_latin1'1997-12-31' + interval 1 day) AS `INTERVAL 1 DAY + "1997-12-31"`,extract(year from _latin1'1999-01-02 10:11:12') AS `extract(YEAR FROM "1999-01-02 10:11:12")`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `date_add("1997-12-31 23:59:59",INTERVAL 1 SECOND)` +Note 1003 select period_add(_latin1'9602',-(12)) AS `period_add("9602",-12)`,period_diff(199505,_latin1'9404') AS `period_diff(199505,"9404")`,from_days(to_days(_latin1'960101')) AS `from_days(to_days("960101"))`,dayofmonth(_latin1'1997-01-02') AS `dayofmonth("1997-01-02")`,month(_latin1'1997-01-02') AS `month("1997-01-02")`,monthname(_latin1'1972-03-04') AS `monthname("1972-03-04")`,dayofyear(_latin1'0000-00-00') AS `dayofyear("0000-00-00")`,hour(_latin1'1997-03-03 23:03:22') AS `HOUR("1997-03-03 23:03:22")`,minute(_latin1'23:03:22') AS `MINUTE("23:03:22")`,second(230322) AS `SECOND(230322)`,quarter(980303) AS `QUARTER(980303)`,week(_latin1'1998-03-03',0) AS `WEEK("1998-03-03")`,yearweek(_latin1'2000-01-01',1) AS `yearweek("2000-01-01",1)`,week(19950101,1) AS `week(19950101,1)`,year(_latin1'98-02-03') AS `year("98-02-03")`,(weekday(curdate()) - weekday(now())) AS `weekday(curdate())-weekday(now())`,dayname(_latin1'1962-03-03') AS `dayname("1962-03-03")`,unix_timestamp() AS `unix_timestamp()`,sec_to_time((time_to_sec(_latin1'0:30:47') / 6.21)) AS `sec_to_time(time_to_sec("0:30:47")/6.21)`,curtime() AS `curtime()`,utc_time() AS `utc_time()`,curdate() AS `curdate()`,utc_date() AS `utc_date()`,utc_timestamp() AS `utc_timestamp()`,date_format(_latin1'1997-01-02 03:04:05',_latin1'%M %W %D %Y %y %m %d %h %i %s %w') AS `date_format("1997-01-02 03:04:05", "%M %W %D %Y %y %m %d %h %i %s %w")`,from_unixtime(unix_timestamp(_latin1'1994-03-02 10:11:12')) AS `from_unixtime(unix_timestamp("1994-03-02 10:11:12"))`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `"1997-12-31 23:59:59" + INTERVAL 1 SECOND`,(_latin1'1998-01-01 00:00:00' - interval 1 second) AS `"1998-01-01 00:00:00" - INTERVAL 1 SECOND`,(_latin1'1997-12-31' + interval 1 day) AS `INTERVAL 1 DAY + "1997-12-31"`,extract(year from _latin1'1999-01-02 10:11:12') AS `extract(YEAR FROM "1999-01-02 10:11:12")`,(_latin1'1997-12-31 23:59:59' 
+ interval 1 second) AS `date_add("1997-12-31 23:59:59",INTERVAL 1 SECOND)` SET @TMP=NOW(); CREATE TABLE t1 (d DATETIME); INSERT INTO t1 VALUES (NOW()); diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 308a92cb800..363c5296a1b 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -386,11 +386,11 @@ show keys from v4; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment select * from information_schema.views where TABLE_NAME like "v%"; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE -NULL test v0 select sql_no_cache `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER -NULL test v1 select sql_no_cache `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER -NULL test v2 select sql_no_cache `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER -NULL test v3 select sql_no_cache `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER -NULL test v4 select sql_no_cache `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER +NULL test v0 /* ALGORITHM=UNDEFINED */ select `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER +NULL test v1 /* ALGORITHM=UNDEFINED */ select `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER +NULL test v2 /* ALGORITHM=UNDEFINED */ select `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER +NULL test v3 /* ALGORITHM=UNDEFINED */ select `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER +NULL test v4 /* ALGORITHM=UNDEFINED */ select `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER drop view v0, v1, v2, v3, v4; create table t1 (a int); grant select,update,insert on t1 to mysqltest_1@localhost; @@ -483,9 +483,9 @@ create view v2 (c) as select a from t1 WITH LOCAL CHECK OPTION; create view v3 (c) as select a from t1 WITH CASCADED CHECK OPTION; select * from information_schema.views; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE -NULL test v1 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER -NULL test v2 select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER -NULL test v3 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER +NULL test v1 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER +NULL test v2 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER +NULL test v3 /* ALGORITHM=UNDEFINED */ select 
`test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER grant select (a) on test.t1 to joe@localhost with grant option; select * from INFORMATION_SCHEMA.COLUMN_PRIVILEGES; GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE @@ -705,7 +705,7 @@ Warnings: Warning 1356 View 'test.v2' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them show create table v3; View Create View -v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS select sql_no_cache `test`.`sub1`(1) AS `c` +v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS select `test`.`sub1`(1) AS `c` Warnings: Warning 1356 View 'test.v3' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them drop view v2; @@ -1169,7 +1169,7 @@ select * from information_schema.views where table_name='v1' or table_name='v2'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE NULL test v1 NONE YES root@localhost DEFINER -NULL test v2 select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER +NULL test v2 /* ALGORITHM=UNDEFINED */ select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER drop view v1, v2; drop table t1; drop user mysqltest_1@localhost; diff --git a/mysql-test/r/insert.result b/mysql-test/r/insert.result index 235c3f61fe9..b090f0f52c0 100644 --- a/mysql-test/r/insert.result +++ b/mysql-test/r/insert.result @@ -353,3 +353,18 @@ select row_count(); row_count() 1 drop table t1; +create table t1 (id int primary key auto_increment, data int, unique(data)); +insert ignore into t1 values(NULL,100),(NULL,110),(NULL,120); +insert ignore into t1 values(NULL,10),(NULL,20),(NULL,110),(NULL,120),(NULL,100),(NULL,90); +insert ignore into t1 values(NULL,130),(NULL,140),(500,110),(550,120),(450,100),(NULL,150); +select * from t1 order by id; +id data +1 100 +2 110 +3 120 +4 10 +5 20 +6 90 +7 130 +8 140 +9 150 diff --git a/mysql-test/r/lock.result b/mysql-test/r/lock.result index 079b0253ff6..7cd223197e7 100644 --- a/mysql-test/r/lock.result +++ b/mysql-test/r/lock.result @@ -68,3 +68,10 @@ ERROR HY000: Table 't2' was locked with a READ lock and can't be updated delete t2 from t1,t2 where t1.a=t2.a; ERROR HY000: Table 't2' was locked with a READ lock and can't be updated drop table t1,t2; +drop table if exists t1; +create table t1 (a int); +lock table t1 write; +flush tables with read lock; +ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction +unlock tables; +drop table t1; diff --git a/mysql-test/r/ndb_replace.result b/mysql-test/r/ndb_replace.result index 8e85feb3bd3..23844ce3bff 100644 --- a/mysql-test/r/ndb_replace.result +++ b/mysql-test/r/ndb_replace.result @@ -31,6 +31,7 @@ SELECT * from t1 ORDER BY i; i j k 3 1 42 17 2 NULL +DROP TABLE t1; CREATE TABLE t2 (a INT(11) NOT NULL, b INT(11) NOT NULL, c INT(11) NOT NULL, @@ -52,3 +53,47 @@ SELECT * FROM t2 ORDER BY id; a b c x y z id i 1 1 1 b b b 5 2 DROP TABLE t2; +drop table if exists t1; +create table t1 (pk int primary key, apk int unique, data int) engine=ndbcluster; +insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3); +replace into t1 (pk, apk) values (4, 1), (5, 2); +select * from t1 order by pk; +pk apk data +3 3 3 +4 1 NULL +5 2 NULL +delete from t1; +insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3); +replace into t1 (pk, apk) values (1, 4), (2, 5); +select * 
from t1 order by pk; +pk apk data +1 4 NULL +2 5 NULL +3 3 3 +delete from t1; +insert into t1 values (1, 1, 1), (4, 4, 4), (6, 6, 6); +load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk); +select * from t1 order by pk; +pk apk data +1 1 1 +3 4 NULL +5 6 NULL +delete from t1; +insert into t1 values (1, 1, 1), (3, 3, 3), (5, 5, 5); +load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk); +select * from t1 order by pk; +pk apk data +1 1 1 +3 4 NULL +5 6 NULL +delete from t1; +insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3); +replace into t1 (pk, apk) select 4, 1; +replace into t1 (pk, apk) select 2, 4; +select * from t1 order by pk; +pk apk data +2 4 NULL +3 3 3 +4 1 NULL +drop table t1; +End of 5.0 tests. diff --git a/mysql-test/r/ndb_trigger.result b/mysql-test/r/ndb_trigger.result new file mode 100644 index 00000000000..27f83df70c9 --- /dev/null +++ b/mysql-test/r/ndb_trigger.result @@ -0,0 +1,119 @@ +drop table if exists t1, t2, t3; +create table t1 (id int primary key, a int not null, b decimal (63,30) default 0) engine=ndb; +create table t2 (op char(1), a int not null, b decimal (63,30)); +create table t3 select 1 as i; +create trigger t1_bu before update on t1 for each row +begin +insert into t2 values ("u", old.a, old.b); +set new.b = old.b + 10; +end;// +create trigger t1_bd before delete on t1 for each row +begin +insert into t2 values ("d", old.a, old.b); +end;// +insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (3, 3, 3.05), (4, 4, 4.05); +update t1 set a=5 where a != 3; +select * from t1 order by id; +id a b +1 5 11.050000000000000000000000000000 +2 5 12.050000000000000000000000000000 +3 3 3.050000000000000000000000000000 +4 5 14.050000000000000000000000000000 +select * from t2 order by op, a, b; +op a b +u 1 1.050000000000000000000000000000 +u 2 2.050000000000000000000000000000 +u 4 4.050000000000000000000000000000 +delete from t2; +update t1, t3 set a=6 where a = 5; +select * from t1 order by id; +id a b +1 6 21.050000000000000000000000000000 +2 6 22.050000000000000000000000000000 +3 3 3.050000000000000000000000000000 +4 6 24.050000000000000000000000000000 +select * from t2 order by op, a, b; +op a b +u 5 11.050000000000000000000000000000 +u 5 12.050000000000000000000000000000 +u 5 14.050000000000000000000000000000 +delete from t2; +delete from t1 where a != 3; +select * from t1 order by id; +id a b +3 3 3.050000000000000000000000000000 +select * from t2 order by op, a, b; +op a b +d 6 21.050000000000000000000000000000 +d 6 22.050000000000000000000000000000 +d 6 24.050000000000000000000000000000 +delete from t2; +insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (4, 4, 4.05); +delete t1 from t1, t3 where a != 3; +select * from t1 order by id; +id a b +3 3 3.050000000000000000000000000000 +select * from t2 order by op, a, b; +op a b +d 1 1.050000000000000000000000000000 +d 2 2.050000000000000000000000000000 +d 4 4.050000000000000000000000000000 +delete from t2; +insert into t1 values (4, 4, 4.05); +insert into t1 (id, a) values (4, 1), (3, 1) on duplicate key update a= a + 1; +select * from t1 order by id; +id a b +3 4 13.050000000000000000000000000000 +4 5 14.050000000000000000000000000000 +select * from t2 order by op, a, b; +op a b +u 3 3.050000000000000000000000000000 +u 4 4.050000000000000000000000000000 +delete from t2; +delete from t3; +insert into t3 values (4), (3); +insert into t1 (id, a) (select i, 1 
from t3) on duplicate key update a= a + 1; +select * from t1 order by id; +id a b +3 5 23.050000000000000000000000000000 +4 6 24.050000000000000000000000000000 +select * from t2 order by op, a, b; +op a b +u 4 13.050000000000000000000000000000 +u 5 14.050000000000000000000000000000 +delete from t2; +replace into t1 (id, a) values (4, 1), (3, 1); +select * from t1 order by id; +id a b +3 1 0.000000000000000000000000000000 +4 1 0.000000000000000000000000000000 +select * from t2 order by op, a, b; +op a b +d 5 23.050000000000000000000000000000 +d 6 24.050000000000000000000000000000 +delete from t1; +delete from t2; +insert into t1 values (3, 1, 1.05), (4, 1, 2.05); +replace into t1 (id, a) (select i, 2 from t3); +select * from t1 order by id; +id a b +3 2 0.000000000000000000000000000000 +4 2 0.000000000000000000000000000000 +select * from t2 order by op, a, b; +op a b +d 1 1.050000000000000000000000000000 +d 1 2.050000000000000000000000000000 +delete from t1; +delete from t2; +insert into t1 values (3, 1, 1.05), (5, 2, 2.05); +load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (id, a); +select * from t1 order by id; +id a b +3 4 0.000000000000000000000000000000 +5 6 0.000000000000000000000000000000 +select * from t2 order by op, a, b; +op a b +d 1 1.050000000000000000000000000000 +d 2 2.050000000000000000000000000000 +drop tables t1, t2, t3; +End of 5.0 tests diff --git a/mysql-test/r/query_cache.result b/mysql-test/r/query_cache.result index 5f72b229ab5..27a84f90126 100644 --- a/mysql-test/r/query_cache.result +++ b/mysql-test/r/query_cache.result @@ -233,7 +233,7 @@ explain extended select benchmark(1,1) from t1; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 system NULL NULL NULL NULL 0 const row not found Warnings: -Note 1003 select sql_no_cache benchmark(1,1) AS `benchmark(1,1)` from `test`.`t1` +Note 1003 select benchmark(1,1) AS `benchmark(1,1)` from `test`.`t1` show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 0 diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index f687ab7e2c6..a7e7d3aff2c 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -644,6 +644,27 @@ SELECT count(*) FROM t1 WHERE CLIENT='000' AND (ARG1 != ' 2' OR ARG1 != ' 1'); count(*) 4 drop table t1; +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +CREATE TABLE t2 ( +pk1 int(11) NOT NULL, +pk2 int(11) NOT NULL, +pk3 int(11) NOT NULL, +pk4 int(11) NOT NULL, +filler char(82), +PRIMARY KEY (pk1,pk2,pk3,pk4) +) DEFAULT CHARSET=latin1; +insert into t2 select 1, A.a+10*B.a, 432, 44, 'fillerZ' from t1 A, t1 B; +INSERT INTO t2 VALUES (2621, 2635, 0, 0,'filler'), (2621, 2635, 1, 0,'filler'), +(2621, 2635, 10, 0,'filler'), (2621, 2635, 11, 0,'filler'), +(2621, 2635, 14, 0,'filler'), (2621, 2635, 1000015, 0,'filler'); +SELECT * FROM t2 +WHERE ((((pk4 =0) AND (pk1 =2621) AND (pk2 =2635))) +OR ((pk4 =1) AND (((pk1 IN ( 7, 2, 1 ))) OR (pk1 =522)) AND ((pk2 IN ( 0, 2635)))) +) AND (pk3 >=1000000); +pk1 pk2 pk3 pk4 filler +2621 2635 1000015 0 filler +drop table t1, t2; CREATE TABLE t1 ( id int(11) NOT NULL auto_increment, status varchar(20), diff --git a/mysql-test/r/rpl_auto_increment.result b/mysql-test/r/rpl_auto_increment.result index 9984ccf51f3..083f3a4e901 100644 --- a/mysql-test/r/rpl_auto_increment.result +++ b/mysql-test/r/rpl_auto_increment.result @@ -183,3 +183,47 @@ a 32 42 drop table t1; +create table 
t1 (a tinyint not null auto_increment primary key) engine=myisam; +insert into t1 values(103); +set auto_increment_increment=11; +set auto_increment_offset=4; +insert into t1 values(null); +insert into t1 values(null); +insert into t1 values(null); +ERROR 23000: Duplicate entry '125' for key 'PRIMARY' +select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t1 order by a; +a mod(a-@@auto_increment_offset,@@auto_increment_increment) +103 0 +114 0 +125 0 +create table t2 (a tinyint unsigned not null auto_increment primary key) engine=myisam; +set auto_increment_increment=10; +set auto_increment_offset=1; +set insert_id=1000; +insert into t2 values(null); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t2 order by a; +a mod(a-@@auto_increment_offset,@@auto_increment_increment) +251 0 +create table t3 like t1; +set auto_increment_increment=1000; +set auto_increment_offset=700; +insert into t3 values(null); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +select * from t3 order by a; +a +127 +select * from t1 order by a; +a +103 +114 +125 +select * from t2 order by a; +a +251 +select * from t3 order by a; +a +127 +drop table t1,t2,t3; diff --git a/mysql-test/r/rpl_get_lock.result b/mysql-test/r/rpl_get_lock.result index 26f33bfb42c..da300d99964 100644 --- a/mysql-test/r/rpl_get_lock.result +++ b/mysql-test/r/rpl_get_lock.result @@ -25,7 +25,7 @@ explain extended select is_free_lock("lock"), is_used_lock("lock"); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache is_free_lock(_latin1'lock') AS `is_free_lock("lock")`,is_used_lock(_latin1'lock') AS `is_used_lock("lock")` +Note 1003 select is_free_lock(_latin1'lock') AS `is_free_lock("lock")`,is_used_lock(_latin1'lock') AS `is_used_lock("lock")` select is_free_lock("lock2"); is_free_lock("lock2") 1 diff --git a/mysql-test/r/rpl_insert.result b/mysql-test/r/rpl_insert.result new file mode 100644 index 00000000000..bcc9b176ed3 --- /dev/null +++ b/mysql-test/r/rpl_insert.result @@ -0,0 +1,16 @@ +stop slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +start slave; +CREATE SCHEMA IF NOT EXISTS mysqlslap; +USE mysqlslap; +CREATE TABLE t1 (id INT, name VARCHAR(64)); +SELECT COUNT(*) FROM mysqlslap.t1; +COUNT(*) +5000 +SELECT COUNT(*) FROM mysqlslap.t1; +COUNT(*) +5000 +DROP SCHEMA IF EXISTS mysqlslap; diff --git a/mysql-test/r/rpl_insert_id.result b/mysql-test/r/rpl_insert_id.result index 622b1489f91..dbdbcf70944 100644 --- a/mysql-test/r/rpl_insert_id.result +++ b/mysql-test/r/rpl_insert_id.result @@ -117,6 +117,14 @@ insert into t1 (last_id) values (bug15728()); select last_insert_id(); last_insert_id() 5 +drop procedure if exists foo; +create procedure foo() +begin +declare res int; +insert into t2 (last_id) values (bug15728()); +insert into t1 (last_id) values (bug15728()); +end| +call foo(); select * from t1; id last_id 1 0 @@ -124,10 +132,126 @@ id last_id 3 2 4 1 5 4 +6 3 select * from t2; id last_id 1 3 2 4 +3 5 +select * from t1; +id last_id +1 0 +2 1 +3 2 +4 1 +5 4 +6 3 +select * from t2; +id last_id +1 3 +2 4 +3 5 drop function bug15728; drop function bug15728_insert; +drop table t1; +drop procedure foo; +create table t1 (n int primary key auto_increment not null, +b int, unique(b)); +set sql_log_bin=0; +insert into 
t1 values(null,100); +replace into t1 values(null,50),(null,100),(null,150); +select * from t1 order by n; +n b +2 50 +3 100 +4 150 +truncate table t1; +set sql_log_bin=1; +insert into t1 values(null,100); +select * from t1 order by n; +n b +1 100 +insert into t1 values(null,200),(null,300); +delete from t1 where b <> 100; +select * from t1 order by n; +n b +1 100 +replace into t1 values(null,100),(null,350); +select * from t1 order by n; +n b +2 100 +3 350 +select * from t1 order by n; +n b +2 100 +3 350 +insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000; +select * from t1 order by n; +n b +2 100 +4 400 +1000 350 +1001 600 +select * from t1 order by n; +n b +2 100 +4 400 +1000 350 +1001 600 +drop table t1; +create table t1 (n int primary key auto_increment not null, +b int, unique(b)); +insert into t1 values(null,100); +select * from t1 order by n; +n b +1 100 +insert into t1 values(null,200),(null,300); +delete from t1 where b <> 100; +select * from t1 order by n; +n b +1 100 +insert into t1 values(null,100),(null,350) on duplicate key update n=2; +select * from t1 order by n; +n b +2 100 +3 350 +select * from t1 order by n; +n b +2 100 +3 350 +drop table t1; +truncate table t2; +create table t1 (id tinyint primary key); +create function insid() returns int +begin +insert into t2 (last_id) values (0); +return 0; +end| +set sql_log_bin=0; +insert into t2 (id) values(1),(2),(3); +delete from t2; +set sql_log_bin=1; +select insid(); +insid() +0 +set sql_log_bin=0; +insert into t2 (id) values(5),(6),(7); +delete from t2 where id>=5; +set sql_log_bin=1; +insert into t1 select insid(); +select * from t1; +id +0 +select * from t2; +id last_id +4 0 +8 0 +select * from t1; +id +0 +select * from t2; +id last_id +4 0 +8 0 drop table t1, t2; +drop function insid; diff --git a/mysql-test/r/rpl_loaddata.result b/mysql-test/r/rpl_loaddata.result index f6c49ace94d..c22815186d1 100644 --- a/mysql-test/r/rpl_loaddata.result +++ b/mysql-test/r/rpl_loaddata.result @@ -5,8 +5,14 @@ reset slave; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; start slave; reset master; +select last_insert_id(); +last_insert_id() +0 create table t1(a int not null auto_increment, b int, primary key(a) ); load data infile '../std_data_ln/rpl_loaddata.dat' into table t1; +select last_insert_id(); +last_insert_id() +1 create temporary table t2 (day date,id int(9),category enum('a','b','c'),name varchar(60)); load data infile '../std_data_ln/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' starting by '>' ignore 1 lines; create table t3 (day date,id int(9),category enum('a','b','c'),name varchar(60)); @@ -22,7 +28,7 @@ day id category name 2003-03-22 2416 a bbbbb show master status; File Position Binlog_Do_DB Binlog_Ignore_DB -slave-bin.000001 1276 +slave-bin.000001 1248 drop table t1; drop table t2; drop table t3; @@ -33,7 +39,7 @@ set global sql_slave_skip_counter=1; start slave; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher 
Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1793 # # master-bin.000001 Yes Yes # 0 0 1793 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1765 # # master-bin.000001 Yes Yes # 0 0 1765 # None 0 No # set sql_log_bin=0; delete from t1; set sql_log_bin=1; @@ -43,7 +49,7 @@ change master to master_user='test'; change master to master_user='root'; show slave status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master -# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1828 # # master-bin.000001 No No # 0 0 1828 # None 0 No # +# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1800 # # master-bin.000001 No No # 0 0 1800 # None 0 No # set global sql_slave_skip_counter=1; start slave; set sql_log_bin=0; diff --git a/mysql-test/r/rpl_master_pos_wait.result b/mysql-test/r/rpl_master_pos_wait.result index e92d1ffa361..2f3e47999cf 100644 --- a/mysql-test/r/rpl_master_pos_wait.result +++ b/mysql-test/r/rpl_master_pos_wait.result @@ -11,7 +11,7 @@ explain extended select master_pos_wait('master-bin.999999',0,2); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache master_pos_wait(_latin1'master-bin.999999',0,2) AS `master_pos_wait('master-bin.999999',0,2)` +Note 1003 select master_pos_wait(_latin1'master-bin.999999',0,2) AS `master_pos_wait('master-bin.999999',0,2)` select master_pos_wait('master-bin.999999',0); stop slave sql_thread; master_pos_wait('master-bin.999999',0) diff --git a/mysql-test/r/rpl_ndb_auto_inc.result b/mysql-test/r/rpl_ndb_auto_inc.result index 71217442698..dd4cc90a75f 100644 --- a/mysql-test/r/rpl_ndb_auto_inc.result +++ b/mysql-test/r/rpl_ndb_auto_inc.result @@ -71,8 +71,8 @@ a 250 251 400 +401 1000 -1001 ******* Select from Slave ************* select * from t1 ORDER BY a; @@ -83,8 +83,8 @@ a 250 251 400 +401 1000 -1001 drop table t1; create table t1 (a int not null auto_increment, primary key (a)) engine=NDB; insert into t1 values (NULL),(5),(NULL),(NULL); @@ -120,8 +120,6 @@ a 502 503 600 -603 -604 610 611 ******* Select from Slave ************* @@ -137,8 +135,6 @@ a 502 503 600 -603 -604 610 611 drop table t1; diff --git a/mysql-test/r/rpl_row_create_table.result b/mysql-test/r/rpl_row_create_table.result index 2f6fcc5c5ea..03388f59b8c 100644 --- a/mysql-test/r/rpl_row_create_table.result +++ b/mysql-test/r/rpl_row_create_table.result @@ -178,6 +178,7 @@ CREATE TABLE t8 LIKE t4; CREATE TABLE t9 LIKE tt4; CREATE TEMPORARY TABLE tt5 LIKE t4; CREATE TEMPORARY TABLE tt6 LIKE tt4; +CREATE TEMPORARY TABLE tt7 SELECT 1; **** On Master **** SHOW CREATE TABLE t8; Table t8 diff --git a/mysql-test/r/rpl_row_delayed_ins.result b/mysql-test/r/rpl_row_delayed_ins.result index 16001b96ac2..31fffeb59cc 100644 --- a/mysql-test/r/rpl_row_delayed_ins.result +++ b/mysql-test/r/rpl_row_delayed_ins.result @@ -17,8 +17,10 @@ Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 4 Format_desc 1 102 Server ver: VERSION, 
Binlog ver: 4 master-bin.000001 102 Query 1 222 use `test`; create table t1(a int not null primary key) engine=myisam master-bin.000001 222 Table_map 1 261 table_id: # (test.t1) -master-bin.000001 261 Write_rows 1 305 table_id: # flags: STMT_END_F -master-bin.000001 305 Query 1 380 use `test`; flush tables +master-bin.000001 261 Write_rows 1 295 table_id: # flags: STMT_END_F +master-bin.000001 295 Table_map 1 334 table_id: # (test.t1) +master-bin.000001 334 Write_rows 1 373 table_id: # flags: STMT_END_F +master-bin.000001 373 Query 1 448 use `test`; flush tables SELECT * FROM t1 ORDER BY a; a 1 diff --git a/mysql-test/r/rpl_switch_stm_row_mixed.result b/mysql-test/r/rpl_switch_stm_row_mixed.result index 313037bb9dc..c319005b2a4 100644 --- a/mysql-test/r/rpl_switch_stm_row_mixed.result +++ b/mysql-test/r/rpl_switch_stm_row_mixed.result @@ -18,18 +18,18 @@ select @@global.binlog_format, @@session.binlog_format; ROW ROW CREATE TABLE t1 (a varchar(100)); prepare stmt1 from 'insert into t1 select concat(UUID(),?)'; -set @string="emergency"; -insert into t1 values("work"); +set @string="emergency_1_"; +insert into t1 values("work_2_"); execute stmt1 using @string; deallocate prepare stmt1; prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values(concat(UUID(),"work")); +insert into t1 values(concat(UUID(),"work_3_")); execute stmt1 using @string; deallocate prepare stmt1; -insert into t1 values(concat("for",UUID())); -insert into t1 select "yesterday"; -create temporary table tmp(a char(3)); -insert into tmp values("see"); +insert into t1 values(concat("for_4_",UUID())); +insert into t1 select "yesterday_5_"; +create temporary table tmp(a char(100)); +insert into tmp values("see_6_"); set binlog_format=statement; ERROR HY000: Cannot switch out of the row-based binary log format when the session has open temporary tables insert into t1 select * from tmp; @@ -55,16 +55,16 @@ select @@global.binlog_format, @@session.binlog_format; @@global.binlog_format @@session.binlog_format STATEMENT STATEMENT prepare stmt1 from 'insert into t1 select ?'; -set @string="emergency"; -insert into t1 values("work"); +set @string="emergency_7_"; +insert into t1 values("work_8_"); execute stmt1 using @string; deallocate prepare stmt1; prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values("work"); +insert into t1 values("work_9_"); execute stmt1 using @string; deallocate prepare stmt1; -insert into t1 values("for"); -insert into t1 select "yesterday"; +insert into t1 values("for_10_"); +insert into t1 select "yesterday_11_"; set binlog_format=default; select @@global.binlog_format, @@session.binlog_format; @@global.binlog_format @@session.binlog_format @@ -75,16 +75,16 @@ select @@global.binlog_format, @@session.binlog_format; @@global.binlog_format @@session.binlog_format STATEMENT STATEMENT prepare stmt1 from 'insert into t1 select ?'; -set @string="emergency"; -insert into t1 values("work"); +set @string="emergency_12_"; +insert into t1 values("work_13_"); execute stmt1 using @string; deallocate prepare stmt1; prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values("work"); +insert into t1 values("work_14_"); execute stmt1 using @string; deallocate prepare stmt1; -insert into t1 values("for"); -insert into t1 select "yesterday"; +insert into t1 values("for_15_"); +insert into t1 select "yesterday_16_"; set binlog_format=mixed; select @@global.binlog_format, @@session.binlog_format; @@global.binlog_format @@session.binlog_format @@ -94,40 +94,40 @@ select 
@@global.binlog_format, @@session.binlog_format; @@global.binlog_format @@session.binlog_format MIXED MIXED prepare stmt1 from 'insert into t1 select concat(UUID(),?)'; -set @string="emergency"; -insert into t1 values("work"); +set @string="emergency_17_"; +insert into t1 values("work_18_"); execute stmt1 using @string; deallocate prepare stmt1; prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values(concat(UUID(),"work")); +insert into t1 values(concat(UUID(),"work_19_")); execute stmt1 using @string; deallocate prepare stmt1; -insert into t1 values(concat("for",UUID())); -insert into t1 select "yesterday"; +insert into t1 values(concat("for_20_",UUID())); +insert into t1 select "yesterday_21_"; prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values(concat(UUID(),"work")); +insert into t1 values(concat(UUID(),"work_22_")); execute stmt1 using @string; deallocate prepare stmt1; -insert into t1 values(concat("for",UUID())); -insert into t1 select "yesterday"; -create table t2 select UUID(); +insert into t1 values(concat("for_23_",UUID())); +insert into t1 select "yesterday_24_"; +create table t2 select rpad(UUID(),100,' '); create table t3 select 1 union select UUID(); create table t4 select * from t1 where 3 in (select 1 union select 2 union select UUID() union select 3); create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3); insert into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4); create procedure foo() begin -insert into t1 values("work"); -insert into t1 values(concat("for",UUID())); -insert into t1 select "yesterday"; +insert into t1 values("work_25_"); +insert into t1 values(concat("for_26_",UUID())); +insert into t1 select "yesterday_27_"; end| create procedure foo2() begin -insert into t1 values(concat("emergency",UUID())); -insert into t1 values("work"); -insert into t1 values(concat("for",UUID())); +insert into t1 values(concat("emergency_28_",UUID())); +insert into t1 values("work_29_"); +insert into t1 values(concat("for_30_",UUID())); set session binlog_format=row; # accepted for stored procs -insert into t1 values("more work"); +insert into t1 values("more work_31_"); set session binlog_format=mixed; end| create function foo3() returns bigint unsigned @@ -136,32 +136,231 @@ set session binlog_format=row; # rejected for stored funcs insert into t1 values("alarm"); return 100; end| +create procedure foo4(x varchar(100)) +begin +insert into t1 values(concat("work_250_",x)); +insert into t1 select "yesterday_270_"; +end| call foo(); call foo2(); +call foo4("hello"); +call foo4(UUID()); +call foo4("world"); select foo3(); ERROR HY000: Cannot change the binary logging format inside a stored function or trigger select * from t1 where a="alarm"; a +drop function foo3; +create function foo3() returns bigint unsigned +begin +insert into t1 values("foo3_32_"); +call foo(); +return 100; +end| +insert into t2 select foo3(); +prepare stmt1 from 'insert into t2 select foo3()'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; +create function foo4() returns bigint unsigned +begin +insert into t2 select foo3(); +return 100; +end| +select foo4(); +foo4() +100 +prepare stmt1 from 'select foo4()'; +execute stmt1; +foo4() +100 +execute stmt1; +foo4() +100 +deallocate prepare stmt1; +create function foo5() returns bigint unsigned +begin +insert into t2 select UUID(); +return 100; +end| +select foo5(); +foo5() +100 +prepare stmt1 from 'select foo5()'; 
+execute stmt1; +foo5() +100 +execute stmt1; +foo5() +100 +deallocate prepare stmt1; +create function foo6(x varchar(100)) returns bigint unsigned +begin +insert into t2 select x; +return 100; +end| +select foo6("foo6_1_"); +foo6("foo6_1_") +100 +select foo6(concat("foo6_2_",UUID())); +foo6(concat("foo6_2_",UUID())) +100 +prepare stmt1 from 'select foo6(concat("foo6_3_",UUID()))'; +execute stmt1; +foo6(concat("foo6_3_",UUID())) +100 +execute stmt1; +foo6(concat("foo6_3_",UUID())) +100 +deallocate prepare stmt1; +create view v1 as select uuid(); +create table t11 (data varchar(255)); +insert into t11 select * from v1; +insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11'); +prepare stmt1 from "insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')"; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; +create trigger t11_bi before insert on t11 for each row +begin +set NEW.data = concat(NEW.data,UUID()); +end| +insert into t11 values("try_560_"); +insert delayed into t2 values("delay_1_"); +insert delayed into t2 values(concat("delay_2_",UUID())); +insert delayed into t2 values("delay_6_"); +insert delayed into t2 values(rand()); +set @a=2.345; +insert delayed into t2 values(@a); +create table t20 select * from t1; +create table t21 select * from t2; +create table t22 select * from t3; +drop table t1,t2,t3; +create table t1 (a int primary key auto_increment, b varchar(100)); +create table t2 (a int primary key auto_increment, b varchar(100)); +create table t3 (b varchar(100)); +create function f (x varchar(100)) returns int deterministic +begin +insert into t1 values(null,x); +insert into t2 values(null,x); +return 1; +end| +select f("try_41_"); +f("try_41_") +1 +use mysqltest1; +insert into t2 values(2,null),(3,null),(4,null); +delete from t2 where a>=2; +select f("try_42_"); +f("try_42_") +1 +insert into t2 values(3,null),(4,null); +delete from t2 where a>=3; +prepare stmt1 from 'select f(?)'; +set @string="try_43_"; +insert into t1 values(null,"try_44_"); +execute stmt1 using @string; +f(?) 
+1 +deallocate prepare stmt1; +create table t12 select * from t1; +drop table t1; +create table t1 (a int, b varchar(100), key(a)); +select f("try_45_"); +f("try_45_") +1 +create table t13 select * from t1; +drop table t1; +create table t1 (a int primary key auto_increment, b varchar(100)); +drop function f; +create table t14 (unique (a)) select * from t2; +truncate table t2; +create function f1 (x varchar(100)) returns int deterministic +begin +insert into t1 values(null,x); +return 1; +end| +create function f2 (x varchar(100)) returns int deterministic +begin +insert into t2 values(null,x); +return 1; +end| +select f1("try_46_"),f2("try_47_"); +f1("try_46_") f2("try_47_") +1 1 +insert into t2 values(2,null),(3,null),(4,null); +delete from t2 where a>=2; +select f1("try_48_"),f2("try_49_"); +f1("try_48_") f2("try_49_") +1 1 +insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_"))); +drop function f2; +create function f2 (x varchar(100)) returns int deterministic +begin +declare y int; +insert into t1 values(null,x); +set y = (select count(*) from t2); +return y; +end| +select f1("try_53_"),f2("try_54_"); +f1("try_53_") f2("try_54_") +1 3 +drop function f2; +create trigger t1_bi before insert on t1 for each row +begin +insert into t2 values(null,"try_55_"); +end| +insert into t1 values(null,"try_56_"); +alter table t1 modify a int, drop primary key; +insert into t1 values(null,"try_57_"); +CREATE TEMPORARY TABLE t15 SELECT UUID(); +create table t16 like t15; +INSERT INTO t16 SELECT * FROM t15; +insert into t16 values("try_65_"); +drop table t15; +insert into t16 values("try_66_"); select count(*) from t1; count(*) -36 +7 select count(*) from t2; count(*) -1 +5 select count(*) from t3; count(*) -2 +1 select count(*) from t4; count(*) 29 select count(*) from t5; count(*) 58 +select count(*) from t11; +count(*) +8 +select count(*) from t20; +count(*) +66 +select count(*) from t21; +count(*) +19 +select count(*) from t22; +count(*) +2 +select count(*) from t12; +count(*) +4 +select count(*) from t13; +count(*) +1 +select count(*) from t14; +count(*) +4 +select count(*) from t16; +count(*) +3 show binlog events from 102; Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Query 1 # drop database if exists mysqltest1 -master-bin.000001 # Table_map 1 # table_id: # (mysql.proc) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Query 1 # create database mysqltest1 master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE t1 (a varchar(100)) master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) @@ -178,78 +377,71 @@ master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) -master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work") -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E6379 COLLATE latin1_swedish_ci +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_8_") +master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci master-bin.000001 # Query 1 # use `mysqltest1`; 
insert into t1 select @'string' -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work") -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E6379 COLLATE latin1_swedish_ci +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_9_") +master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for") -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday" -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work") -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E6379 COLLATE latin1_swedish_ci +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for_10_") +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_11_" +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_13_") +master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work") -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E6379 COLLATE latin1_swedish_ci +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_14_") +master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for") -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday" -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work") +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for_15_") +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_16_" +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_18_") master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E6379 COLLATE latin1_swedish_ci +master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday" +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_21_" master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E6379 COLLATE latin1_swedish_ci +master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string' master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) master-bin.000001 # Write_rows 1 # table_id: # 
flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday" +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_24_" master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t2` ( - `UUID()` varchar(36) CHARACTER SET utf8 NOT NULL DEFAULT '' + `rpad(UUID(),100,' ')` varchar(100) CHARACTER SET utf8 NOT NULL DEFAULT '' ) master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; COMMIT master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t3` ( `1` varbinary(108) NOT NULL DEFAULT '' ) master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t3) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; COMMIT master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t4` ( `a` varchar(100) DEFAULT NULL ) master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t4) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; COMMIT master-bin.000001 # Query 1 # use `mysqltest1`; create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3) master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t5) master-bin.000001 # Write_rows 1 # table_id: # master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo() begin -insert into t1 values("work"); -insert into t1 values(concat("for",UUID())); -insert into t1 select "yesterday"; +insert into t1 values("work_25_"); +insert into t1 values(concat("for_26_",UUID())); +insert into t1 select "yesterday_27_"; end master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo2() begin -insert into t1 values(concat("emergency",UUID())); -insert into t1 values("work"); -insert into t1 values(concat("for",UUID())); +insert into t1 values(concat("emergency_28_",UUID())); +insert into t1 values("work_29_"); +insert into t1 values(concat("for_30_",UUID())); set session binlog_format=row; # accepted for stored procs -insert into t1 values("more work"); +insert into t1 values("more work_31_"); set session binlog_format=mixed; end master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned @@ -258,15 +450,213 @@ set session binlog_format=row; # rejected for stored funcs insert into t1 values("alarm"); return 100; end -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work") +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo4(x varchar(100)) +begin +insert into t1 values(concat("work_250_",x)); +insert into t1 select "yesterday_270_"; +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: 
# flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday" +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'hello'))) +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_270_" master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F -master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work") master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'world'))) +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_270_" +master-bin.000001 # Query 1 # use `mysqltest1`; drop function foo3 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned +begin +insert into t1 values("foo3_32_"); +call foo(); +return 100; +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo4() returns bigint unsigned +begin +insert into t2 select foo3(); +return 100; +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo5() returns bigint unsigned +begin +insert into t2 select UUID(); +return 100; 
+end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo6(x varchar(100)) returns bigint unsigned +begin +insert into t2 select x; +return 100; +end +master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `foo6`(_latin1'foo6_1_') +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select uuid() +master-bin.000001 # Query 1 # use `mysqltest1`; create table t11 (data varchar(255)) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11') +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t11_bi before insert on t11 for each row +begin +set NEW.data = concat(NEW.data,UUID()); +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; create table t20 select * from t1 +master-bin.000001 # Query 1 # use `mysqltest1`; create table t21 select * from t2 +master-bin.000001 # Query 1 # use `mysqltest1`; create table t22 select * from t3 +master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1,t2,t3 +master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100)) +master-bin.000001 # Query 1 # use `mysqltest1`; create table t2 (a int primary key auto_increment, b varchar(100)) +master-bin.000001 # Query 1 # use `mysqltest1`; create table t3 (b varchar(100)) 
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f (x varchar(100)) returns int deterministic +begin +insert into t1 values(null,x); +insert into t2 values(null,x); +return 1; +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Intvar 1 # INSERT_ID=3 +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(null,"try_44_") +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; create table t12 select * from t1 +master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1 +master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int, b varchar(100), key(a)) +master-bin.000001 # Intvar 1 # INSERT_ID=4 +master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `f`(_latin1'try_45_') +master-bin.000001 # Query 1 # use `mysqltest1`; create table t13 select * from t1 +master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1 +master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100)) +master-bin.000001 # Query 1 # use `mysqltest1`; drop function f +master-bin.000001 # Query 1 # use `mysqltest1`; create table t14 (unique (a)) select * from t2 +master-bin.000001 # Query 1 # use `mysqltest1`; truncate table t2 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f1 (x varchar(100)) returns int deterministic +begin +insert into t1 values(null,x); +return 1; +end +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic +begin +insert into t2 values(null,x); +return 1; +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t3) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; drop function f2 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic +begin +declare y int; +insert into t1 values(null,x); +set y = (select count(*) from t2); +return y; +end +master-bin.000001 # Intvar 1 # INSERT_ID=4 +master-bin.000001 # Query 1 # use `mysqltest1`; 
SELECT `f1`(_latin1'try_53_') +master-bin.000001 # Intvar 1 # INSERT_ID=5 +master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `f2`(_latin1'try_54_') +master-bin.000001 # Query 1 # use `mysqltest1`; drop function f2 +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t1_bi before insert on t1 for each row +begin +insert into t2 values(null,"try_55_"); +end +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2) +master-bin.000001 # Write_rows 1 # table_id: # +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; alter table t1 modify a int, drop primary key +master-bin.000001 # Intvar 1 # INSERT_ID=5 +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(null,"try_57_") +master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t16` ( + `UUID()` varchar(36) CHARACTER SET utf8 NOT NULL DEFAULT '' +) +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t16) +master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t16) master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F +master-bin.000001 # Query 1 # use `mysqltest1`; insert into t16 values("try_66_") drop database mysqltest1; diff --git a/mysql-test/r/rpl_temporary.result b/mysql-test/r/rpl_temporary.result index 01882c683a4..7e7d0cebe1d 100644 --- a/mysql-test/r/rpl_temporary.result +++ b/mysql-test/r/rpl_temporary.result @@ -76,16 +76,11 @@ drop table t1,t2; create temporary table t3 (f int); create temporary table t4 (f int); create table t5 (f int); -drop table if exists t999; -create temporary table t999 (f int); -LOAD DATA INFILE "./tmp/bl_dump_thread_id" into table t999; -drop table t999; -insert into t4 values (1); -kill `select id from information_schema.processlist where command='Binlog Dump'`; +select id from information_schema.processlist where command='Binlog Dump' into @id; +kill @id; insert into t5 select * from t4; select * from t5 /* must be 1 after reconnection */; f -1 drop temporary table t4; drop table t5; set @@session.pseudo_thread_id=100; @@ -93,6 +88,7 @@ create temporary table t101 (id int); create temporary table t102 (id int); set @@session.pseudo_thread_id=200; create temporary table t201 (id int); +create temporary table `t``201` (id int); create temporary table `#sql_not_user_table202` (id int); set @@session.pseudo_thread_id=300; create temporary table t301 (id int); diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result index 7237cf11fc0..f5b3fc99baf 100644 --- a/mysql-test/r/show_check.result +++ b/mysql-test/r/show_check.result @@ -607,6 +607,68 @@ DROP TABLE tyt2; DROP TABLE urkunde; SHOW TABLES FROM non_existing_database; ERROR 42000: Unknown database 'non_existing_database' +End of 4.1 tests +DROP VIEW IF EXISTS v1; +DROP PROCEDURE IF EXISTS p1; +CREATE VIEW v1 AS SELECT 1; +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select 1 AS `1` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_CACHE 1; +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_cache 1 AS `1` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_NO_CACHE 1; +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` 
AS select sql_no_cache 1 AS `1` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT NOW(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select now() AS `NOW()` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_CACHE NOW(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_cache now() AS `NOW()` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_NO_CACHE NOW(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE NOW(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_NO_CACHE SQL_CACHE NOW(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE SQL_CACHE NOW(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()` +DROP VIEW v1; +CREATE PROCEDURE p1() +BEGIN +SET @s= 'CREATE VIEW v1 AS SELECT SQL_CACHE 1'; +PREPARE stmt FROM @s; +EXECUTE stmt; +DROP PREPARE stmt; +END | +CALL p1(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_cache 1 AS `1` +DROP PROCEDURE p1; +DROP VIEW v1; +End of 5.0 tests. 
SHOW AUTHORS; create database mysqltest; show create database mysqltest; diff --git a/mysql-test/r/sp-security.result b/mysql-test/r/sp-security.result index 73f0eaa1513..4860058a8a1 100644 --- a/mysql-test/r/sp-security.result +++ b/mysql-test/r/sp-security.result @@ -410,3 +410,34 @@ ERROR HY000: There is no 'mysqltest_1'@'localhost' registered ---> connection: root DROP USER mysqltest_2@localhost; DROP DATABASE mysqltest; +GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO +user19857@localhost; +SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; +Host User Password +localhost user19857 *82DC221D557298F6CE9961037DB1C90604792F5C + +---> connection: mysqltest_2_con +use test; +CREATE PROCEDURE sp19857() DETERMINISTIC +BEGIN +DECLARE a INT; +SET a=1; +SELECT a; +END // +SHOW CREATE PROCEDURE test.sp19857; +Procedure sql_mode Create Procedure +sp19857 CREATE DEFINER=`user19857`@`localhost` PROCEDURE `sp19857`() + DETERMINISTIC +BEGIN +DECLARE a INT; +SET a=1; +SELECT a; +END +DROP PROCEDURE IF EXISTS test.sp19857; + +---> connection: root +SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; +Host User Password +localhost user19857 *82DC221D557298F6CE9961037DB1C90604792F5C +DROP USER user19857@localhost; diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index e307bb9eafe..75a9b422691 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -1019,19 +1019,19 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not found 2 UNCACHEABLE SUBQUERY t1 system NULL NULL NULL NULL 0 const row not found Warnings: -Note 1003 select sql_no_cache (select sql_no_cache rand() AS `RAND()` from `test`.`t1`) AS `(SELECT RAND() FROM t1)` from `test`.`t1` +Note 1003 select (select rand() AS `RAND()` from `test`.`t1`) AS `(SELECT RAND() FROM t1)` from `test`.`t1` EXPLAIN EXTENDED SELECT (SELECT ENCRYPT('test') FROM t1) FROM t1; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not found 2 UNCACHEABLE SUBQUERY t1 system NULL NULL NULL NULL 0 const row not found Warnings: -Note 1003 select sql_no_cache (select sql_no_cache encrypt(_latin1'test') AS `ENCRYPT('test')` from `test`.`t1`) AS `(SELECT ENCRYPT('test') FROM t1)` from `test`.`t1` +Note 1003 select (select encrypt(_latin1'test') AS `ENCRYPT('test')` from `test`.`t1`) AS `(SELECT ENCRYPT('test') FROM t1)` from `test`.`t1` EXPLAIN EXTENDED SELECT (SELECT BENCHMARK(1,1) FROM t1) FROM t1; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not found 2 UNCACHEABLE SUBQUERY t1 system NULL NULL NULL NULL 0 const row not found Warnings: -Note 1003 select sql_no_cache (select sql_no_cache benchmark(1,1) AS `BENCHMARK(1,1)` from `test`.`t1`) AS `(SELECT BENCHMARK(1,1) FROM t1)` from `test`.`t1` +Note 1003 select (select benchmark(1,1) AS `BENCHMARK(1,1)` from `test`.`t1`) AS `(SELECT BENCHMARK(1,1) FROM t1)` from `test`.`t1` drop table t1; CREATE TABLE `t1` ( `mot` varchar(30) character set latin1 NOT NULL default '', @@ -1126,7 +1126,7 @@ id select_type table type possible_keys key key_len ref rows Extra 2 UNCACHEABLE SUBQUERY t1 ALL NULL NULL NULL NULL 3 3 UNCACHEABLE SUBQUERY t1 ALL NULL NULL NULL NULL 3 Warnings: -Note 1003 select sql_no_cache `test`.`t1`.`a` AS `a`,(select sql_no_cache 
(select sql_no_cache rand() AS `rand()` from `test`.`t1` limit 1) AS `(select rand() from t1 limit 1)` from `test`.`t1` limit 1) AS `(select (select rand() from t1 limit 1) from t1 limit 1)` from `test`.`t1` +Note 1003 select `test`.`t1`.`a` AS `a`,(select (select rand() AS `rand()` from `test`.`t1` limit 1) AS `(select rand() from t1 limit 1)` from `test`.`t1` limit 1) AS `(select (select rand() from t1 limit 1) from t1 limit 1)` from `test`.`t1` drop table t1; select t1.Continent, t2.Name, t2.Population from t1 LEFT JOIN t2 ON t1.Code = t2.Country where t2.Population IN (select max(t2.Population) AS Population from t2, t1 where t2.Country = t1.Code group by Continent); ERROR 42S02: Table 'test.t1' doesn't exist diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result index 145579201a9..2f24d7f1d52 100644 --- a/mysql-test/r/trigger.result +++ b/mysql-test/r/trigger.result @@ -295,7 +295,7 @@ create trigger trg before insert on t1 for each row set @a:=1; create trigger trg after insert on t1 for each row set @a:=1; ERROR HY000: Trigger already exists create trigger trg2 before insert on t1 for each row set @a:=1; -ERROR HY000: Trigger already exists +ERROR 42000: This version of MySQL doesn't yet support 'multiple triggers with the same action time and event for one table' create trigger trg before insert on t3 for each row set @a:=1; ERROR HY000: Trigger already exists create trigger trg2 before insert on t3 for each row set @a:=1; @@ -1078,3 +1078,15 @@ i1 43 51 DROP TABLE t1; +create trigger wont_work after update on mysql.user for each row +begin +set @a:= 1; +end| +ERROR HY000: Triggers can not be created on system tables +use mysql| +create trigger wont_work after update on event for each row +begin +set @a:= 1; +end| +ERROR HY000: Triggers can not be created on system tables +End of 5.0 tests diff --git a/mysql-test/r/type_blob.result b/mysql-test/r/type_blob.result index e2b3b8f8887..a8e5fd1497c 100644 --- a/mysql-test/r/type_blob.result +++ b/mysql-test/r/type_blob.result @@ -517,7 +517,7 @@ coercibility(load_file('../../std_data/words.dat')); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache charset(load_file(_latin1'../../std_data/words.dat')) AS `charset(load_file('../../std_data/words.dat'))`,collation(load_file(_latin1'../../std_data/words.dat')) AS `collation(load_file('../../std_data/words.dat'))`,coercibility(load_file(_latin1'../../std_data/words.dat')) AS `coercibility(load_file('../../std_data/words.dat'))` +Note 1003 select charset(load_file(_latin1'../../std_data/words.dat')) AS `charset(load_file('../../std_data/words.dat'))`,collation(load_file(_latin1'../../std_data/words.dat')) AS `collation(load_file('../../std_data/words.dat'))`,coercibility(load_file(_latin1'../../std_data/words.dat')) AS `coercibility(load_file('../../std_data/words.dat'))` update t1 set imagem=load_file('../../std_data/words.dat') where id=1; select if(imagem is null, "ERROR", "OK"),length(imagem) from t1 where id = 1; if(imagem is null, "ERROR", "OK") length(imagem) diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index e4b3fc2c2e2..b545db808f7 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -75,7 +75,7 @@ explain extended select @t1:=(@t2:=1)+@t3:=4,@t1,@t2,@t3; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 
1003 select sql_no_cache (@t1:=((@t2:=1) + (@t3:=4))) AS `@t1:=(@t2:=1)+@t3:=4`,(@t1) AS `@t1`,(@t2) AS `@t2`,(@t3) AS `@t3` +Note 1003 select (@t1:=((@t2:=1) + (@t3:=4))) AS `@t1:=(@t2:=1)+@t3:=4`,(@t1) AS `@t1`,(@t2) AS `@t2`,(@t3) AS `@t3` select @t5; @t5 1.23456 @@ -135,7 +135,7 @@ explain extended select last_insert_id(345); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache last_insert_id(345) AS `last_insert_id(345)` +Note 1003 select last_insert_id(345) AS `last_insert_id(345)` select @@IDENTITY,last_insert_id(), @@identity; @@IDENTITY last_insert_id() @@identity 345 345 345 @@ -143,7 +143,7 @@ explain extended select @@IDENTITY,last_insert_id(), @@identity; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache 345 AS `@@IDENTITY`,last_insert_id() AS `last_insert_id()`,345 AS `@@identity` +Note 1003 select 345 AS `@@IDENTITY`,last_insert_id() AS `last_insert_id()`,345 AS `@@identity` set big_tables=OFF, big_tables=ON, big_tables=0, big_tables=1, big_tables="OFF", big_tables="ON"; set global concurrent_insert=2; show variables like 'concurrent_insert'; @@ -421,6 +421,28 @@ set tmp_table_size=100; set tx_isolation="READ-COMMITTED"; set wait_timeout=100; set log_warnings=1; +select @@session.insert_id; +@@session.insert_id +1 +set @save_insert_id=@@session.insert_id; +set session insert_id=20; +select @@session.insert_id; +@@session.insert_id +20 +set session last_insert_id=100; +select @@session.insert_id; +@@session.insert_id +20 +select @@session.last_insert_id; +@@session.last_insert_id +100 +select @@session.insert_id; +@@session.insert_id +20 +set @@session.insert_id=@save_insert_id; +select @@session.insert_id; +@@session.insert_id +1 create table t1 (a int not null auto_increment, primary key(a)); create table t2 (a int not null auto_increment, primary key(a)); insert into t1 values(null),(null),(null); diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 7f3c0f79520..af131be7241 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -672,7 +672,7 @@ drop table t1; CREATE VIEW v1 (f1,f2,f3,f4) AS SELECT connection_id(), pi(), current_user(), version(); SHOW CREATE VIEW v1; View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache connection_id() AS `f1`,pi() AS `f2`,current_user() AS `f3`,version() AS `f4` +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select connection_id() AS `f1`,pi() AS `f2`,current_user() AS `f3`,version() AS `f4` drop view v1; create table t1 (s1 int); create table t2 (s2 int); @@ -787,7 +787,7 @@ create function `f``1` () returns int return 5; create view v1 as select test.`f``1` (); show create view v1; View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `test`.`f``1`() AS `test.``f````1`` ()` +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `test`.`f``1`() AS `test.``f````1`` ()` select * from v1; test.`f``1` () 5 @@ -1868,14 +1868,14 @@ create table t2 (b timestamp default now()); create view v1 as select a,b,t1.a < now() from t1,t2 where t1.a < now(); SHOW CREATE VIEW v1; View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL 
SECURITY DEFINER VIEW `v1` AS select sql_no_cache `t1`.`a` AS `a`,`t2`.`b` AS `b`,(`t1`.`a` < now()) AS `t1.a < now()` from (`t1` join `t2`) where (`t1`.`a` < now()) +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a`,`t2`.`b` AS `b`,(`t1`.`a` < now()) AS `t1.a < now()` from (`t1` join `t2`) where (`t1`.`a` < now()) drop view v1; drop table t1, t2; CREATE TABLE t1 ( a varchar(50) ); CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = CURRENT_USER(); SHOW CREATE VIEW v1; View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `t1`.`a` AS `a` from `t1` where (`t1`.`a` = current_user()) +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` where (`t1`.`a` = current_user()) DROP VIEW v1; CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = VERSION(); SHOW CREATE VIEW v1; @@ -1885,7 +1885,7 @@ DROP VIEW v1; CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = DATABASE(); SHOW CREATE VIEW v1; View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `t1`.`a` AS `a` from `t1` where (`t1`.`a` = database()) +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` where (`t1`.`a` = database()) DROP VIEW v1; DROP TABLE t1; CREATE TABLE t1 (col1 time); @@ -2538,7 +2538,7 @@ show create view v1; drop view v1; // View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `test`.`t1`.`id` AS `id` from `t1` +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `test`.`t1`.`id` AS `id` from `t1` create table t1(f1 int, f2 int); create view v1 as select ta.f1 as a, tb.f1 as b from t1 ta, t1 tb where ta.f1=tb .f1 and ta.f2=tb.f2; @@ -2683,7 +2683,7 @@ SELECT (year(now())-year(DOB)) AS Age FROM t1 HAVING Age < 75; SHOW CREATE VIEW v1; View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache (year(now()) - year(`t1`.`DOB`)) AS `Age` from `t1` having (`Age` < 75) +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select (year(now()) - year(`t1`.`DOB`)) AS `Age` from `t1` having (`Age` < 75) SELECT (year(now())-year(DOB)) AS Age FROM t1 HAVING Age < 75; Age 42 diff --git a/mysql-test/t/auto_increment.test b/mysql-test/t/auto_increment.test index 2674639d0ac..7cef1bad784 100644 --- a/mysql-test/t/auto_increment.test +++ b/mysql-test/t/auto_increment.test @@ -303,3 +303,50 @@ INSERT INTO t1 VALUES(1, 1); --error ER_DUP_ENTRY ALTER TABLE t1 CHANGE t1 t1 INT(10) auto_increment; DROP TABLE t1; + +# Fix for BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY +# UPDATE": now LAST_INSERT_ID() will return the id of the updated +# row. 
+CREATE TABLE `t2` ( + `k` int(11) NOT NULL auto_increment, + `a` int(11) default NULL, + `c` int(11) default NULL, + PRIMARY KEY (`k`), + UNIQUE KEY `idx_1` (`a`) +) ENGINE=InnoDB; + insert into t2 ( a ) values ( 6 ) on duplicate key update c = +ifnull( c, +0 ) + 1; +insert into t2 ( a ) values ( 7 ) on duplicate key update c = +ifnull( c, +0 ) + 1; +select last_insert_id(); +select * from t2; +insert into t2 ( a ) values ( 6 ) on duplicate key update c = +ifnull( c, +0 ) + 1; +select last_insert_id(); +select * from t2; + +# Test of LAST_INSERT_ID() when autogenerated will fail: +# last_insert_id() should not change +insert ignore into t2 values (null,6,1),(10,8,1); +select last_insert_id(); +# First and second autogenerated will fail, last_insert_id() should +# point to third +insert ignore into t2 values (null,6,1),(null,8,1),(null,15,1),(null,20,1); +select last_insert_id(); +select * from t2; + +drop table t2; + +# Test of REPLACE when it does INSERT+DELETE and not UPDATE: +# see if it sets LAST_INSERT_ID() ok +create table t1 (a int primary key auto_increment, b int, c int, d timestamp default current_timestamp, unique(b),unique(c)); +insert into t1 values(null,1,1,now()); +insert into t1 values(null,0,0,null); +# this will delete two rows +replace into t1 values(null,1,0,null); +select last_insert_id(); + +drop table t1; diff --git a/mysql-test/t/ctype_ucs2_def-master.opt b/mysql-test/t/ctype_ucs2_def-master.opt index 1f884ff1d67..a0b5b061860 100644 --- a/mysql-test/t/ctype_ucs2_def-master.opt +++ b/mysql-test/t/ctype_ucs2_def-master.opt @@ -1 +1 @@ ---default-character-set=ucs2 --default-collation=ucs2_unicode_ci +--default-collation=ucs2_unicode_ci --default-character-set=ucs2 diff --git a/mysql-test/t/ctype_ucs2_def.test b/mysql-test/t/ctype_ucs2_def.test index fb174d551cf..00f636d79dc 100644 --- a/mysql-test/t/ctype_ucs2_def.test +++ b/mysql-test/t/ctype_ucs2_def.test @@ -1,4 +1,9 @@ # +# MySQL Bug#15276: MySQL ignores collation-server +# +show variables like 'collation_server'; + +# # Bug#18004 Connecting crashes server when default charset is UCS2 # show variables like "%character_set_ser%"; diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index ebe61e1af4a..81eb7e6d35c 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -29,7 +29,6 @@ rpl_ndb_ddl : BUG#18946 result file needs update + test needs to ch rpl_ndb_innodb2ndb : Bug #19710 Cluster replication to partition table fails on DELETE FROM statement #rpl_ndb_log : BUG#18947 2006-03-21 tomas CRBR: order in binlog of create table and insert (on different table) not determ rpl_ndb_myisam2ndb : Bug #19710 Cluster replication to partition table fails on DELETE FROM statement -rpl_switch_stm_row_mixed : BUG#18590 2006-03-28 brian rpl_row_blob_innodb : BUG#18980 2006-04-10 kent Test fails randomly rpl_row_func003 : BUG#19074 2006-13-04 andrei test failed rpl_sp : BUG#16456 2006-02-16 jmiller diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test index 5f5c8d44f35..77ca38b9587 100644 --- a/mysql-test/t/federated.test +++ b/mysql-test/t/federated.test @@ -1256,6 +1256,10 @@ SELECT LAST_INSERT_ID(); INSERT INTO federated.t1 VALUES (); SELECT LAST_INSERT_ID(); SELECT * FROM federated.t1; +DROP TABLE federated.t1; + +connection slave; +DROP TABLE federated.t1; # # Bug#17377 Federated Engine returns wrong Data, always the rows @@ -1310,7 +1314,6 @@ select * from federated.t1 where fld_parentid=0 and fld_delt=0; DROP TABLE federated.t1; connection slave; DROP TABLE 
federated.bug_17377_table; -DROP TABLE federated.t1; # # Test multi updates and deletes without keys @@ -1363,4 +1366,98 @@ drop table federated.t1, federated.t2; connection master; --enable_parsing ---source include/federated_cleanup.inc +# +# Additional test for bug#18437 "Wrong values inserted with a before +# update trigger on NDB table". SQL-layer didn't properly inform +# handler about fields which were read and set in triggers. In some +# cases this resulted in incorrect (garbage) values of OLD variables +# and lost changes to NEW variables. +# Since for federated engine only operation which is affected by wrong +# fields mark-up is handler::write_row() this file constains coverage +# for ON INSERT triggers only. Tests for other types of triggers reside +# in ndb_trigger.test. +# +--disable_warnings +drop table if exists federated.t1; +--enable_warnings +create table federated.t1 (a int, b int, c int); +connection master; +--disable_warnings +drop table if exists federated.t1; +drop table if exists federated.t2; +--enable_warnings +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval create table federated.t1 (a int, b int, c int) engine=federated connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; +create trigger federated.t1_bi before insert on federated.t1 for each row set new.c= new.a * new.b; +create table federated.t2 (a int, b int); +insert into federated.t2 values (13, 17), (19, 23); +# Each of three statements should correctly set values for all three fields +# insert +insert into federated.t1 (a, b) values (1, 2), (3, 5), (7, 11); +select * from federated.t1 order by a; +delete from federated.t1; +# insert ... select +insert into federated.t1 (a, b) select * from federated.t2; +select * from federated.t1 order by a; +delete from federated.t1; +# load +load data infile '../std_data_ln/loaddata5.dat' into table federated.t1 fields terminated by '' enclosed by '' ignore 1 lines (a, b); +select * from federated.t1 order by a; +drop tables federated.t1, federated.t2; + +connection slave; +drop table federated.t1; + +# +# BUG 19773 Crash when using multi-table updates, deletes +# with federated tables +# +connection slave; +create table federated.t1 (i1 int, i2 int, i3 int); +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)); + +connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2'; +insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); +insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); +select * from federated.t1 order by i1; +select * from federated.t2; +update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id; +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id; +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +drop table federated.t1, federated.t2; +connection slave; +drop table federated.t1, federated.t2; + +# Test multi updates and deletes with keys +connection slave; +create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1)); +create table federated.t2 (id int, c1 varchar(20), 
c2 varchar(20), primary key (id)); + +connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2'; +insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); +insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id; +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id; +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +drop table federated.t1, federated.t2; + +connection slave; +drop table federated.t1, federated.t2; + +source include/federated_cleanup.inc; diff --git a/mysql-test/t/insert.test b/mysql-test/t/insert.test index 3711e2986ed..0cc25469705 100644 --- a/mysql-test/t/insert.test +++ b/mysql-test/t/insert.test @@ -234,3 +234,10 @@ select row_count(); insert into t1 values (5, 5) on duplicate key update data= data + 10; select row_count(); drop table t1; + +# Test of INSERT IGNORE and re-using auto_increment values +create table t1 (id int primary key auto_increment, data int, unique(data)); +insert ignore into t1 values(NULL,100),(NULL,110),(NULL,120); +insert ignore into t1 values(NULL,10),(NULL,20),(NULL,110),(NULL,120),(NULL,100),(NULL,90); +insert ignore into t1 values(NULL,130),(NULL,140),(500,110),(550,120),(450,100),(NULL,150); +select * from t1 order by id; diff --git a/mysql-test/t/lock.test b/mysql-test/t/lock.test index 8300219b3d4..fb5e45433e9 100644 --- a/mysql-test/t/lock.test +++ b/mysql-test/t/lock.test @@ -93,3 +93,18 @@ delete t2 from t1,t2 where t1.a=t2.a; drop table t1,t2; # End of 4.1 tests + +# +# Bug#18884 "lock table + global read lock = crash" +# The bug is not repeatable, just add the test case. 
+# +--disable_warnings +drop table if exists t1; +--enable_warnings +create table t1 (a int); +lock table t1 write; +--error ER_LOCK_OR_ACTIVE_TRANSACTION +flush tables with read lock; +unlock tables; +drop table t1; + diff --git a/mysql-test/t/ndb_replace.test b/mysql-test/t/ndb_replace.test index 94a11f7dfb2..476a607ed44 100644 --- a/mysql-test/t/ndb_replace.test +++ b/mysql-test/t/ndb_replace.test @@ -39,6 +39,7 @@ INSERT INTO t1 VALUES (1,1,23),(2,2,24); REPLACE INTO t1 (j,k) VALUES (1,42); REPLACE INTO t1 (i,j) VALUES (17,2); SELECT * from t1 ORDER BY i; +DROP TABLE t1; # bug#19906 CREATE TABLE t2 (a INT(11) NOT NULL, @@ -64,4 +65,40 @@ SELECT * FROM t2 ORDER BY id; DROP TABLE t2; +# +# Bug #20728 "REPLACE does not work correctly for NDB table with PK and +# unique index" +# +--disable_warnings +drop table if exists t1; +--enable_warnings +create table t1 (pk int primary key, apk int unique, data int) engine=ndbcluster; +# Test for plain replace which updates pk +insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3); +replace into t1 (pk, apk) values (4, 1), (5, 2); +select * from t1 order by pk; +delete from t1; +# Another test for plain replace which doesn't touch pk +insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3); +replace into t1 (pk, apk) values (1, 4), (2, 5); +select * from t1 order by pk; +delete from t1; +# Test for load data replace which updates pk +insert into t1 values (1, 1, 1), (4, 4, 4), (6, 6, 6); +load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk); +select * from t1 order by pk; +delete from t1; +# Now test for load data replace which doesn't touch pk +insert into t1 values (1, 1, 1), (3, 3, 3), (5, 5, 5); +load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk); +select * from t1 order by pk; +delete from t1; +# Finally test for both types of replace ... select +insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3); +replace into t1 (pk, apk) select 4, 1; +replace into t1 (pk, apk) select 2, 4; +select * from t1 order by pk; +# Clean-up +drop table t1; +--echo End of 5.0 tests. diff --git a/mysql-test/t/ndb_trigger.test b/mysql-test/t/ndb_trigger.test new file mode 100644 index 00000000000..2521ef17842 --- /dev/null +++ b/mysql-test/t/ndb_trigger.test @@ -0,0 +1,92 @@ +# Tests which involve triggers and NDB storage engine +--source include/have_ndb.inc +--source include/not_embedded.inc + +# +# Test for bug#18437 "Wrong values inserted with a before update +# trigger on NDB table". SQL-layer didn't properly inform handler +# about fields which were read and set in triggers. In some cases +# this resulted in incorrect (garbage) values of OLD variables and +# lost changes to NEW variables. +# You can find similar tests for ON INSERT triggers in federated.test +# since this engine so far is the only engine in MySQL which cares +# about field mark-up during handler::write_row() operation. 
+# + +--disable_warnings +drop table if exists t1, t2, t3; +--enable_warnings + +create table t1 (id int primary key, a int not null, b decimal (63,30) default 0) engine=ndb; +create table t2 (op char(1), a int not null, b decimal (63,30)); +create table t3 select 1 as i; + +delimiter //; +create trigger t1_bu before update on t1 for each row +begin + insert into t2 values ("u", old.a, old.b); + set new.b = old.b + 10; +end;// +create trigger t1_bd before delete on t1 for each row +begin + insert into t2 values ("d", old.a, old.b); +end;// +delimiter ;// +insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (3, 3, 3.05), (4, 4, 4.05); + +# Check that usual update works as it should +update t1 set a=5 where a != 3; +select * from t1 order by id; +select * from t2 order by op, a, b; +delete from t2; +# Check that everything works for multi-update +update t1, t3 set a=6 where a = 5; +select * from t1 order by id; +select * from t2 order by op, a, b; +delete from t2; +# Check for delete +delete from t1 where a != 3; +select * from t1 order by id; +select * from t2 order by op, a, b; +delete from t2; +# Check for multi-delete +insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (4, 4, 4.05); +delete t1 from t1, t3 where a != 3; +select * from t1 order by id; +select * from t2 order by op, a, b; +delete from t2; +# Check for insert ... on duplicate key update +insert into t1 values (4, 4, 4.05); +insert into t1 (id, a) values (4, 1), (3, 1) on duplicate key update a= a + 1; +select * from t1 order by id; +select * from t2 order by op, a, b; +delete from t2; +# Check for insert ... select ... on duplicate key update +delete from t3; +insert into t3 values (4), (3); +insert into t1 (id, a) (select i, 1 from t3) on duplicate key update a= a + 1; +select * from t1 order by id; +select * from t2 order by op, a, b; +delete from t2; +# Check for replace +replace into t1 (id, a) values (4, 1), (3, 1); +select * from t1 order by id; +select * from t2 order by op, a, b; +delete from t1; +delete from t2; +# Check for replace ... select ... +insert into t1 values (3, 1, 1.05), (4, 1, 2.05); +replace into t1 (id, a) (select i, 2 from t3); +select * from t1 order by id; +select * from t2 order by op, a, b; +delete from t1; +delete from t2; +# Check for load data replace +insert into t1 values (3, 1, 1.05), (5, 2, 2.05); +load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (id, a); +select * from t1 order by id; +select * from t2 order by op, a, b; + +drop tables t1, t2, t3; + +--echo End of 5.0 tests diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test index d1ce1104322..76929cf30e6 100644 --- a/mysql-test/t/range.test +++ b/mysql-test/t/range.test @@ -490,6 +490,31 @@ SELECT count(*) FROM t1 WHERE CLIENT='000' AND (ARG1 != ' 1' OR ARG1 != ' 2'); SELECT count(*) FROM t1 WHERE CLIENT='000' AND (ARG1 != ' 2' OR ARG1 != ' 1'); drop table t1; +# BUG#16168: Wrong range optimizer results, "Use_count: Wrong count ..." +# warnings in server stderr. 
+create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +CREATE TABLE t2 ( + pk1 int(11) NOT NULL, + pk2 int(11) NOT NULL, + pk3 int(11) NOT NULL, + pk4 int(11) NOT NULL, + filler char(82), + PRIMARY KEY (pk1,pk2,pk3,pk4) +) DEFAULT CHARSET=latin1; + +insert into t2 select 1, A.a+10*B.a, 432, 44, 'fillerZ' from t1 A, t1 B; +INSERT INTO t2 VALUES (2621, 2635, 0, 0,'filler'), (2621, 2635, 1, 0,'filler'), + (2621, 2635, 10, 0,'filler'), (2621, 2635, 11, 0,'filler'), + (2621, 2635, 14, 0,'filler'), (2621, 2635, 1000015, 0,'filler'); + +SELECT * FROM t2 +WHERE ((((pk4 =0) AND (pk1 =2621) AND (pk2 =2635))) +OR ((pk4 =1) AND (((pk1 IN ( 7, 2, 1 ))) OR (pk1 =522)) AND ((pk2 IN ( 0, 2635)))) +) AND (pk3 >=1000000); +drop table t1, t2; + # End of 4.1 tests # diff --git a/mysql-test/t/rpl_insert.test b/mysql-test/t/rpl_insert.test new file mode 100644 index 00000000000..9225606c94b --- /dev/null +++ b/mysql-test/t/rpl_insert.test @@ -0,0 +1,29 @@ + +# +# Bug#20821: INSERT DELAYED fails to write some rows to binlog +# + +--source include/master-slave.inc +--source include/not_embedded.inc +--source include/not_windows.inc + +--disable_warnings +CREATE SCHEMA IF NOT EXISTS mysqlslap; +USE mysqlslap; +--enable_warnings + +CREATE TABLE t1 (id INT, name VARCHAR(64)); + +let $query = "INSERT DELAYED INTO t1 VALUES (1, 'Dr. No'), (2, 'From Russia With Love'), (3, 'Goldfinger'), (4, 'Thunderball'), (5, 'You Only Live Twice')"; +--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=200 --query=$query --delimiter=";" + +--sleep 10 + +SELECT COUNT(*) FROM mysqlslap.t1; +sync_slave_with_master; +SELECT COUNT(*) FROM mysqlslap.t1; + +connection master; +DROP SCHEMA IF EXISTS mysqlslap; +sync_slave_with_master; + diff --git a/mysql-test/t/rpl_row_create_table.test b/mysql-test/t/rpl_row_create_table.test index a42089441d2..3a711e5b496 100644 --- a/mysql-test/t/rpl_row_create_table.test +++ b/mysql-test/t/rpl_row_create_table.test @@ -97,6 +97,7 @@ CREATE TABLE t8 LIKE t4; CREATE TABLE t9 LIKE tt4; CREATE TEMPORARY TABLE tt5 LIKE t4; CREATE TEMPORARY TABLE tt6 LIKE tt4; +CREATE TEMPORARY TABLE tt7 SELECT 1; --echo **** On Master **** --query_vertical SHOW CREATE TABLE t8 --query_vertical SHOW CREATE TABLE t9 diff --git a/mysql-test/t/rpl_switch_stm_row_mixed.test b/mysql-test/t/rpl_switch_stm_row_mixed.test index 4a79b3995c4..6d282069ba1 100644 --- a/mysql-test/t/rpl_switch_stm_row_mixed.test +++ b/mysql-test/t/rpl_switch_stm_row_mixed.test @@ -15,22 +15,22 @@ select @@global.binlog_format, @@session.binlog_format; CREATE TABLE t1 (a varchar(100)); prepare stmt1 from 'insert into t1 select concat(UUID(),?)'; -set @string="emergency"; -insert into t1 values("work"); +set @string="emergency_1_"; +insert into t1 values("work_2_"); execute stmt1 using @string; deallocate prepare stmt1; prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values(concat(UUID(),"work")); +insert into t1 values(concat(UUID(),"work_3_")); execute stmt1 using @string; deallocate prepare stmt1; -insert into t1 values(concat("for",UUID())); -insert into t1 select "yesterday"; +insert into t1 values(concat("for_4_",UUID())); +insert into t1 select "yesterday_5_"; # verify that temp tables prevent a switch to SBR -create temporary table tmp(a char(3)); -insert into tmp values("see"); +create temporary table tmp(a char(100)); +insert into tmp values("see_6_"); --error ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR set binlog_format=statement; insert into t1 select * from tmp; @@ -47,18 +47,18 @@ show 
session variables like "binlog_format%"; select @@global.binlog_format, @@session.binlog_format; prepare stmt1 from 'insert into t1 select ?'; -set @string="emergency"; -insert into t1 values("work"); +set @string="emergency_7_"; +insert into t1 values("work_8_"); execute stmt1 using @string; deallocate prepare stmt1; prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values("work"); +insert into t1 values("work_9_"); execute stmt1 using @string; deallocate prepare stmt1; -insert into t1 values("for"); -insert into t1 select "yesterday"; +insert into t1 values("for_10_"); +insert into t1 select "yesterday_11_"; # test SET DEFAULT (=statement at this point of test) set binlog_format=default; @@ -69,18 +69,18 @@ set global binlog_format=default; select @@global.binlog_format, @@session.binlog_format; prepare stmt1 from 'insert into t1 select ?'; -set @string="emergency"; -insert into t1 values("work"); +set @string="emergency_12_"; +insert into t1 values("work_13_"); execute stmt1 using @string; deallocate prepare stmt1; prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values("work"); +insert into t1 values("work_14_"); execute stmt1 using @string; deallocate prepare stmt1; -insert into t1 values("for"); -insert into t1 select "yesterday"; +insert into t1 values("for_15_"); +insert into t1 select "yesterday_16_"; # and now the mixed mode @@ -90,53 +90,52 @@ set global binlog_format=mixed; select @@global.binlog_format, @@session.binlog_format; prepare stmt1 from 'insert into t1 select concat(UUID(),?)'; -set @string="emergency"; -insert into t1 values("work"); +set @string="emergency_17_"; +insert into t1 values("work_18_"); execute stmt1 using @string; deallocate prepare stmt1; prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values(concat(UUID(),"work")); +insert into t1 values(concat(UUID(),"work_19_")); execute stmt1 using @string; deallocate prepare stmt1; -insert into t1 values(concat("for",UUID())); -insert into t1 select "yesterday"; +insert into t1 values(concat("for_20_",UUID())); +insert into t1 select "yesterday_21_"; prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values(concat(UUID(),"work")); +insert into t1 values(concat(UUID(),"work_22_")); execute stmt1 using @string; deallocate prepare stmt1; -insert into t1 values(concat("for",UUID())); -insert into t1 select "yesterday"; +insert into t1 values(concat("for_23_",UUID())); +insert into t1 select "yesterday_24_"; # Test of CREATE TABLE SELECT -create table t2 select UUID(); +create table t2 select rpad(UUID(),100,' '); create table t3 select 1 union select UUID(); create table t4 select * from t1 where 3 in (select 1 union select 2 union select UUID() union select 3); create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3); # what if UUID() is first: insert into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4); -# inside a stored procedure (inside a function or trigger won't -# work) +# inside a stored procedure delimiter |; create procedure foo() begin -insert into t1 values("work"); -insert into t1 values(concat("for",UUID())); -insert into t1 select "yesterday"; +insert into t1 values("work_25_"); +insert into t1 values(concat("for_26_",UUID())); +insert into t1 select "yesterday_27_"; end| create procedure foo2() begin -insert into t1 values(concat("emergency",UUID())); -insert into t1 values("work"); -insert into t1 values(concat("for",UUID())); +insert into t1 
values(concat("emergency_28_",UUID())); +insert into t1 values("work_29_"); +insert into t1 values(concat("for_30_",UUID())); set session binlog_format=row; # accepted for stored procs -insert into t1 values("more work"); +insert into t1 values("more work_31_"); set session binlog_format=mixed; end| create function foo3() returns bigint unsigned @@ -145,15 +144,130 @@ begin insert into t1 values("alarm"); return 100; end| +create procedure foo4(x varchar(100)) +begin +insert into t1 values(concat("work_250_",x)); +insert into t1 select "yesterday_270_"; +end| delimiter ;| call foo(); call foo2(); +call foo4("hello"); +call foo4(UUID()); +call foo4("world"); # test that can't SET in a stored function --error ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT select foo3(); select * from t1 where a="alarm"; +# Tests of stored functions/triggers/views for BUG#20930 "Mixed +# binlogging mode does not work with stored functions, triggers, +# views" + +# Function which calls procedure +drop function foo3; +delimiter |; +create function foo3() returns bigint unsigned +begin + insert into t1 values("foo3_32_"); + call foo(); + return 100; +end| +delimiter ;| +insert into t2 select foo3(); + +prepare stmt1 from 'insert into t2 select foo3()'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +# Test if stored function calls stored function which calls procedure +# which requires row-based. + +delimiter |; +create function foo4() returns bigint unsigned +begin + insert into t2 select foo3(); + return 100; +end| +delimiter ;| +select foo4(); + +prepare stmt1 from 'select foo4()'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +# A simple stored function +delimiter |; +create function foo5() returns bigint unsigned +begin + insert into t2 select UUID(); + return 100; +end| +delimiter ;| +select foo5(); + +prepare stmt1 from 'select foo5()'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +# A simple stored function where UUID() is in the argument +delimiter |; +create function foo6(x varchar(100)) returns bigint unsigned +begin + insert into t2 select x; + return 100; +end| +delimiter ;| +select foo6("foo6_1_"); +select foo6(concat("foo6_2_",UUID())); + +prepare stmt1 from 'select foo6(concat("foo6_3_",UUID()))'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + + +# Test of views using UUID() + +create view v1 as select uuid(); +create table t11 (data varchar(255)); +insert into t11 select * from v1; +# Test of querying INFORMATION_SCHEMA which parses the view's body, +# to verify that it binlogs statement-based (is not polluted by +# the parsing of the view's body). 
+insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11'); +prepare stmt1 from "insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')"; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +# Test of triggers with UUID() +delimiter |; +create trigger t11_bi before insert on t11 for each row +begin + set NEW.data = concat(NEW.data,UUID()); +end| +delimiter ;| +insert into t11 values("try_560_"); + +# Test that INSERT DELAYED works in mixed mode (BUG#20649) +insert delayed into t2 values("delay_1_"); +insert delayed into t2 values(concat("delay_2_",UUID())); +insert delayed into t2 values("delay_6_"); + +# Test for BUG#20633 (INSERT DELAYED RAND()/user_variable does not +# replicate fine in statement-based ; we test that in mixed mode it +# works). +insert delayed into t2 values(rand()); +set @a=2.345; +insert delayed into t2 values(@a); + +sleep 4; # time for the delayed inserts to reach disk + # If you want to do manual testing of the mixed mode regarding UDFs (not # testable automatically as quite platform- and compiler-dependent), # you just need to set the variable below to 1, and to @@ -164,30 +278,181 @@ if ($you_want_to_test_UDF) { CREATE FUNCTION metaphon RETURNS STRING SONAME 'udf_example.so'; prepare stmt1 from 'insert into t1 select metaphon(?)'; - set @string="emergency"; - insert into t1 values("work"); + set @string="emergency_133_"; + insert into t1 values("work_134_"); execute stmt1 using @string; deallocate prepare stmt1; prepare stmt1 from 'insert into t1 select ?'; - insert into t1 values(metaphon("work")); + insert into t1 values(metaphon("work_135_")); execute stmt1 using @string; deallocate prepare stmt1; - insert into t1 values(metaphon("for")); - insert into t1 select "yesterday"; - create table t6 select metaphon("for"); - create table t7 select 1 union select metaphon("for"); - create table t8 select * from t1 where 3 in (select 1 union select 2 union select metaphon("for") union select 3); + insert into t1 values(metaphon("for_136_")); + insert into t1 select "yesterday_137_"; + create table t6 select metaphon("for_138_"); + create table t7 select 1 union select metaphon("for_139_"); + create table t8 select * from t1 where 3 in (select 1 union select 2 union select metaphon("for_140_") union select 3); create table t9 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3); } +create table t20 select * from t1; # save for comparing later +create table t21 select * from t2; +create table t22 select * from t3; +drop table t1,t2,t3; + +# This tests the fix to +# BUG#19630 stored function inserting into two auto_increment breaks statement-based binlog +# We verify that under the mixed binlog mode, a stored function +# modifying at least two tables having an auto_increment column, +# is binlogged row-based. Indeed in statement-based binlogging, +# only the auto_increment value generated for the first table +# is recorded in the binlog, the value generated for the 2nd table +# lacking. 
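# A minimal sketch of the failure mode (illustrative, assuming two tables each
# with their own auto_increment counter): one function call generates two
# implicit ids, but a statement-based event records only the value for the
# first table, e.g.
#   insert into t1 values(null,x);   # consumes t1's counter (recorded)
#   insert into t2 values(null,x);   # consumes t2's counter (lost under SBR)
# so the slave would recompute t2's id from its own counter and could diverge;
# hence mixed mode has to log such a function call row-based.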
+ +create table t1 (a int primary key auto_increment, b varchar(100)); +create table t2 (a int primary key auto_increment, b varchar(100)); +create table t3 (b varchar(100)); +delimiter |; +create function f (x varchar(100)) returns int deterministic +begin + insert into t1 values(null,x); + insert into t2 values(null,x); + return 1; +end| +delimiter ;| +select f("try_41_"); +# Two operations which compensate each other except that their net +# effect is that they advance the auto_increment counter of t2 on slave: +sync_slave_with_master; +use mysqltest1; +insert into t2 values(2,null),(3,null),(4,null); +delete from t2 where a>=2; + +connection master; +# this is the call which didn't replicate well +select f("try_42_"); +sync_slave_with_master; + +# now use prepared statement and test again, just to see that the RBB +# mode isn't set at PREPARE but at EXECUTE. + +insert into t2 values(3,null),(4,null); +delete from t2 where a>=3; + +connection master; +prepare stmt1 from 'select f(?)'; +set @string="try_43_"; +insert into t1 values(null,"try_44_"); # should be SBB +execute stmt1 using @string; # should be RBB +deallocate prepare stmt1; +sync_slave_with_master; + +# verify that if only one table has auto_inc, it does not trigger RBB +# (we'll check in binlog further below) + +connection master; +create table t12 select * from t1; # save for comparing later +drop table t1; +create table t1 (a int, b varchar(100), key(a)); +select f("try_45_"); + +# restore table's key +create table t13 select * from t1; +drop table t1; +create table t1 (a int primary key auto_increment, b varchar(100)); + +# now test if it's two functions, each of them inserts in one table + +drop function f; +# we need a unique key to have sorting of rows by mysqldump +create table t14 (unique (a)) select * from t2; +truncate table t2; +delimiter |; +create function f1 (x varchar(100)) returns int deterministic +begin + insert into t1 values(null,x); + return 1; +end| +create function f2 (x varchar(100)) returns int deterministic +begin + insert into t2 values(null,x); + return 1; +end| +delimiter ;| +select f1("try_46_"),f2("try_47_"); + +sync_slave_with_master; +insert into t2 values(2,null),(3,null),(4,null); +delete from t2 where a>=2; + +connection master; +# Test with SELECT and INSERT +select f1("try_48_"),f2("try_49_"); +insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_"))); +sync_slave_with_master; + +# verify that if f2 does only read on an auto_inc table, this does not +# switch to RBB +connection master; +drop function f2; +delimiter |; +create function f2 (x varchar(100)) returns int deterministic +begin + declare y int; + insert into t1 values(null,x); + set y = (select count(*) from t2); + return y; +end| +delimiter ;| +select f1("try_53_"),f2("try_54_"); +sync_slave_with_master; + +# And now, a normal statement with a trigger (no stored functions) + +connection master; +drop function f2; +delimiter |; +create trigger t1_bi before insert on t1 for each row +begin + insert into t2 values(null,"try_55_"); +end| +delimiter ;| +insert into t1 values(null,"try_56_"); +# and now remove one auto_increment and verify SBB +alter table t1 modify a int, drop primary key; +insert into t1 values(null,"try_57_"); +sync_slave_with_master; + +# Test for BUG#20499 "mixed mode with temporary table breaks binlog" +# Slave used to have only 2 rows instead of 3. 
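# A short sketch of the mechanism exercised below (mirrors the statements that
# follow): CREATE TEMPORARY TABLE ... SELECT UUID() switches the session to
# row-based logging, and while that temporary table exists the session cannot
# switch back, so
#   insert into t16 values("try_65_");
# is still logged row-based; only after t15 is dropped can
#   insert into t16 values("try_66_");
# go back to statement-based. The bug made the slave lose one of these rows.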
+connection master; +CREATE TEMPORARY TABLE t15 SELECT UUID(); +create table t16 like t15; +INSERT INTO t16 SELECT * FROM t15; +# we'll verify that this one is done RBB +insert into t16 values("try_65_"); +drop table t15; +# we'll verify that this one is done SBB +insert into t16 values("try_66_"); +sync_slave_with_master; + # and now compare: +connection master; + # first check that data on master is sensible select count(*) from t1; select count(*) from t2; select count(*) from t3; select count(*) from t4; select count(*) from t5; +select count(*) from t11; +select count(*) from t20; +select count(*) from t21; +select count(*) from t22; +select count(*) from t12; +select count(*) from t13; +select count(*) from t14; +select count(*) from t16; if ($you_want_to_test_UDF) { select count(*) from t6; @@ -196,21 +461,46 @@ if ($you_want_to_test_UDF) select count(*) from t9; } ---replace_column 2 # 5 # ---replace_regex /table_id: [0-9]+/table_id: #/ -show binlog events from 102; sync_slave_with_master; # as we're using UUID we don't SELECT but use "diff" like in rpl_row_UUID --exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql --exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql -connection master; -drop database mysqltest1; -sync_slave_with_master; - # Let's compare. Note: If they match test will pass, if they do not match # the test will show that the diff statement failed and not reject file # will be created. You will need to go to the mysql-test dir and diff # the files your self to see what is not matching --exec diff $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql; + +connection master; +--replace_column 2 # 5 # +--replace_regex /table_id: [0-9]+/table_id: #/ +show binlog events from 102; + +# Now test that mysqlbinlog works fine on a binlog generated by the +# mixed mode + +# BUG#11312 "DELIMITER is not written to the binary log that causes +# syntax error" makes that mysqlbinlog will fail if we pass it the +# text of queries; this forces us to use --base64-output here. + +# BUG#20929 "BINLOG command causes invalid free plus assertion +# failure" makes mysqld segfault when receiving --base64-output + +# So I can't enable this piece of test +# SIGH + +if ($enable_when_11312_or_20929_fixed) +{ +--exec $MYSQL_BINLOG --base64-output $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql +drop database mysqltest1; +--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql +--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql +# the old mysqldump output on slave is the same as what it was on +# master before restoring on master. 
+--exec diff $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql; +} + +drop database mysqltest1; +sync_slave_with_master; diff --git a/mysql-test/t/rpl_temporary.test b/mysql-test/t/rpl_temporary.test index 0d91a9f8e91..fc336db1a3a 100644 --- a/mysql-test/t/rpl_temporary.test +++ b/mysql-test/t/rpl_temporary.test @@ -142,11 +142,8 @@ create temporary table t4 (f int); create table t5 (f int); sync_with_master; # find dumper's $id -source include/get_binlog_dump_thread_id.inc; -insert into t4 values (1); -# a hint how to do that in 5.1 ---replace_result $id "`select id from information_schema.processlist where command='Binlog Dump'`" -eval kill $id; # to stimulate reconnection by slave w/o timeout +select id from information_schema.processlist where command='Binlog Dump' into @id; +kill @id; # to stimulate reconnection by slave w/o timeout insert into t5 select * from t4; save_master_pos; @@ -170,7 +167,7 @@ create temporary table t101 (id int); create temporary table t102 (id int); set @@session.pseudo_thread_id=200; create temporary table t201 (id int); -#create temporary table `t``201` (id int); +create temporary table `t``201` (id int); # emulate internal temp table not to come to binlog create temporary table `#sql_not_user_table202` (id int); set @@session.pseudo_thread_id=300; @@ -203,4 +200,4 @@ select * from t1; connection master; drop table t1; -# End of 5.0 tests +# End of 5.1 tests diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test index 94894ef50de..00520df350c 100644 --- a/mysql-test/t/show_check.test +++ b/mysql-test/t/show_check.test @@ -395,11 +395,10 @@ show create table t1; drop table if exists t1; system rm -f $MYSQLTEST_VARDIR/master-data/test/t1.frm ; - -# End of 4.1 tests # # BUG 12183 - SHOW OPEN TABLES behavior doesn't match grammar # First we close all open tables with FLUSH tables and then we open some. +# CREATE TABLE txt1(a int); CREATE TABLE tyt2(a int); CREATE TABLE urkunde(a int); @@ -421,7 +420,78 @@ DROP TABLE urkunde; --error 1049 SHOW TABLES FROM non_existing_database; -# End of 4.1 tests +--echo End of 4.1 tests + +# +# Bug#17203: "sql_no_cache sql_cache" in views created from prepared +# statement +# +# The problem was that initial user setting was forgotten, and current +# runtime-determined values of the flags were shown instead. +# +--disable_warnings +DROP VIEW IF EXISTS v1; +DROP PROCEDURE IF EXISTS p1; +--enable_warnings + +# Check that SHOW CREATE VIEW shows SQL_CACHE flag exaclty as +# specified by the user. +CREATE VIEW v1 AS SELECT 1; +SHOW CREATE VIEW v1; +DROP VIEW v1; + +CREATE VIEW v1 AS SELECT SQL_CACHE 1; +SHOW CREATE VIEW v1; +DROP VIEW v1; + +CREATE VIEW v1 AS SELECT SQL_NO_CACHE 1; +SHOW CREATE VIEW v1; +DROP VIEW v1; + +# Usage of NOW() disables caching, but we still have show what the +# user have specified. +CREATE VIEW v1 AS SELECT NOW(); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +CREATE VIEW v1 AS SELECT SQL_CACHE NOW(); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +CREATE VIEW v1 AS SELECT SQL_NO_CACHE NOW(); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +# Check that SQL_NO_CACHE always wins. +CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE NOW(); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +CREATE VIEW v1 AS SELECT SQL_NO_CACHE SQL_CACHE NOW(); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE SQL_CACHE NOW(); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +# Check CREATE VIEW in a prepared statement in a procedure. 
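# A brief illustration of what the fix preserves (hedged sketch, same pattern
# as the cases above): the stored definition must echo exactly what the user
# typed, not the runtime-determined flags, e.g. after
#   CREATE VIEW v1 AS SELECT SQL_CACHE NOW();
# SHOW CREATE VIEW v1 should still show SQL_CACHE even though NOW() makes the
# result uncacheable at run time. The same must hold when the CREATE VIEW text
# travels through a prepared statement, which the procedure below checks.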
+delimiter |; +CREATE PROCEDURE p1() +BEGIN + SET @s= 'CREATE VIEW v1 AS SELECT SQL_CACHE 1'; + PREPARE stmt FROM @s; + EXECUTE stmt; + DROP PREPARE stmt; +END | +delimiter ;| +CALL p1(); +SHOW CREATE VIEW v1; + +DROP PROCEDURE p1; +DROP VIEW v1; + +--echo End of 5.0 tests. --disable_result_log SHOW AUTHORS; diff --git a/mysql-test/t/sp-security.test b/mysql-test/t/sp-security.test index e791729fac4..591e9a3ed70 100644 --- a/mysql-test/t/sp-security.test +++ b/mysql-test/t/sp-security.test @@ -721,4 +721,50 @@ DROP USER mysqltest_2@localhost; DROP DATABASE mysqltest; +# +# Bug#19857 - When a user with CREATE ROUTINE priv creates a routine, +# it results in NULL p/w +# + +# Can't test with embedded server that doesn't support grants + +GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO +user19857@localhost; +SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; + +--connect (mysqltest_2_con,localhost,user19857,meow,test) +--echo +--echo ---> connection: mysqltest_2_con +--connection mysqltest_2_con + +use test; + +DELIMITER //; + CREATE PROCEDURE sp19857() DETERMINISTIC + BEGIN + DECLARE a INT; + SET a=1; + SELECT a; + END // +DELIMITER ;// + +SHOW CREATE PROCEDURE test.sp19857; + +--disconnect mysqltest_2_con +--connect (mysqltest_2_con,localhost,user19857,meow,test) +--connection mysqltest_2_con + +DROP PROCEDURE IF EXISTS test.sp19857; + +--echo +--echo ---> connection: root +--connection con1root + +--disconnect mysqltest_2_con + +SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; + +DROP USER user19857@localhost; + # End of 5.0 bugs. diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test index 58adffc6e87..735a2ad78b8 100644 --- a/mysql-test/t/trigger.test +++ b/mysql-test/t/trigger.test @@ -237,7 +237,7 @@ begin end| delimiter ;| insert into t3 values (1); ---error 1048 +--error ER_BAD_NULL_ERROR insert into t1 values (4, "four", 1), (5, "five", 2); select * from t1; select * from t2; @@ -295,19 +295,19 @@ drop table t1, t2; create table t1 (i int); create table t3 (i int); ---error 1363 +--error ER_TRG_NO_SUCH_ROW_IN_TRG create trigger trg before insert on t1 for each row set @a:= old.i; ---error 1363 +--error ER_TRG_NO_SUCH_ROW_IN_TRG create trigger trg before delete on t1 for each row set @a:= new.i; ---error 1362 +--error ER_TRG_CANT_CHANGE_ROW create trigger trg before update on t1 for each row set old.i:=1; ---error 1363 +--error ER_TRG_NO_SUCH_ROW_IN_TRG create trigger trg before delete on t1 for each row set new.i:=1; ---error 1362 +--error ER_TRG_CANT_CHANGE_ROW create trigger trg after update on t1 for each row set new.i:=1; ---error 1054 +--error ER_BAD_FIELD_ERROR create trigger trg before update on t1 for each row set new.j:=1; ---error 1054 +--error ER_BAD_FIELD_ERROR create trigger trg before update on t1 for each row set @a:=old.j; @@ -315,25 +315,25 @@ create trigger trg before update on t1 for each row set @a:=old.j; # Let us test various trigger creation errors # Also quickly test table namespace (bug#5892/6182) # ---error 1146 +--error ER_NO_SUCH_TABLE create trigger trg before insert on t2 for each row set @a:=1; create trigger trg before insert on t1 for each row set @a:=1; ---error 1359 +--error ER_TRG_ALREADY_EXISTS create trigger trg after insert on t1 for each row set @a:=1; ---error 1359 +--error ER_NOT_SUPPORTED_YET create trigger trg2 before insert on t1 for each row set @a:=1; ---error 1359 +--error ER_TRG_ALREADY_EXISTS 
create trigger trg before insert on t3 for each row set @a:=1; create trigger trg2 before insert on t3 for each row set @a:=1; drop trigger trg2; drop trigger trg; ---error 1360 +--error ER_TRG_DOES_NOT_EXIST drop trigger trg; create view v1 as select * from t1; ---error 1347 +--error ER_WRONG_OBJECT create trigger trg before insert on v1 for each row set @a:=1; drop view v1; @@ -341,7 +341,7 @@ drop table t1; drop table t3; create temporary table t1 (i int); ---error 1361 +--error ER_TRG_ON_VIEW_OR_TEMP_TABLE create trigger trg before insert on t1 for each row set @a:=1; drop table t1; @@ -495,47 +495,47 @@ select * from t1; # their main effect. This is because operation on the table row is # executed before "after" trigger and its effect cannot be rolled back # when whole statement fails, because t1 is MyISAM table. ---error 1054 +--error ER_BAD_FIELD_ERROR insert into t1 values (2, 1); select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR update t1 set k = 2 where i = 2; select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR delete from t1 where i = 2; select * from t1; # Should fail and insert only 1 row ---error 1054 +--error ER_BAD_FIELD_ERROR load data infile '../std_data_ln/loaddata5.dat' into table t1 fields terminated by '' enclosed by '' (i, k); select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR insert into t1 select 3, 3; select * from t1; # Multi-update working on the fly, again it will update only # one row even if more matches ---error 1054 +--error ER_BAD_FIELD_ERROR update t1, t2 set k = k + 10 where t1.i = t2.i; select * from t1; # The same for multi-update via temp table ---error 1054 +--error ER_BAD_FIELD_ERROR update t1, t2 set k = k + 10 where t1.i = t2.i and k < 3; select * from t1; # Multi-delete on the fly ---error 1054 +--error ER_BAD_FIELD_ERROR delete t1, t2 from t1 straight_join t2 where t1.i = t2.i; select * from t1; # And via temporary storage ---error 1054 +--error ER_BAD_FIELD_ERROR delete t2, t1 from t2 straight_join t1 where t1.i = t2.i; select * from t1; # Prepare table for testing of REPLACE and INSERT ... ON DUPLICATE KEY UPDATE alter table t1 add primary key (i); ---error 1054 +--error ER_BAD_FIELD_ERROR insert into t1 values (3, 4) on duplicate key update k= k + 10; select * from t1; # The following statement will delete old row and won't # insert new one since after delete trigger will fail. ---error 1054 +--error ER_BAD_FIELD_ERROR replace into t1 values (3, 3); select * from t1; # Also drops all triggers @@ -553,33 +553,33 @@ alter table t1 drop column bt; # The following statements changing t1 should fail and should not # cause any effect on table, since "before" trigger is executed # before operation on the table row. 
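# A compact sketch of the distinction (illustrative, assuming the triggers
# above still reference the dropped column bt): a statement such as
#   insert into t1 values (3, 3);
# dies inside the BEFORE trigger with ER_BAD_FIELD_ERROR before MyISAM ever
# writes the row, so t1 stays unchanged, while the earlier AFTER-trigger cases
# had already modified the row and that change could not be rolled back.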
---error 1054 +--error ER_BAD_FIELD_ERROR insert into t1 values (3, 3); select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR update t1 set i = 2; select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR delete from t1; select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR load data infile '../std_data_ln/loaddata5.dat' into table t1 fields terminated by '' enclosed by '' (i, k); select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR insert into t1 select 3, 3; select * from t1; # Both types of multi-update (on the fly and via temp table) ---error 1054 +--error ER_BAD_FIELD_ERROR update t1, t2 set k = k + 10 where t1.i = t2.i; select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR update t1, t2 set k = k + 10 where t1.i = t2.i and k < 2; select * from t1; # Both types of multi-delete ---error 1054 +--error ER_BAD_FIELD_ERROR delete t1, t2 from t1 straight_join t2 where t1.i = t2.i; select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR delete t2, t1 from t2 straight_join t1 where t1.i = t2.i; select * from t1; # Let us test REPLACE/INSERT ... ON DUPLICATE KEY UPDATE. @@ -587,10 +587,10 @@ select * from t1; # in ordinary INSERT we need to drop "before insert" trigger. alter table t1 add primary key (i); drop trigger bi; ---error 1054 +--error ER_BAD_FIELD_ERROR insert into t1 values (2, 4) on duplicate key update k= k + 10; select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR replace into t1 values (2, 4); select * from t1; # Also drops all triggers @@ -608,7 +608,7 @@ insert into t1 values (1, 2); create function bug5893 () returns int return 5; create trigger t1_bu before update on t1 for each row set new.col1= bug5893(); drop function bug5893; ---error 1305 +--error ER_SP_DOES_NOT_EXIST update t1 set col2 = 4; # This should not crash server too. drop trigger t1_bu; @@ -908,9 +908,9 @@ create trigger t1_bi after insert on t1 for each row insert into t3 values (new. # Until we implement proper mechanism for invalidation of PS/SP when table # or SP's are changed these two statements will fail with 'Table ... was # not locked' error (this mechanism should be based on the new TDC). 
---error 1100 +--error 1100 #ER_TABLE_NOT_LOCKED execute stmt1; ---error 1100 +--error 1100 #ER_TABLE_NOT_LOCKED call p1(); deallocate prepare stmt1; drop procedure p1; @@ -1186,7 +1186,7 @@ INSERT INTO t1 VALUES (@x); SELECT @x; SET @x=2; ---error 1365 +--error ER_DIVISION_BY_ZERO UPDATE t1 SET i1 = @x; SELECT @x; @@ -1197,7 +1197,7 @@ INSERT INTO t1 VALUES (@x); SELECT @x; SET @x=4; ---error 1365 +--error ER_DIVISION_BY_ZERO UPDATE t1 SET i1 = @x; SELECT @x; @@ -1281,4 +1281,26 @@ SELECT * FROM t1; DROP TABLE t1; -# End of 5.0 tests +# +# Bug #18005: Creating a trigger on mysql.event leads to server crash on +# scheduler startup +# +# Bug #18361: Triggers on mysql.user table cause server crash +# +# We don't allow triggers on the mysql schema +delimiter |; +--error ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA +create trigger wont_work after update on mysql.user for each row +begin + set @a:= 1; +end| +# Try when we're already using the mysql schema +use mysql| +--error ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA +create trigger wont_work after update on event for each row +begin + set @a:= 1; +end| +delimiter ;| + +--echo End of 5.0 tests diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index 7aa79f0eb40..771a4ad3ed3 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -302,6 +302,22 @@ set wait_timeout=100; set log_warnings=1; # +# Bugs: #20392: INSERT_ID session variable has weird value +# +select @@session.insert_id; +set @save_insert_id=@@session.insert_id; +set session insert_id=20; +select @@session.insert_id; + +set session last_insert_id=100; +select @@session.insert_id; +select @@session.last_insert_id; +select @@session.insert_id; + +set @@session.insert_id=@save_insert_id; +select @@session.insert_id; + +# # key buffer # diff --git a/mysys/my_handler.c b/mysys/my_handler.c index bfec44d57a4..46144c0dff2 100644 --- a/mysys/my_handler.c +++ b/mysys/my_handler.c @@ -15,6 +15,7 @@ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ +#include <my_global.h> #include "my_handler.h" int mi_compare_text(CHARSET_INFO *charset_info, uchar *a, uint a_length, diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c index b431667063a..518a6a5fdd0 100644 --- a/mysys/safemalloc.c +++ b/mysys/safemalloc.c @@ -525,8 +525,9 @@ char *_my_strdup(const char *from, const char *filename, uint lineno, } /* _my_strdup */ -char *_my_strndup(const char *from, uint length, const char *filename, - uint lineno, myf MyFlags) +char *_my_strndup(const char *from, uint length, + const char *filename, uint lineno, + myf MyFlags) { gptr ptr; if ((ptr=_mymalloc(length+1,filename,lineno,MyFlags)) != 0) diff --git a/server-tools/instance-manager/mysqlmanager.vcproj b/server-tools/instance-manager/mysqlmanager.vcproj index ef8b2dd017e..bbcb94fa221 100644 --- a/server-tools/instance-manager/mysqlmanager.vcproj +++ b/server-tools/instance-manager/mysqlmanager.vcproj @@ -34,7 +34,7 @@ <Tool Name="VCLinkerTool" AdditionalDependencies="wsock32.lib" - OutputFile="$(OutDir)/mysqlmanager.exe" + OutputFile="../../client_debug/mysqlmanager.exe" LinkIncremental="2" GenerateDebugInformation="TRUE" ProgramDatabaseFile="$(OutDir)/mysqlmanager.pdb" @@ -82,7 +82,7 @@ <Tool Name="VCLinkerTool" AdditionalDependencies="wsock32.lib" - OutputFile="$(OutDir)/mysqlmanager.exe" + OutputFile="../../client_release/mysqlmanager.exe" LinkIncremental="1" GenerateDebugInformation="TRUE" SubSystem="1" diff --git a/sql-common/client.c b/sql-common/client.c index 160f6eb2602..8c0420a27ba 100644 --- 
a/sql-common/client.c +++ b/sql-common/client.c @@ -1701,7 +1701,7 @@ int mysql_init_character_set(MYSQL *mysql) C_MODE_END -MYSQL * +MYSQL * STDCALL CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, const char *passwd, const char *db, uint port, const char *unix_socket,ulong client_flag) diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index a44000be19c..ea1100938d7 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -32,13 +32,14 @@ so to read, that data has to be parsed into fields, to write, fields have to be stored in this format to write to this data file. - With MySQL Federated storage engine, there will be no local files for each - table's data (such as .MYD). A foreign database will store the data that would - normally be in this file. This will necessitate the use of MySQL client API - to read, delete, update, insert this data. The data will have to be retrieve - via an SQL call "SELECT * FROM users". Then, to read this data, it will have - to be retrieved via mysql_fetch_row one row at a time, then converted from - the column in this select into the format that the handler expects. + With MySQL Federated storage engine, there will be no local files + for each table's data (such as .MYD). A foreign database will store + the data that would normally be in this file. This will necessitate + the use of MySQL client API to read, delete, update, insert this + data. The data will have to be retrieve via an SQL call "SELECT * + FROM users". Then, to read this data, it will have to be retrieved + via mysql_fetch_row one row at a time, then converted from the + column in this select into the format that the handler expects. The create table will simply create the .frm file, and within the "CREATE TABLE" SQL, there SHALL be any of the following : @@ -385,8 +386,8 @@ static handler *federated_create_handler(TABLE_SHARE *table, static byte *federated_get_key(FEDERATED_SHARE *share, uint *length, my_bool not_used __attribute__ ((unused))) { - *length= share->table_name_length; - return (byte*) share->table_name; + *length= share->connect_string_length; + return (byte*) share->scheme; } /* @@ -415,7 +416,7 @@ int federated_db_init() if (pthread_mutex_init(&federated_mutex, MY_MUTEX_INIT_FAST)) goto error; - if (!hash_init(&federated_open_tables, system_charset_info, 32, 0, 0, + if (!hash_init(&federated_open_tables, &my_charset_bin, 32, 0, 0, (hash_get_key) federated_get_key, 0, 0)) { federated_init= TRUE; @@ -510,6 +511,7 @@ static int check_foreign_data_source(FEDERATED_SHARE *share, } else { + int escaped_table_name_length= 0; /* Since we do not support transactions at this version, we can let the client API silently reconnect. For future versions, we will need more @@ -528,17 +530,16 @@ static int check_foreign_data_source(FEDERATED_SHARE *share, query.append(FEDERATED_STAR); query.append(FEDERATED_FROM); query.append(FEDERATED_BTICK); - escape_string_for_mysql(&my_charset_bin, (char *)escaped_table_name, + escaped_table_name_length= + escape_string_for_mysql(&my_charset_bin, (char*)escaped_table_name, sizeof(escaped_table_name), share->table_name, share->table_name_length); - query.append(escaped_table_name); + query.append(escaped_table_name, escaped_table_name_length); query.append(FEDERATED_BTICK); query.append(FEDERATED_WHERE); query.append(FEDERATED_FALSE); - DBUG_PRINT("info", ("check_foreign_data_source query %s", - query.c_ptr_quick())); if (mysql_real_query(mysql, query.ptr(), query.length())) { error_code= table_create_flag ? 
@@ -630,10 +631,10 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, DBUG_PRINT("info", ("String: '%.*s'", table->s->connect_string.length, table->s->connect_string.str)); share->scheme= my_strndup(table->s->connect_string.str, - table->s->connect_string.length, MYF(0)); + table->s->connect_string.length, + MYF(0)); - // Add a null for later termination of table name - share->scheme[table->s->connect_string.length]= 0; + share->connect_string_length= table->s->connect_string.length; DBUG_PRINT("info",("parse_url alloced share->scheme %lx", share->scheme)); /* @@ -699,7 +700,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, share->table_name++; share->table_name_length= strlen(share->table_name); - + /* make sure there's not an extra / */ if ((strchr(share->table_name, '/'))) goto error; @@ -735,8 +736,7 @@ error: ha_federated::ha_federated(TABLE_SHARE *table_arg) :handler(&federated_hton, table_arg), - mysql(0), stored_result(0), - ref_length(sizeof(MYSQL_ROW_OFFSET)), current_position(0) + mysql(0), stored_result(0) { trx_next= 0; } @@ -747,8 +747,9 @@ ha_federated::ha_federated(TABLE_SHARE *table_arg) SYNOPSIS convert_row_to_internal_format() - record Byte pointer to record - row MySQL result set row from fetchrow() + record Byte pointer to record + row MySQL result set row from fetchrow() + result Result set to use DESCRIPTION This method simply iterates through a row returned via fetchrow with @@ -761,15 +762,16 @@ ha_federated::ha_federated(TABLE_SHARE *table_arg) 0 After fields have had field values stored from record */ -uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row) +uint ha_federated::convert_row_to_internal_format(byte *record, + MYSQL_ROW row, + MYSQL_RES *result) { ulong *lengths; Field **field; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); DBUG_ENTER("ha_federated::convert_row_to_internal_format"); - lengths= mysql_fetch_lengths(stored_result); - memset(record, 0, table->s->null_bytes); + lengths= mysql_fetch_lengths(result); for (field= table->field; *field; field++, row++, lengths++) { @@ -1314,12 +1316,11 @@ err: static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) { - char *select_query, *tmp_table_name; + char *select_query; char query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; - uint tmp_table_name_length; Field **field; String query(query_buffer, sizeof(query_buffer), &my_charset_bin); - FEDERATED_SHARE *share; + FEDERATED_SHARE *share= NULL, tmp_share; /* In order to use this string, we must first zero it's length, or it will contain garbage @@ -1327,12 +1328,15 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) query.length(0); pthread_mutex_lock(&federated_mutex); - tmp_table_name= table->s->table_name.str; - tmp_table_name_length= table->s->table_name.length; + if (parse_url(&tmp_share, table, 0)) + goto error; + + /* TODO: change tmp_share.scheme to LEX_STRING object */ if (!(share= (FEDERATED_SHARE *) hash_search(&federated_open_tables, - (byte*) table_name, - tmp_table_name_length))) + (byte*) tmp_share.scheme, + tmp_share. 
+ connect_string_length))) { query.set_charset(system_charset_info); query.append(FEDERATED_SELECT); @@ -1350,18 +1354,15 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) if (!(share= (FEDERATED_SHARE *) my_multi_malloc(MYF(MY_WME), &share, sizeof(*share), - &tmp_table_name, tmp_table_name_length+ 1, &select_query, query.length()+table->s->connect_string.length+1, NullS))) - { - pthread_mutex_unlock(&federated_mutex); - return NULL; - } - - if (parse_url(share, table, 0)) goto error; + memcpy(share, &tmp_share, sizeof(tmp_share)); + + share->table_name_length= strlen(share->table_name); + /* TODO: share->table_name to LEX_STRING object */ query.append(share->table_name, share->table_name_length); query.append(FEDERATED_BTICK); share->select_query= select_query; @@ -1382,11 +1383,8 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) error: pthread_mutex_unlock(&federated_mutex); - if (share->scheme) - { - my_free((gptr) share->scheme, MYF(0)); - share->scheme= 0; - } + my_free((gptr) tmp_share.scheme, MYF(MY_ALLOW_ZERO_PTR)); + my_free((gptr) share, MYF(MY_ALLOW_ZERO_PTR)); return NULL; } @@ -1406,13 +1404,7 @@ static int free_share(FEDERATED_SHARE *share) { hash_delete(&federated_open_tables, (byte*) share); my_free((gptr) share->scheme, MYF(MY_ALLOW_ZERO_PTR)); - share->scheme= 0; - if (share->socket) - { - my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR)); - share->socket= 0; - } - + my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR)); thr_lock_delete(&share->lock); VOID(pthread_mutex_destroy(&share->mutex)); my_free((gptr) share, MYF(0)); @@ -1474,14 +1466,15 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked) /* Connect to foreign database mysql_real_connect() */ mysql= mysql_init(0); - if (!mysql_real_connect(mysql, - share->hostname, - share->username, - share->password, - share->database, - share->port, - share->socket, 0)) + if (!mysql || !mysql_real_connect(mysql, + share->hostname, + share->username, + share->password, + share->database, + share->port, + share->socket, 0)) { + free_share(share); DBUG_RETURN(stash_remote_error()); } /* @@ -1491,6 +1484,11 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked) */ mysql->reconnect= 1; + ref_length= (table->s->primary_key != MAX_KEY ? + table->key_info[table->s->primary_key].key_length : + table->s->reclength); + DBUG_PRINT("info", ("ref_length: %u", ref_length)); + DBUG_RETURN(0); } @@ -1514,13 +1512,12 @@ int ha_federated::close(void) /* free the result set */ if (stored_result) { - DBUG_PRINT("info", - ("mysql_free_result result at address %lx", stored_result)); mysql_free_result(stored_result); stored_result= 0; } /* Disconnect from mysql */ - mysql_close(mysql); + if (mysql) // QQ is this really needed + mysql_close(mysql); retval= free_share(share); DBUG_RETURN(retval); @@ -1686,8 +1683,6 @@ int ha_federated::write_row(byte *buf) /* add the values */ insert_string.append(values_string); - DBUG_PRINT("info", ("insert query %s", insert_string.c_ptr_quick())); - if (mysql_real_query(mysql, insert_string.ptr(), insert_string.length())) { DBUG_RETURN(stash_remote_error()); @@ -1708,14 +1703,15 @@ int ha_federated::write_row(byte *buf) This method ensures that last_insert_id() works properly. What it simply does is calls last_insert_id() on the foreign database immediately after insert (if the table has an auto_increment field) and sets the insert id via - thd->insert_id(ID) (as well as storing thd->prev_insert_id) + thd->insert_id(ID)). 
*/ void ha_federated::update_auto_increment(void) { THD *thd= current_thd; DBUG_ENTER("ha_federated::update_auto_increment"); - thd->insert_id(mysql->last_used_con->insert_id); + thd->first_successful_insert_id_in_cur_stmt= + mysql->last_used_con->insert_id; DBUG_PRINT("info",("last_insert_id %d", stats.auto_increment_value)); DBUG_VOID_RETURN; @@ -1763,7 +1759,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt) query.append(FEDERATED_EXTENDED); if (check_opt->sql_flags & TT_USEFRM) query.append(FEDERATED_USE_FRM); - + if (mysql_real_query(mysql, query.ptr(), query.length())) { DBUG_RETURN(stash_remote_error()); @@ -1919,7 +1915,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) /* This will delete a row. 'buf' will contain a copy of the row to be =deleted. The server will call this right after the current row has been called (from - either a previous rnd_nexT() or index call). + either a previous rnd_next() or index call). If you keep a pointer to the last row or can access a primary key it will make doing the deletion quite a bit easier. Keep in mind that the server does no guarentee consecutive deletions. @@ -1985,6 +1981,7 @@ int ha_federated::delete_row(const byte *buf) DBUG_RETURN(stash_remote_error()); } stats.deleted+= mysql->affected_rows; + stats.records-= mysql->affected_rows; DBUG_PRINT("info", ("rows deleted %d rows deleted for all time %d", int(mysql->affected_rows), stats.deleted)); @@ -2001,12 +1998,15 @@ int ha_federated::delete_row(const byte *buf) */ int ha_federated::index_read(byte *buf, const byte *key, - uint key_len, enum ha_rkey_function find_flag) + uint key_len, ha_rkey_function find_flag) { - int retval; DBUG_ENTER("ha_federated::index_read"); - retval= index_read_idx(buf, active_index, key, key_len, find_flag); - DBUG_RETURN(retval); + + if (stored_result) + mysql_free_result(stored_result); + DBUG_RETURN(index_read_idx_with_result_set(buf, active_index, key, + key_len, find_flag, + &stored_result)); } @@ -2015,26 +2015,60 @@ int ha_federated::index_read(byte *buf, const byte *key, row if any. This is only used to read whole keys. This method is called via index_read in the case of a WHERE clause using - a regular non-primary key index, OR is called DIRECTLY when the WHERE clause + a primary key index OR is called DIRECTLY when the WHERE clause uses a PRIMARY KEY index. + + NOTES + This uses an internal result set that is deleted before function + returns. 
We need to be able to be calable from ha_rnd_pos() */ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key, uint key_len, enum ha_rkey_function find_flag) { int retval; + MYSQL_RES *mysql_result; + DBUG_ENTER("ha_federated::index_read_idx"); + + if ((retval= index_read_idx_with_result_set(buf, index, key, + key_len, find_flag, + &mysql_result))) + DBUG_RETURN(retval); + mysql_free_result(mysql_result); + DBUG_RETURN(retval); +} + + +/* + Create result set for rows matching query and return first row + + RESULT + 0 ok In this case *result will contain the result set + table->status == 0 + # error In this case *result will contain 0 + table->status == STATUS_NOT_FOUND +*/ + +int ha_federated::index_read_idx_with_result_set(byte *buf, uint index, + const byte *key, + uint key_len, + ha_rkey_function find_flag, + MYSQL_RES **result) +{ + int retval; char error_buffer[FEDERATED_QUERY_BUFFER_SIZE]; char index_value[STRING_BUFFER_USUAL_SIZE]; char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; - String index_string(index_value, + String index_string(index_value, sizeof(index_value), &my_charset_bin); String sql_query(sql_query_buffer, sizeof(sql_query_buffer), &my_charset_bin); key_range range; - DBUG_ENTER("ha_federated::index_read_idx"); + DBUG_ENTER("ha_federated::index_read_idx_with_result_set"); + *result= 0; // In case of errors index_string.length(0); sql_query.length(0); statistic_increment(table->in_use->status_var.ha_read_key_count, @@ -2051,20 +2085,6 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key, NULL, 0, 0); sql_query.append(index_string); - DBUG_PRINT("info", - ("current key %d key value %s index_string value %s length %d", - index, (char*) key, index_string.c_ptr_quick(), - index_string.length())); - - DBUG_PRINT("info", - ("current position %d sql_query %s", current_position, - sql_query.c_ptr_quick())); - - if (stored_result) - { - mysql_free_result(stored_result); - stored_result= 0; - } if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length())) { my_sprintf(error_buffer, (error_buffer, "error: %d '%s'", @@ -2072,45 +2092,41 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key, retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE; goto error; } - stored_result= mysql_store_result(mysql); - - if (!stored_result) + if (!(*result= mysql_store_result(mysql))) { retval= HA_ERR_END_OF_FILE; goto error; } - /* - This basically says that the record in table->record[0] is legal, - and that it is ok to use this record, for whatever reason, such - as with a join (without it, joins will not work) - */ - table->status= 0; + if (!(retval= read_next(buf, *result))) + DBUG_RETURN(retval); - retval= rnd_next(buf); + mysql_free_result(*result); + *result= 0; + table->status= STATUS_NOT_FOUND; DBUG_RETURN(retval); error: - if (stored_result) - { - mysql_free_result(stored_result); - stored_result= 0; - } table->status= STATUS_NOT_FOUND; my_error(retval, MYF(0), error_buffer); DBUG_RETURN(retval); } + /* Initialized at each key walk (called multiple times unlike rnd_init()) */ + int ha_federated::index_init(uint keynr, bool sorted) { DBUG_ENTER("ha_federated::index_init"); - DBUG_PRINT("info", - ("table: '%s' key: %d", table->s->table_name.str, keynr)); + DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name, keynr)); active_index= keynr; DBUG_RETURN(0); } +/* + Read first range +*/ + int ha_federated::read_range_first(const key_range *start_key, const key_range *end_key, bool eq_range, bool sorted) @@ -2122,8 +2138,7 @@ int 
ha_federated::read_range_first(const key_range *start_key, &my_charset_bin); DBUG_ENTER("ha_federated::read_range_first"); - if (start_key == NULL && end_key == NULL) - DBUG_RETURN(0); + DBUG_ASSERT(!(start_key == NULL && end_key == NULL)); sql_query.length(0); sql_query.append(share->select_query); @@ -2131,6 +2146,11 @@ int ha_federated::read_range_first(const key_range *start_key, &table->key_info[active_index], start_key, end_key, 0, eq_range); + if (stored_result) + { + mysql_free_result(stored_result); + stored_result= 0; + } if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length())) { retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE; @@ -2138,38 +2158,21 @@ int ha_federated::read_range_first(const key_range *start_key, } sql_query.length(0); - if (stored_result) - { - DBUG_PRINT("info", - ("mysql_free_result address %lx", stored_result)); - mysql_free_result(stored_result); - stored_result= 0; - } - stored_result= mysql_store_result(mysql); - - if (!stored_result) + if (!(stored_result= mysql_store_result(mysql))) { retval= HA_ERR_END_OF_FILE; goto error; } - - /* This was successful, please let it be known! */ - table->status= 0; - retval= rnd_next(table->record[0]); + retval= read_next(table->record[0], stored_result); DBUG_RETURN(retval); error: - table->status= STATUS_NOT_FOUND; - if (stored_result) - { - DBUG_PRINT("info", ("mysql_free_result address %lx", stored_result)); - mysql_free_result(stored_result); - stored_result= 0; - } - DBUG_RETURN(retval); + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(retval); } + int ha_federated::read_range_next() { int retval; @@ -2182,13 +2185,13 @@ int ha_federated::read_range_next() /* Used to read forward through the index. */ int ha_federated::index_next(byte *buf) { - int retval; DBUG_ENTER("ha_federated::index_next"); statistic_increment(table->in_use->status_var.ha_read_next_count, &LOCK_status); - retval= rnd_next(buf); - DBUG_RETURN(retval); + DBUG_RETURN(read_next(buf, stored_result)); } + + /* rnd_init() is called when the system wants the storage engine to do a table scan. @@ -2242,11 +2245,8 @@ int ha_federated::rnd_init(bool scan) if (scan) { - DBUG_PRINT("info", ("share->select_query %s", share->select_query)); if (stored_result) { - DBUG_PRINT("info", - ("mysql_free_result address %lx", stored_result)); mysql_free_result(stored_result); stored_result= 0; } @@ -2263,28 +2263,25 @@ int ha_federated::rnd_init(bool scan) DBUG_RETURN(0); error: - DBUG_RETURN(stash_remote_error()); + DBUG_RETURN(stash_remote_error()); } + int ha_federated::rnd_end() { - int retval; DBUG_ENTER("ha_federated::rnd_end"); - - if (stored_result) - { - DBUG_PRINT("info", ("mysql_free_result address %lx", stored_result)); - mysql_free_result(stored_result); - stored_result= 0; - } - retval= index_end(); - DBUG_RETURN(retval); + DBUG_RETURN(index_end()); } int ha_federated::index_end(void) { DBUG_ENTER("ha_federated::index_end"); + if (stored_result) + { + mysql_free_result(stored_result); + stored_result= 0; + } active_index= MAX_KEY; DBUG_RETURN(0); } @@ -2302,8 +2299,6 @@ int ha_federated::index_end(void) int ha_federated::rnd_next(byte *buf) { - int retval; - MYSQL_ROW row; DBUG_ENTER("ha_federated::rnd_next"); if (stored_result == 0) @@ -2311,32 +2306,60 @@ int ha_federated::rnd_next(byte *buf) /* Return value of rnd_init is not always checked (see records.cc), so we can get here _even_ if there is _no_ pre-fetched result-set! - TODO: fix it. - */ + TODO: fix it. We can delete this in 5.1 when rnd_init() is checked. 
+ */ DBUG_RETURN(1); } - + DBUG_RETURN(read_next(buf, stored_result)); +} + + +/* + ha_federated::read_next + + reads from a result set and converts to mysql internal + format + + SYNOPSIS + field_in_record_is_null() + buf byte pointer to record + result mysql result set + + DESCRIPTION + This method is a wrapper method that reads one record from a result + set and converts it to the internal table format + + RETURN VALUE + 1 error + 0 no error +*/ + +int ha_federated::read_next(byte *buf, MYSQL_RES *result) +{ + int retval; + my_ulonglong num_rows; + MYSQL_ROW row; + DBUG_ENTER("ha_federated::read_next"); + + table->status= STATUS_NOT_FOUND; // For easier return + /* Fetch a row, insert it back in a row format. */ - current_position= stored_result->data_cursor; - DBUG_PRINT("info", ("current position %d", current_position)); - if (!(row= mysql_fetch_row(stored_result))) + if (!(row= mysql_fetch_row(result))) DBUG_RETURN(HA_ERR_END_OF_FILE); - retval= convert_row_to_internal_format(buf, row); + if (!(retval= convert_row_to_internal_format(buf, row, result))) + table->status= 0; + DBUG_RETURN(retval); } /* - 'position()' is called after each call to rnd_next() if the data needs to be - ordered. You can do something like the following to store the position: - my_store_ptr(ref, ref_length, current_position); + store reference to current row so that we can later find it for + a re-read, update or delete. - The server uses ref to store data. ref_length in the above case is the size - needed to store current_position. ref is just a byte array that the server - will maintain. If you are using offsets to mark rows, then current_position - should be the offset. If it is a primary key like in BDB, then it needs to - be a primary key. + In case of federated, a reference is either a primary key or + the whole record. Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc. */ @@ -2344,32 +2367,44 @@ int ha_federated::rnd_next(byte *buf) void ha_federated::position(const byte *record) { DBUG_ENTER("ha_federated::position"); - /* my_store_ptr Add seek storage */ - *(MYSQL_ROW_OFFSET *) ref= current_position; // ref is always aligned + if (table->s->primary_key != MAX_KEY) + key_copy(ref, (byte *)record, table->key_info + table->s->primary_key, + ref_length); + else + memcpy(ref, record, ref_length); DBUG_VOID_RETURN; } /* This is like rnd_next, but you are given a position to use to determine the - row. The position will be of the type that you stored in ref. You can use - ha_get_ptr(pos,ref_length) to retrieve whatever key or position you saved - when position() was called. + row. The position will be of the type that you stored in ref. - This method is required for an ORDER BY. + This method is required for an ORDER BY Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc. 
*/ + int ha_federated::rnd_pos(byte *buf, byte *pos) { + int result; DBUG_ENTER("ha_federated::rnd_pos"); - statistic_increment(table->in_use->status_var.ha_read_rnd_count, &LOCK_status); - memcpy_fixed(¤t_position, pos, sizeof(MYSQL_ROW_OFFSET)); - stored_result->current_row= 0; - stored_result->data_cursor= current_position; - DBUG_RETURN(rnd_next(buf)); + if (table->s->primary_key != MAX_KEY) + { + /* We have a primary key, so use index_read_idx to find row */ + result= index_read_idx(buf, table->s->primary_key, pos, + ref_length, HA_READ_KEY_EXACT); + } + else + { + /* otherwise, get the old record ref as obtained in ::position */ + memcpy(buf, pos, ref_length); + result= 0; + } + table->status= result ? STATUS_NOT_FOUND : 0; + DBUG_RETURN(result); } @@ -2462,7 +2497,7 @@ void ha_federated::info(uint flag) if (flag & HA_STATUS_VARIABLE | HA_STATUS_CONST) { - /* + /* deleted is set in ha_federated::info */ /* @@ -2474,11 +2509,13 @@ void ha_federated::info(uint flag) delete_length = ? */ if (row[4] != NULL) - stats.records= (ha_rows) my_strtoll10(row[4], (char**) 0, + stats.records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error); if (row[5] != NULL) - stats.mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0, - &error); + stats.mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0, &error); + + stats.data_file_length= stats.records * stats.mean_rec_length; + if (row[12] != NULL) stats.update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error); @@ -2486,8 +2523,13 @@ void ha_federated::info(uint flag) stats.check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error); } + /* + size of IO operations (This is based on a good guess, no high science + involved) + */ if (flag & HA_STATUS_CONST) stats.block_size= 4096; + } if (result) @@ -2498,6 +2540,7 @@ void ha_federated::info(uint flag) error: if (result) mysql_free_result(result); + my_sprintf(error_buffer, (error_buffer, ": %d : %s", mysql_errno(mysql), mysql_error(mysql))); my_error(error_code, MYF(0), error_buffer); @@ -2578,6 +2621,7 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) { + DBUG_ENTER("ha_federated::store_lock"); if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) { /* @@ -2607,7 +2651,7 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd, *to++= &lock; - return to; + DBUG_RETURN(to); } /* diff --git a/sql/ha_federated.h b/sql/ha_federated.h index 4a4561ba274..fdb443e74c5 100644 --- a/sql/ha_federated.h +++ b/sql/ha_federated.h @@ -130,6 +130,7 @@ typedef struct st_federated_share { remote host info, parse_url supplies */ char *scheme; + char *connect_string; char *hostname; char *username; char *password; @@ -139,7 +140,7 @@ typedef struct st_federated_share { char *socket; char *sport; ushort port; - uint table_name_length, use_count; + uint table_name_length, connect_string_length, use_count; pthread_mutex_t mutex; THR_LOCK lock; } FEDERATED_SHARE; @@ -153,7 +154,6 @@ class ha_federated: public handler FEDERATED_SHARE *share; /* Shared lock info */ MYSQL *mysql; /* MySQL connection */ MYSQL_RES *stored_result; - uint ref_length; uint fetch_num; // stores the fetch num MYSQL_ROW_OFFSET current_position; // Current position used by ::position() int remote_error_number; @@ -164,7 +164,8 @@ private: return 0 on success return errorcode otherwise */ - uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row); + uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row, + MYSQL_RES *result); bool create_where_from_key(String *to, KEY 
*key_info, const key_range *start_key, const key_range *end_key, @@ -304,6 +305,13 @@ public: int connection_rollback(); int connection_autocommit(bool state); int execute_simple_query(const char *query, int len); + + int read_next(byte *buf, MYSQL_RES *result); + int index_read_idx_with_result_set(byte *buf, uint index, + const byte *key, + uint key_len, + ha_rkey_function find_flag, + MYSQL_RES **result); }; int federated_db_init(void); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 8b17dae9d7e..64f0cc0b76e 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -2473,9 +2473,7 @@ int ha_ndbcluster::write_row(byte *record) m_skip_auto_increment= FALSE; update_auto_increment(); - /* Ensure that handler is always called for auto_increment values */ - thd->next_insert_id= 0; - m_skip_auto_increment= !auto_increment_column_changed; + m_skip_auto_increment= (insert_id_for_cur_row == 0); } } @@ -2631,9 +2629,10 @@ int ha_ndbcluster::write_row(byte *record) { Ndb *ndb= get_ndb(); Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1; + char buff[22]; DBUG_PRINT("info", - ("Trying to set next auto increment value to %llu", - (ulonglong) next_val)); + ("Trying to set next auto increment value to %s", + llstr(next_val, buff))); Ndb_tuple_id_range_guard g(m_share); if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE) == -1) @@ -3067,11 +3066,13 @@ void ha_ndbcluster::unpack_record(byte *buf) // Table with hidden primary key int hidden_no= table_share->fields; const NDBTAB *tab= m_table; + char buff[22]; const NDBCOL *hidden_col= tab->getColumn(hidden_no); const NdbRecAttr* rec= m_value[hidden_no].rec; DBUG_ASSERT(rec); - DBUG_PRINT("hidden", ("%d: %s \"%llu\"", hidden_no, - hidden_col->getName(), rec->u_64_value())); + DBUG_PRINT("hidden", ("%d: %s \"%s\"", hidden_no, + hidden_col->getName(), + llstr(rec->u_64_value(), buff))); } //DBUG_EXECUTE("value", print_results();); #endif @@ -3663,20 +3664,11 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) switch (operation) { case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/ DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY")); - if (current_thd->lex->sql_command == SQLCOM_REPLACE && !m_has_unique_index) - { - DBUG_PRINT("info", ("Turning ON use of write instead of insert")); - m_use_write= TRUE; - } else - { - DBUG_PRINT("info", ("Ignoring duplicate key")); - m_ignore_dup_key= TRUE; - } + DBUG_PRINT("info", ("Ignoring duplicate key")); + m_ignore_dup_key= TRUE; break; case HA_EXTRA_NO_IGNORE_DUP_KEY: DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY")); - DBUG_PRINT("info", ("Turning OFF use of write instead of insert")); - m_use_write= FALSE; m_ignore_dup_key= FALSE; break; case HA_EXTRA_IGNORE_NO_KEY: @@ -3689,6 +3681,19 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit")); m_ignore_no_key= FALSE; break; + case HA_EXTRA_WRITE_CAN_REPLACE: + DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE")); + if (!m_has_unique_index) + { + DBUG_PRINT("info", ("Turning ON use of write instead of insert")); + m_use_write= TRUE; + } + break; + case HA_EXTRA_WRITE_CANNOT_REPLACE: + DBUG_PRINT("info", ("HA_EXTRA_WRITE_CANNOT_REPLACE")); + DBUG_PRINT("info", ("Turning OFF use of write instead of insert")); + m_use_write= FALSE; + break; default: break; } @@ -4502,10 +4507,11 @@ static int create_ndb_column(NDBCOL &col, // Set autoincrement if (field->flags & AUTO_INCREMENT_FLAG) { + char buff[22]; col.setAutoIncrement(TRUE); 
ulonglong value= info->auto_increment_value ? info->auto_increment_value : (ulonglong) 1; - DBUG_PRINT("info", ("Autoincrement key, initial: %llu", value)); + DBUG_PRINT("info", ("Autoincrement key, initial: %s", llstr(value, buff))); col.setAutoIncrementInitialValue(value); } else @@ -6822,8 +6828,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, if (share->commit_count != 0) { *commit_count= share->commit_count; - DBUG_PRINT("info", ("Getting commit_count: %llu from share", - share->commit_count)); + char buff[22]; + DBUG_PRINT("info", ("Getting commit_count: %s from share", + llstr(share->commit_count, buff))); pthread_mutex_unlock(&share->mutex); free_share(&share); DBUG_RETURN(0); @@ -6851,7 +6858,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, pthread_mutex_lock(&share->mutex); if (share->commit_count_lock == lock) { - DBUG_PRINT("info", ("Setting commit_count to %llu", stat.commit_count)); + char buff[22]; + DBUG_PRINT("info", ("Setting commit_count to %s", + llstr(stat.commit_count, buff))); share->commit_count= stat.commit_count; *commit_count= stat.commit_count; } @@ -6901,13 +6910,12 @@ ndbcluster_cache_retrieval_allowed(THD *thd, char *full_name, uint full_name_len, ulonglong *engine_data) { - DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); - Uint64 commit_count; bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); char *dbname= full_name; char *tabname= dbname+strlen(dbname)+1; - + char buff[22], buff2[22]; + DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); DBUG_PRINT("enter", ("dbname: %s, tabname: %s, is_autocommit: %d", dbname, tabname, is_autocommit)); @@ -6923,8 +6931,8 @@ ndbcluster_cache_retrieval_allowed(THD *thd, DBUG_PRINT("exit", ("No, could not retrieve commit_count")); DBUG_RETURN(FALSE); } - DBUG_PRINT("info", ("*engine_data: %llu, commit_count: %llu", - *engine_data, commit_count)); + DBUG_PRINT("info", ("*engine_data: %s, commit_count: %s", + llstr(*engine_data, buff), llstr(commit_count, buff2))); if (commit_count == 0) { *engine_data= 0; /* invalidate */ @@ -6938,7 +6946,8 @@ ndbcluster_cache_retrieval_allowed(THD *thd, DBUG_RETURN(FALSE); } - DBUG_PRINT("exit", ("OK to use cache, engine_data: %llu", *engine_data)); + DBUG_PRINT("exit", ("OK to use cache, engine_data: %s", + llstr(*engine_data, buff))); DBUG_RETURN(TRUE); } @@ -6971,10 +6980,10 @@ ha_ndbcluster::register_query_cache_table(THD *thd, qc_engine_callback *engine_callback, ulonglong *engine_data) { - DBUG_ENTER("ha_ndbcluster::register_query_cache_table"); - + Uint64 commit_count; + char buff[22]; bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); - + DBUG_ENTER("ha_ndbcluster::register_query_cache_table"); DBUG_PRINT("enter",("dbname: %s, tabname: %s, is_autocommit: %d", m_dbname, m_tabname, is_autocommit)); @@ -6984,7 +6993,6 @@ ha_ndbcluster::register_query_cache_table(THD *thd, DBUG_RETURN(FALSE); } - Uint64 commit_count; if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count)) { *engine_data= 0; @@ -6993,7 +7001,7 @@ ha_ndbcluster::register_query_cache_table(THD *thd, } *engine_data= commit_count; *engine_callback= ndbcluster_cache_retrieval_allowed; - DBUG_PRINT("exit", ("commit_count: %llu", commit_count)); + DBUG_PRINT("exit", ("commit_count: %s", llstr(commit_count, buff))); DBUG_RETURN(commit_count > 0); } @@ -7409,12 +7417,13 @@ int ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab, struct Ndb_statistics * ndbstat) { - DBUG_ENTER("ndb_get_table_statistics"); - 
DBUG_PRINT("enter", ("table: %s", ndbtab->getName())); NdbTransaction* pTrans; NdbError error; int retries= 10; int retry_sleep= 30 * 1000; /* 30 milliseconds */ + char buff[22], buff2[22], buff3[22], buff4[22]; + DBUG_ENTER("ndb_get_table_statistics"); + DBUG_PRINT("enter", ("table: %s", ndbtab->getName())); DBUG_ASSERT(ndbtab != 0); @@ -7496,10 +7505,13 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab, ndbstat->row_size= sum_row_size; ndbstat->fragment_memory= sum_mem; - DBUG_PRINT("exit", ("records: %llu commits: %llu " - "row_size: %llu mem: %llu count: %u", - sum_rows, sum_commits, sum_row_size, - sum_mem, count)); + DBUG_PRINT("exit", ("records: %s commits: %s " + "row_size: %s mem: %s count: %u", + llstr(sum_rows, buff), + llstr(sum_commits, buff2), + llstr(sum_row_size, buff3), + llstr(sum_mem, buff4), + count)); DBUG_RETURN(0); retry: @@ -8151,9 +8163,12 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) if (ndbtab_g.get_table() && ndb_get_table_statistics(ndb, ndbtab_g.get_table(), &stat) == 0) { + char buff[22], buff2[22]; DBUG_PRINT("ndb_util_thread", ("Table: %s, commit_count: %llu, rows: %llu", - share->key, stat.commit_count, stat.row_count)); + share->key, + llstr(stat.commit_count, buff), + llstr(stat.row_count, buff2))); } else { diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index c0257e08537..5b15c4b32bd 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -4666,6 +4666,27 @@ int ha_partition::extra(enum ha_extra_function operation) */ break; } + case HA_EXTRA_WRITE_CAN_REPLACE: + case HA_EXTRA_WRITE_CANNOT_REPLACE: + { + /* + Informs handler that write_row() can replace rows which conflict + with row being inserted by PK/unique key without reporting error + to the SQL-layer. + + This optimization is not safe for partitioned table in general case + since we may have to put new version of row into partition which is + different from partition in which old version resides (for example + when we partition by non-PK column or by some column which is not + part of unique key which were violated). + And since NDB which is the only engine at the moment that supports + this optimization handles partitioning on its own we simple disable + it here. (BTW for NDB this optimization is safe since it supports + only KEY partitioning and won't use this optimization for tables + which have additional unique constraints). 
+ */ + break; + } default: { /* Temporary crash to discover what is wrong */ @@ -5263,7 +5284,7 @@ int ha_partition::cmp_ref(const byte *ref1, const byte *ref2) MODULE auto increment ****************************************************************************/ -void ha_partition::restore_auto_increment() +void ha_partition::restore_auto_increment(ulonglong) { DBUG_ENTER("ha_partition::restore_auto_increment"); diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 4c627cd50f8..f9fe5f2501f 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -811,7 +811,7 @@ public: auto_increment_column_changed ------------------------------------------------------------------------- */ - virtual void restore_auto_increment(); + virtual void restore_auto_increment(ulonglong prev_insert_id); virtual void get_auto_increment(ulonglong offset, ulonglong increment, ulonglong nb_desired_values, ulonglong *first_value, diff --git a/sql/handler.cc b/sql/handler.cc index 0dfb31fba8c..b356102a61a 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -1514,7 +1514,10 @@ int handler::read_first_row(byte * buf, uint primary_key) } /* - Generate the next auto-increment number based on increment and offset + Generate the next auto-increment number based on increment and offset: + computes the lowest number + - strictly greater than "nr" + - of the form: auto_increment_offset + N * auto_increment_increment In most cases increment= offset= 1, in which case we get: 1,2,3,4,5,... @@ -1523,8 +1526,10 @@ int handler::read_first_row(byte * buf, uint primary_key) */ inline ulonglong -next_insert_id(ulonglong nr,struct system_variables *variables) +compute_next_insert_id(ulonglong nr,struct system_variables *variables) { + if (variables->auto_increment_increment == 1) + return (nr+1); // optimization of the formula below nr= (((nr+ variables->auto_increment_increment - variables->auto_increment_offset)) / (ulonglong) variables->auto_increment_increment); @@ -1533,6 +1538,58 @@ next_insert_id(ulonglong nr,struct system_variables *variables) } +void handler::adjust_next_insert_id_after_explicit_value(ulonglong nr) +{ + /* + If we have set THD::next_insert_id previously and plan to insert an + explicitely-specified value larger than this, we need to increase + THD::next_insert_id to be greater than the explicit value. + */ + if ((next_insert_id > 0) && (nr >= next_insert_id)) + set_next_insert_id(compute_next_insert_id(nr, &table->in_use->variables)); +} + + +/* + Computes the largest number X: + - smaller than or equal to "nr" + - of the form: auto_increment_offset + N * auto_increment_increment + where N>=0. + + SYNOPSIS + prev_insert_id + nr Number to "round down" + variables variables struct containing auto_increment_increment and + auto_increment_offset + + RETURN + The number X if it exists, "nr" otherwise. +*/ + +inline ulonglong +prev_insert_id(ulonglong nr, struct system_variables *variables) +{ + if (unlikely(nr < variables->auto_increment_offset)) + { + /* + There's nothing good we can do here. That is a pathological case, where + the offset is larger than the column's max possible value, i.e. not even + the first sequence value may be inserted. User will receive warning. 
+ */ + DBUG_PRINT("info",("auto_increment: nr: %lu cannot honour " + "auto_increment_offset: %lu", + nr, variables->auto_increment_offset)); + return nr; + } + if (variables->auto_increment_increment == 1) + return nr; // optimization of the formula below + nr= (((nr - variables->auto_increment_offset)) / + (ulonglong) variables->auto_increment_increment); + return (nr * (ulonglong) variables->auto_increment_increment + + variables->auto_increment_offset); +} + + /* Update the auto_increment field if necessary @@ -1546,7 +1603,7 @@ next_insert_id(ulonglong nr,struct system_variables *variables) IMPLEMENTATION - Updates columns with type NEXT_NUMBER if: + Updates the record's Field of type NEXT_NUMBER if: - If column value is set to NULL (in which case auto_increment_field_not_null is 0) @@ -1554,25 +1611,31 @@ next_insert_id(ulonglong nr,struct system_variables *variables) set. In the future we will only set NEXT_NUMBER fields if one sets them to NULL (or they are not included in the insert list). + In those cases, we check if the currently reserved interval still has + values we have not used. If yes, we pick the smallest one and use it. + Otherwise: - There are two different cases when the above is true: - - - thd->next_insert_id == 0 (This is the normal case) - In this case we set the set the column for the first row to the value - next_insert_id(get_auto_increment(column))) which is normally - max-used-column-value +1. + - If a list of intervals has been provided to the statement via SET + INSERT_ID or via an Intvar_log_event (in a replication slave), we pick the + first unused interval from this list, consider it as reserved. - We call get_auto_increment() only for the first row in a multi-row - statement. For the following rows we generate new numbers based on the - last used number. + - Otherwise we set the column for the first row to the value + next_insert_id(get_auto_increment(column))) which is usually + max-used-column-value+1. + We call get_auto_increment() for the first row in a multi-row + statement. get_auto_increment() will tell us the interval of values it + reserved for us. - - thd->next_insert_id != 0. This happens when we have read an Intvar event - of type INSERT_ID_EVENT from the binary log or when one has used SET - INSERT_ID=#. + - In both cases, for the following rows we use those reserved values without + calling the handler again (we just progress in the interval, computing + each new value from the previous one). Until we have exhausted them, then + we either take the next provided interval or call get_auto_increment() + again to reserve a new interval. - In this case we will set the column to the value of next_insert_id. - The next row will be given the id - next_insert_id(next_insert_id) + - In both cases, the reserved intervals are remembered in + thd->auto_inc_intervals_in_cur_stmt_for_binlog if statement-based + binlogging; the last reserved interval is remembered in + auto_inc_interval_for_cur_row. The idea is that generated auto_increment values are predictable and independent of the column values in the table. This is needed to be @@ -1583,7 +1646,13 @@ next_insert_id(ulonglong nr,struct system_variables *variables) inserts a column with a higher value than the last used one, we will start counting from the inserted value. - thd->next_insert_id is cleared after it's been used for a statement. 
+ This function's "outputs" are: the table's auto_increment field is filled + with a value, thd->next_insert_id is filled with the value to use for the + next row, if a value was autogenerated for the current row it is stored in + thd->insert_id_for_cur_row, if get_auto_increment() was called + thd->auto_inc_interval_for_cur_row is modified, if that interval is not + present in thd->auto_inc_intervals_in_cur_stmt_for_binlog it is added to + this list. TODO @@ -1600,7 +1669,8 @@ next_insert_id(ulonglong nr,struct system_variables *variables) bool handler::update_auto_increment() { - ulonglong nr; + ulonglong nr, nb_reserved_values; + bool append= FALSE; THD *thd= table->in_use; struct system_variables *variables= &thd->variables; bool auto_increment_field_not_null; @@ -1608,10 +1678,10 @@ bool handler::update_auto_increment() DBUG_ENTER("handler::update_auto_increment"); /* - We must save the previous value to be able to restore it if the - row was not inserted + next_insert_id is a "cursor" into the reserved interval, it may go greater + than the interval, but not smaller. */ - thd->prev_insert_id= thd->next_insert_id; + DBUG_ASSERT(next_insert_id >= auto_inc_interval_for_cur_row.minimum()); auto_increment_field_not_null= table->auto_increment_field_not_null; table->auto_increment_field_not_null= FALSE; // to reset for next row @@ -1620,131 +1690,140 @@ bool handler::update_auto_increment() thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) { /* - The user did specify a value for the auto_inc column, we don't generate - a new value, write it down. - */ - auto_increment_column_changed=0; - - /* Update next_insert_id if we had already generated a value in this statement (case of INSERT VALUES(null),(3763),(null): the last NULL needs to insert 3764, not the value of the first NULL plus 1). */ - if (thd->clear_next_insert_id && nr >= thd->next_insert_id) - { - if (variables->auto_increment_increment != 1) - nr= next_insert_id(nr, variables); - else - nr++; - thd->next_insert_id= nr; - DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr)); - } + adjust_next_insert_id_after_explicit_value(nr); + insert_id_for_cur_row= 0; // didn't generate anything DBUG_RETURN(0); } - if (!(nr= thd->next_insert_id)) + + if ((nr= next_insert_id) >= auto_inc_interval_for_cur_row.maximum()) { - ulonglong nb_desired_values= 1, nb_reserved_values; -#ifdef TO_BE_ENABLED_SOON - /* - Reserved intervals will be stored in "THD::auto_inc_intervals". - handler::estimation_rows_to_insert will be the argument passed by - handler::ha_start_bulk_insert(). - */ - uint estimation_known= test(estimation_rows_to_insert > 0); - uint nb_already_reserved_intervals= thd->auto_inc_intervals.nb_elements(); - /* - If an estimation was given to the engine: - - use it. - - if we already reserved numbers, it means the estimation was - not accurate, then we'll reserve 2*AUTO_INC_DEFAULT_NB_VALUES the 2nd - time, twice that the 3rd time etc. - If no estimation was given, use those increasing defaults from the - start, starting from AUTO_INC_DEFAULT_NB_VALUES. - Don't go beyond a max to not reserve "way too much" (because reservation - means potentially losing unused values). - */ - if (nb_already_reserved_intervals == 0 && estimation_known) - nb_desired_values= estimation_rows_to_insert; - else /* go with the increasing defaults */ + /* next_insert_id is beyond what is reserved, so we reserve more. 
*/ + const Discrete_interval *forced= + thd->auto_inc_intervals_forced.get_next(); + if (forced != NULL) { - /* avoid overflow in formula, with this if() */ - if (nb_already_reserved_intervals <= AUTO_INC_DEFAULT_NB_MAX_BITS) + nr= forced->minimum(); + nb_reserved_values= forced->values(); + } + else + { + /* + handler::estimation_rows_to_insert was set by + handler::ha_start_bulk_insert(); if 0 it means "unknown". + */ + uint nb_already_reserved_intervals= + thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements(); + ulonglong nb_desired_values; + /* + If an estimation was given to the engine: + - use it. + - if we already reserved numbers, it means the estimation was + not accurate, then we'll reserve 2*AUTO_INC_DEFAULT_NB_ROWS the 2nd + time, twice that the 3rd time etc. + If no estimation was given, use those increasing defaults from the + start, starting from AUTO_INC_DEFAULT_NB_ROWS. + Don't go beyond a max to not reserve "way too much" (because + reservation means potentially losing unused values). + */ + if (nb_already_reserved_intervals == 0 && + (estimation_rows_to_insert > 0)) + nb_desired_values= estimation_rows_to_insert; + else /* go with the increasing defaults */ { - nb_desired_values= AUTO_INC_DEFAULT_NB_VALUES * - (1 << nb_already_reserved_intervals); - set_if_smaller(nb_desired_values, AUTO_INC_DEFAULT_NB_MAX); + /* avoid overflow in formula, with this if() */ + if (nb_already_reserved_intervals <= AUTO_INC_DEFAULT_NB_MAX_BITS) + { + nb_desired_values= AUTO_INC_DEFAULT_NB_ROWS * + (1 << nb_already_reserved_intervals); + set_if_smaller(nb_desired_values, AUTO_INC_DEFAULT_NB_MAX); + } + else + nb_desired_values= AUTO_INC_DEFAULT_NB_MAX; } - else - nb_desired_values= AUTO_INC_DEFAULT_NB_MAX; + /* This call ignores all its parameters but nr, currently */ + get_auto_increment(variables->auto_increment_offset, + variables->auto_increment_increment, + nb_desired_values, &nr, + &nb_reserved_values); + if (nr == ~(ulonglong) 0) + result= 1; // Mark failure + + /* + That rounding below should not be needed when all engines actually + respect offset and increment in get_auto_increment(). But they don't + so we still do it. Wonder if for the not-first-in-index we should do + it. Hope that this rounding didn't push us out of the interval; even + if it did we cannot do anything about it (calling the engine again + will not help as we inserted no row). + */ + nr= compute_next_insert_id(nr-1, variables); + } + + if (table->s->next_number_key_offset == 0) + { + /* We must defer the appending until "nr" has been possibly truncated */ + append= TRUE; + } + else + { + /* + For such auto_increment there is no notion of interval, just a + singleton. The interval is not even stored in + thd->auto_inc_interval_for_cur_row, so we are sure to call the engine + for next row. + */ + DBUG_PRINT("info",("auto_increment: special not-first-in-index")); } -#endif - /* This call ignores all its parameters but nr, currently */ - get_auto_increment(variables->auto_increment_offset, - variables->auto_increment_increment, - nb_desired_values, &nr, - &nb_reserved_values); - if (nr == ~(ulonglong) 0) - result= 1; // Mark failure - - /* - That should not be needed when engines actually use offset and increment - above. - */ - if (variables->auto_increment_increment != 1) - nr= next_insert_id(nr-1, variables); - /* - Update next row based on the found value. 
This way we don't have to - call the handler for every generated auto-increment value on a - multi-row statement - */ - thd->next_insert_id= nr; } DBUG_PRINT("info",("auto_increment: %lu", (ulong) nr)); - /* Mark that we should clear next_insert_id before next stmt */ - thd->clear_next_insert_id= 1; - - if (!table->next_number_field->store((longlong) nr, TRUE)) - thd->insert_id((ulonglong) nr); - else - thd->insert_id(table->next_number_field->val_int()); - - /* - We can't set next_insert_id if the auto-increment key is not the - first key part, as there is no guarantee that the first parts will be in - sequence - */ - if (!table->s->next_number_key_offset) + if (unlikely(table->next_number_field->store((longlong) nr, TRUE))) { /* - Set next insert id to point to next auto-increment value to be able to - handle multi-row statements - This works even if auto_increment_increment > 1 + field refused this value (overflow) and truncated it, use the result of + the truncation (which is going to be inserted); however we try to + decrease it to honour auto_increment_* variables. + That will shift the left bound of the reserved interval, we don't + bother shifting the right bound (anyway any other value from this + interval will cause a duplicate key). */ - thd->next_insert_id= next_insert_id(nr, variables); + nr= prev_insert_id(table->next_number_field->val_int(), variables); + if (unlikely(table->next_number_field->store((longlong) nr, TRUE))) + nr= table->next_number_field->val_int(); + } + if (append) + { + auto_inc_interval_for_cur_row.replace(nr, nb_reserved_values, + variables->auto_increment_increment); + /* Row-based replication does not need to store intervals in binlog */ + if (!thd->current_stmt_binlog_row_based) + result= result || + thd->auto_inc_intervals_in_cur_stmt_for_binlog.append(auto_inc_interval_for_cur_row.minimum(), + auto_inc_interval_for_cur_row.values(), + variables->auto_increment_increment); } - else - thd->next_insert_id= 0; - - /* Mark that we generated a new value */ - auto_increment_column_changed=1; - DBUG_RETURN(result); -} - -/* - restore_auto_increment - In case of error on write, we restore the last used next_insert_id value - because the previous value was not used. -*/ + /* + Record this autogenerated value. If the caller then + succeeds to insert this value, it will call + record_first_successful_insert_id_in_cur_stmt() + which will set first_successful_insert_id_in_cur_stmt if it's not + already set. + */ + insert_id_for_cur_row= nr; + /* + Set next insert id to point to next auto-increment value to be able to + handle multi-row statements. + */ + set_next_insert_id(compute_next_insert_id(nr, variables)); -void handler::restore_auto_increment() -{ - THD *thd= table->in_use; - if (thd->next_insert_id) - thd->next_insert_id= thd->prev_insert_id; + DBUG_RETURN(result); } @@ -1840,6 +1919,23 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment, } +void handler::ha_release_auto_increment() +{ + release_auto_increment(); + insert_id_for_cur_row= 0; + auto_inc_interval_for_cur_row.replace(0, 0, 0); + if (next_insert_id > 0) + { + next_insert_id= 0; + /* + this statement used forced auto_increment values if there were some, + wipe them away for other statements. 
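When the statement gave no row estimate, the code above sizes each new reservation as AUTO_INC_DEFAULT_NB_ROWS * 2^(number of intervals already reserved), capped at AUTO_INC_DEFAULT_NB_MAX, so a long multi-row INSERT calls get_auto_increment() only a logarithmic number of times. The sketch below reproduces only that sizing rule; the constant values used (start at 1, 16-bit cap) are assumptions for illustration, the real definitions live in handler.h.

    #include <cstdio>
    typedef unsigned long long ulonglong;

    // Assumed example constants, standing in for AUTO_INC_DEFAULT_NB_*.
    static const ulonglong NB_ROWS_START= 1;
    static const unsigned  NB_MAX_BITS= 16;
    static const ulonglong NB_MAX= (1ULL << NB_MAX_BITS) - 1;

    // Values asked for by the k-th reservation of one statement when no
    // row estimate was passed to ha_start_bulk_insert().
    static ulonglong desired_values(unsigned nb_already_reserved_intervals)
    {
      if (nb_already_reserved_intervals > NB_MAX_BITS)   // avoid shift overflow
        return NB_MAX;
      ulonglong n= NB_ROWS_START * (1ULL << nb_already_reserved_intervals);
      return n < NB_MAX ? n : NB_MAX;
    }

    int main()
    {
      for (unsigned k= 0; k < 5; k++)
        printf("reservation #%u asks for %llu value(s)\n",
               k + 1, desired_values(k));   // 1, 2, 4, 8, 16
      return 0;
    }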
+ */ + table->in_use->auto_inc_intervals_forced.empty(); + } +} + + void handler::print_keydup_error(uint key_nr, const char *msg) { /* Write the duplicated key in the error message */ @@ -3369,10 +3465,13 @@ namespace int handler::ha_external_lock(THD *thd, int lock_type) { DBUG_ENTER("handler::ha_external_lock"); - int error; - if (unlikely(error= external_lock(thd, lock_type))) - DBUG_RETURN(error); - DBUG_RETURN(0); + /* + Whether this is lock or unlock, this should be true, and is to verify that + if get_auto_increment() was called (thus may have reserved intervals or + taken a table lock), ha_release_auto_increment() was too. + */ + DBUG_ASSERT(next_insert_id == 0); + DBUG_RETURN(external_lock(thd, lock_type)); } diff --git a/sql/handler.h b/sql/handler.h index 27223de6111..3c090b887a3 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -906,16 +906,37 @@ public: uint ref_length; FT_INFO *ft_handler; enum {NONE=0, INDEX, RND} inited; - bool auto_increment_column_changed; bool implicit_emptied; /* Can be !=0 only if HEAP */ const COND *pushed_cond; + /* + next_insert_id is the next value which should be inserted into the + auto_increment column: in a inserting-multi-row statement (like INSERT + SELECT), for the first row where the autoinc value is not specified by the + statement, get_auto_increment() called and asked to generate a value, + next_insert_id is set to the next value, then for all other rows + next_insert_id is used (and increased each time) without calling + get_auto_increment(). + */ + ulonglong next_insert_id; + /* + insert id for the current row (*autogenerated*; if not + autogenerated, it's 0). + At first successful insertion, this variable is stored into + THD::first_successful_insert_id_in_cur_stmt. + */ + ulonglong insert_id_for_cur_row; + /* + Interval returned by get_auto_increment() and being consumed by the + inserter. + */ + Discrete_interval auto_inc_interval_for_cur_row; handler(const handlerton *ht_arg, TABLE_SHARE *share_arg) :table_share(share_arg), estimation_rows_to_insert(0), ht(ht_arg), ref(0), key_used_on_scan(MAX_KEY), active_index(MAX_KEY), ref_length(sizeof(my_off_t)), ft_handler(0), inited(NONE), implicit_emptied(0), - pushed_cond(NULL) + pushed_cond(NULL), next_insert_id(0), insert_id_for_cur_row(0) {} virtual ~handler(void) { @@ -954,6 +975,7 @@ public: return TRUE; } int ha_open(TABLE *table, const char *name, int mode, int test_if_locked); + void adjust_next_insert_id_after_explicit_value(ulonglong nr); bool update_auto_increment(); void print_keydup_error(uint key_nr, const char *msg); virtual void print_error(int error, myf errflag); @@ -1247,9 +1269,30 @@ public: ulonglong nb_desired_values, ulonglong *first_value, ulonglong *nb_reserved_values); +private: virtual void release_auto_increment() { return; }; - virtual void restore_auto_increment(); - +public: + void ha_release_auto_increment(); + void set_next_insert_id(ulonglong id) + { + DBUG_PRINT("info",("auto_increment: next value %lu", (ulong)id)); + next_insert_id= id; + } + void restore_auto_increment(ulonglong prev_insert_id) + { + /* + Insertion of a row failed, re-use the lastly generated auto_increment + id, for the next row. This is achieved by resetting next_insert_id to + what it was before the failed insertion (that old value is provided by + the caller). 
If that value was 0, it was the first row of the INSERT; + then if insert_id_for_cur_row contains 0 it means no id was generated + for this first row, so no id was generated since the INSERT started, so + we should set next_insert_id to 0; if insert_id_for_cur_row is not 0, it + is the generated id of the first and failed row, so we use it. + */ + next_insert_id= (prev_insert_id > 0) ? prev_insert_id : + insert_id_for_cur_row; + } /* Reset the auto-increment counter to the given value, i.e. the next row inserted will get the given value. This is called e.g. after TRUNCATE diff --git a/sql/item.cc b/sql/item.cc index 53797052788..eff2b744972 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1343,35 +1343,37 @@ void my_coll_agg_error(DTCollation &c1, DTCollation &c2, DTCollation &c3, static -void my_coll_agg_error(Item** args, uint count, const char *fname) +void my_coll_agg_error(Item** args, uint count, const char *fname, + int item_sep) { if (count == 2) - my_coll_agg_error(args[0]->collation, args[1]->collation, fname); + my_coll_agg_error(args[0]->collation, args[item_sep]->collation, fname); else if (count == 3) - my_coll_agg_error(args[0]->collation, args[1]->collation, - args[2]->collation, fname); + my_coll_agg_error(args[0]->collation, args[item_sep]->collation, + args[2*item_sep]->collation, fname); else my_error(ER_CANT_AGGREGATE_NCOLLATIONS,MYF(0),fname); } bool agg_item_collations(DTCollation &c, const char *fname, - Item **av, uint count, uint flags) + Item **av, uint count, uint flags, int item_sep) { uint i; + Item **arg; c.set(av[0]->collation); - for (i= 1; i < count; i++) + for (i= 1, arg= &av[item_sep]; i < count; i++, arg++) { - if (c.aggregate(av[i]->collation, flags)) + if (c.aggregate((*arg)->collation, flags)) { - my_coll_agg_error(av, count, fname); + my_coll_agg_error(av, count, fname, item_sep); return TRUE; } } if ((flags & MY_COLL_DISALLOW_NONE) && c.derivation == DERIVATION_NONE) { - my_coll_agg_error(av, count, fname); + my_coll_agg_error(av, count, fname, item_sep); return TRUE; } return FALSE; @@ -1382,7 +1384,7 @@ bool agg_item_collations_for_comparison(DTCollation &c, const char *fname, Item **av, uint count, uint flags) { return (agg_item_collations(c, fname, av, count, - flags | MY_COLL_DISALLOW_NONE)); + flags | MY_COLL_DISALLOW_NONE, 1)); } @@ -1405,16 +1407,26 @@ bool agg_item_collations_for_comparison(DTCollation &c, const char *fname, For functions with more than two arguments: collect(A,B,C) ::= collect(collect(A,B),C) + + Since this function calls THD::change_item_tree() on the passed Item ** + pointers, it is necessary to pass the original Item **'s, not copies. + Otherwise their values will not be properly restored (see BUG#20769). + If the items are not consecutive (eg. args[2] and args[5]), use the + item_sep argument, ie. 
+ + agg_item_charsets(coll, fname, &args[2], 2, flags, 3) + */ bool agg_item_charsets(DTCollation &coll, const char *fname, - Item **args, uint nargs, uint flags) + Item **args, uint nargs, uint flags, int item_sep) { Item **arg, **last, *safe_args[2]; LINT_INIT(safe_args[0]); LINT_INIT(safe_args[1]); - if (agg_item_collations(coll, fname, args, nargs, flags)) + + if (agg_item_collations(coll, fname, args, nargs, flags, item_sep)) return TRUE; /* @@ -1427,19 +1439,20 @@ bool agg_item_charsets(DTCollation &coll, const char *fname, if (nargs >=2 && nargs <= 3) { safe_args[0]= args[0]; - safe_args[1]= args[1]; + safe_args[1]= args[item_sep]; } THD *thd= current_thd; Query_arena *arena, backup; bool res= FALSE; + uint i; /* In case we're in statement prepare, create conversion item in its memory: it will be reused on each execute. */ arena= thd->activate_stmt_arena_if_needed(&backup); - for (arg= args, last= args + nargs; arg < last; arg++) + for (i= 0, arg= args; i < nargs; i++, arg+= item_sep) { Item* conv; uint32 dummy_offset; @@ -1454,9 +1467,9 @@ bool agg_item_charsets(DTCollation &coll, const char *fname, { /* restore the original arguments for better error message */ args[0]= safe_args[0]; - args[1]= safe_args[1]; + args[item_sep]= safe_args[1]; } - my_coll_agg_error(args, nargs, fname); + my_coll_agg_error(args, nargs, fname, item_sep); res= TRUE; break; // we cannot return here, we need to restore "arena". } @@ -5436,9 +5449,14 @@ void Item_insert_value::print(String *str) void Item_trigger_field::setup_field(THD *thd, TABLE *table, GRANT_INFO *table_grant_info) { + /* + It is too early to mark fields used here, because before execution + of statement that will invoke trigger other statements may use same + TABLE object, so all such mark-up will be wiped out. + So instead we do it in Table_triggers_list::mark_fields_used() + method which is called during execution of these statements. + */ enum_mark_columns save_mark_used_columns= thd->mark_used_columns; - - /* TODO: Think more about consequences of this step. 
*/ thd->mark_used_columns= MARK_COLUMNS_NONE; /* Try to find field by its name and if it will be found diff --git a/sql/item.h b/sql/item.h index a6132aba8b0..5494b885ff9 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1139,12 +1139,11 @@ public: }; bool agg_item_collations(DTCollation &c, const char *name, - Item **items, uint nitems, uint flags= 0); + Item **items, uint nitems, uint flags, int item_sep); bool agg_item_collations_for_comparison(DTCollation &c, const char *name, - Item **items, uint nitems, - uint flags= 0); + Item **items, uint nitems, uint flags); bool agg_item_charsets(DTCollation &c, const char *name, - Item **items, uint nitems, uint flags= 0); + Item **items, uint nitems, uint flags, int item_sep); class Item_num: public Item diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index ce537614386..c9c12b154c4 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -409,7 +409,7 @@ void Item_bool_func2::fix_length_and_dec() DTCollation coll; if (args[0]->result_type() == STRING_RESULT && args[1]->result_type() == STRING_RESULT && - agg_arg_charsets(coll, args, 2, MY_COLL_CMP_CONV)) + agg_arg_charsets(coll, args, 2, MY_COLL_CMP_CONV, 1)) return; @@ -1226,7 +1226,7 @@ void Item_func_between::fix_length_and_dec() agg_cmp_type(thd, &cmp_type, args, 3); if (cmp_type == STRING_RESULT) - agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV); + agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV, 1); } @@ -1346,7 +1346,7 @@ Item_func_ifnull::fix_length_and_dec() switch (hybrid_type) { case STRING_RESULT: - agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV); + agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1); break; case DECIMAL_RESULT: case REAL_RESULT: @@ -1518,7 +1518,7 @@ Item_func_if::fix_length_and_dec() agg_result_type(&cached_result_type, args+1, 2); if (cached_result_type == STRING_RESULT) { - if (agg_arg_charsets(collation, args+1, 2, MY_COLL_ALLOW_CONV)) + if (agg_arg_charsets(collation, args+1, 2, MY_COLL_ALLOW_CONV, 1)) return; } else @@ -1599,7 +1599,7 @@ Item_func_nullif::fix_length_and_dec() unsigned_flag= args[0]->unsigned_flag; cached_result_type= args[0]->result_type(); if (cached_result_type == STRING_RESULT && - agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV)) + agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1)) return; } } @@ -1891,7 +1891,7 @@ void Item_func_case::fix_length_and_dec() agg_result_type(&cached_result_type, agg, nagg); if ((cached_result_type == STRING_RESULT) && - agg_arg_charsets(collation, agg, nagg, MY_COLL_ALLOW_CONV)) + agg_arg_charsets(collation, agg, nagg, MY_COLL_ALLOW_CONV, 1)) return; @@ -1907,7 +1907,7 @@ void Item_func_case::fix_length_and_dec() nagg++; agg_cmp_type(thd, &cmp_type, agg, nagg); if ((cmp_type == STRING_RESULT) && - agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV)) + agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV, 1)) return; } @@ -2037,7 +2037,7 @@ void Item_func_coalesce::fix_length_and_dec() case STRING_RESULT: count_only_length(); decimals= NOT_FIXED_DEC; - agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV); + agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1); break; case DECIMAL_RESULT: count_decimal_length(); @@ -2501,7 +2501,7 @@ void Item_func_in::fix_length_and_dec() agg_cmp_type(thd, &cmp_type, args, arg_count); if (cmp_type == STRING_RESULT && - agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV)) + agg_arg_charsets(cmp_collation, args, arg_count, 
MY_COLL_CMP_CONV, 1)) return; for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++) @@ -3234,7 +3234,7 @@ Item_func_regex::fix_fields(THD *thd, Item **ref) max_length= 1; decimals= 0; - if (agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV)) + if (agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1)) return TRUE; used_tables_cache=args[0]->used_tables() | args[1]->used_tables(); diff --git a/sql/item_create.cc b/sql/item_create.cc index 6eca6209438..bf4af2232f7 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -426,7 +426,9 @@ Item *create_func_unhex(Item* a) Item *create_func_uuid(void) { THD *thd= current_thd; - thd->lex->binlog_row_based_if_mixed= 1; +#ifdef HAVE_ROW_BASED_REPLICATION + thd->lex->binlog_row_based_if_mixed= TRUE; +#endif return new(thd->mem_root) Item_func_uuid(); } diff --git a/sql/item_func.cc b/sql/item_func.cc index 8139ba81777..1aea388fdbb 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2036,7 +2036,7 @@ void Item_func_min_max::fix_length_and_dec() cmp_type=item_cmp_type(cmp_type,args[i]->result_type()); } if (cmp_type == STRING_RESULT) - agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV); + agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1); else if ((cmp_type == DECIMAL_RESULT) || (cmp_type == INT_RESULT)) max_length= my_decimal_precision_to_length(max_int_part+decimals, decimals, unsigned_flag); @@ -2222,7 +2222,7 @@ longlong Item_func_coercibility::val_int() void Item_func_locate::fix_length_and_dec() { maybe_null=0; max_length=11; - agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV); + agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1); } @@ -2339,7 +2339,7 @@ void Item_func_field::fix_length_and_dec() for (uint i=1; i < arg_count ; i++) cmp_type= item_cmp_type(cmp_type, args[i]->result_type()); if (cmp_type == STRING_RESULT) - agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV); + agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1); } @@ -2406,7 +2406,7 @@ void Item_func_find_in_set::fix_length_and_dec() } } } - agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV); + agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1); } static const char separator=','; @@ -3283,12 +3283,20 @@ longlong Item_func_last_insert_id::val_int() if (arg_count) { longlong value= args[0]->val_int(); - thd->insert_id(value); null_value= args[0]->null_value; - return value; // Avoid side effect of insert_id() + /* + LAST_INSERT_ID(X) must affect the client's mysql_insert_id() as + documented in the manual. We don't want to touch + first_successful_insert_id_in_cur_stmt because it would make + LAST_INSERT_ID(X) take precedence over an generated auto_increment + value for this row. + */ + thd->arg_of_last_insert_id_function= TRUE; + thd->first_successful_insert_id_in_prev_stmt= value; + return value; } thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); - return thd->last_insert_id_used ? 
thd->current_insert_id : thd->insert_id(); + return thd->read_first_successful_insert_id_in_prev_stmt(); } /* This function is just used to test speed of different functions */ @@ -4393,7 +4401,8 @@ bool Item_func_match::fix_fields(THD *thd, Item **ref) return 1; } table->fulltext_searched=1; - return agg_arg_collations_for_comparison(cmp_collation, args+1, arg_count-1); + return agg_arg_collations_for_comparison(cmp_collation, + args+1, arg_count-1, 0); } bool Item_func_match::fix_index() diff --git a/sql/item_func.h b/sql/item_func.h index 0aedae73bdc..375567838f8 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -166,21 +166,21 @@ public: my_decimal *val_decimal(my_decimal *); bool agg_arg_collations(DTCollation &c, Item **items, uint nitems, - uint flags= 0) + uint flags) { - return agg_item_collations(c, func_name(), items, nitems, flags); + return agg_item_collations(c, func_name(), items, nitems, flags, 1); } bool agg_arg_collations_for_comparison(DTCollation &c, Item **items, uint nitems, - uint flags= 0) + uint flags) { return agg_item_collations_for_comparison(c, func_name(), items, nitems, flags); } bool agg_arg_charsets(DTCollation &c, Item **items, uint nitems, - uint flags= 0) + uint flags, int item_sep) { - return agg_item_charsets(c, func_name(), items, nitems, flags); + return agg_item_charsets(c, func_name(), items, nitems, flags, item_sep); } bool walk(Item_processor processor, bool walk_subquery, byte *arg); Item *transform(Item_transformer transformer, byte *arg); diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 2b1fd96601d..9019b37da59 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -405,7 +405,7 @@ void Item_func_concat::fix_length_and_dec() { ulonglong max_result_length= 0; - if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV)) + if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1)) return; for (uint i=0 ; i < arg_count ; i++) @@ -727,7 +727,7 @@ void Item_func_concat_ws::fix_length_and_dec() { ulonglong max_result_length; - if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV)) + if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1)) return; /* @@ -937,7 +937,7 @@ void Item_func_replace::fix_length_and_dec() } max_length= (ulong) max_result_length; - if (agg_arg_charsets(collation, args, 3, MY_COLL_CMP_CONV)) + if (agg_arg_charsets(collation, args, 3, MY_COLL_CMP_CONV, 1)) return; } @@ -982,15 +982,11 @@ null: void Item_func_insert::fix_length_and_dec() { - Item *cargs[2]; ulonglong max_result_length; - cargs[0]= args[0]; - cargs[1]= args[3]; - if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV)) + // Handle character set for args[0] and args[3]. + if (agg_arg_charsets(collation, &args[0], 2, MY_COLL_ALLOW_CONV, 3)) return; - args[0]= cargs[0]; - args[3]= cargs[1]; max_result_length= ((ulonglong) args[0]->max_length+ (ulonglong) args[3]->max_length); if (max_result_length >= MAX_BLOB_WIDTH) @@ -1161,7 +1157,7 @@ void Item_func_substr_index::fix_length_and_dec() { max_length= args[0]->max_length; - if (agg_arg_charsets(collation, args, 2, MY_COLL_CMP_CONV)) + if (agg_arg_charsets(collation, args, 2, MY_COLL_CMP_CONV, 1)) return; } @@ -1497,13 +1493,10 @@ void Item_func_trim::fix_length_and_dec() } else { - Item *cargs[2]; - cargs[0]= args[1]; - cargs[1]= args[0]; - if (agg_arg_charsets(collation, cargs, 2, MY_COLL_CMP_CONV)) + // Handle character set for args[1] and args[0]. + // Note that we pass args[1] as the first item, and args[0] as the second. 
+ if (agg_arg_charsets(collation, &args[1], 2, MY_COLL_CMP_CONV, -1)) return; - args[0]= cargs[1]; - args[1]= cargs[0]; } } @@ -1887,7 +1880,7 @@ void Item_func_elt::fix_length_and_dec() max_length=0; decimals=0; - if (agg_arg_charsets(collation, args+1, arg_count-1, MY_COLL_ALLOW_CONV)) + if (agg_arg_charsets(collation, args+1, arg_count-1, MY_COLL_ALLOW_CONV, 1)) return; for (uint i= 1 ; i < arg_count ; i++) @@ -1954,7 +1947,7 @@ void Item_func_make_set::fix_length_and_dec() { max_length=arg_count-1; - if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV)) + if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1)) return; for (uint i=0 ; i < arg_count ; i++) @@ -2162,14 +2155,9 @@ err: void Item_func_rpad::fix_length_and_dec() { - Item *cargs[2]; - - cargs[0]= args[0]; - cargs[1]= args[2]; - if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV)) + // Handle character set for args[0] and args[2]. + if (agg_arg_charsets(collation, &args[0], 2, MY_COLL_ALLOW_CONV, 2)) return; - args[0]= cargs[0]; - args[2]= cargs[1]; if (args[1]->const_item()) { ulonglong length= ((ulonglong) args[1]->val_int() * @@ -2249,13 +2237,9 @@ String *Item_func_rpad::val_str(String *str) void Item_func_lpad::fix_length_and_dec() { - Item *cargs[2]; - cargs[0]= args[0]; - cargs[1]= args[2]; - if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV)) + // Handle character set for args[0] and args[2]. + if (agg_arg_charsets(collation, &args[0], 2, MY_COLL_ALLOW_CONV, 2)) return; - args[0]= cargs[0]; - args[2]= cargs[1]; if (args[1]->const_item()) { @@ -2712,8 +2696,8 @@ void Item_func_export_set::fix_length_and_dec() uint sep_length=(arg_count > 3 ? args[3]->max_length : 1); max_length=length*64+sep_length*63; - if (agg_arg_charsets(collation, args+1, min(4,arg_count)-1), - MY_COLL_ALLOW_CONV) + if (agg_arg_charsets(collation, args+1, min(4,arg_count)-1, + MY_COLL_ALLOW_CONV, 1)) return; } diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 7619f6a58f7..51c8ba6e5da 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -3248,7 +3248,7 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref) args, /* skip charset aggregation for order columns */ arg_count - arg_count_order, - MY_COLL_ALLOW_CONV)) + MY_COLL_ALLOW_CONV, 1)) return 1; result.set_charset(collation.collation); diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index fb5ca083eab..ed4b81c897f 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -2396,7 +2396,7 @@ void Item_xml_str_func::fix_length_and_dec() nodeset_func= 0; - if (agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV)) + if (agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1)) return; if (collation.collation->mbminlen > 1) diff --git a/sql/log.cc b/sql/log.cc index cdae7885046..dba4b65efd9 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -432,16 +432,23 @@ bool Log_to_csv_event_handler:: table->field[6]->set_notnull(); } - if (thd->last_insert_id_used) + if (thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt) { - table->field[7]->store((longlong) thd->current_insert_id, TRUE); + table->field[7]->store((longlong) + thd->first_successful_insert_id_in_prev_stmt_for_binlog, TRUE); table->field[7]->set_notnull(); } - /* set value if we do an insert on autoincrement column */ - if (thd->insert_id_used) + /* + Set value if we do an insert on autoincrement column. 
Note that for + some engines (those for which get_auto_increment() does not leave a + table lock until the statement ends), this is just the first value and + the next ones used may not be contiguous to it. + */ + if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0) { - table->field[8]->store((longlong) thd->last_insert_id, TRUE); + table->field[8]->store((longlong) + thd->auto_inc_intervals_in_cur_stmt_for_binlog.minimum(), TRUE); table->field[8]->set_notnull(); } @@ -731,7 +738,6 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length, Security_context *sctx= thd->security_ctx; uint message_buff_len= 0, user_host_len= 0; longlong query_time= 0, lock_time= 0; - longlong last_insert_id= 0, insert_id= 0; /* Print the message to the buffer if we have slow log enabled @@ -766,13 +772,6 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length, lock_time= (longlong) (thd->time_after_lock - query_start_arg); } - if (thd->last_insert_id_used) - last_insert_id= (longlong) thd->current_insert_id; - - /* set value if we do an insert on autoincrement column */ - if (thd->insert_id_used) - insert_id= (longlong) thd->last_insert_id; - if (!query) { is_command= TRUE; @@ -1931,18 +1930,22 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time, tmp_errno= errno; strmov(db,thd->db); } - if (thd->last_insert_id_used) + if (thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt) { end=strmov(end, ",last_insert_id="); - end=longlong10_to_str((longlong) thd->current_insert_id, end, -10); + end=longlong10_to_str((longlong) + thd->first_successful_insert_id_in_prev_stmt_for_binlog, + end, -10); } // Save value if we do an insert. - if (thd->insert_id_used) + if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0) { if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT)) { end=strmov(end,",insert_id="); - end=longlong10_to_str((longlong) thd->last_insert_id, end, -10); + end=longlong10_to_str((longlong) + thd->auto_inc_intervals_in_cur_stmt_for_binlog.minimum(), + end, -10); } } @@ -3363,21 +3366,24 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info) { if (!thd->current_stmt_binlog_row_based) { - if (thd->last_insert_id_used) + if (thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt) { Intvar_log_event e(thd,(uchar) LAST_INSERT_ID_EVENT, - thd->current_insert_id); + thd->first_successful_insert_id_in_prev_stmt_for_binlog); if (e.write(file)) goto err; } - if (thd->insert_id_used) + if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0) { + DBUG_PRINT("info",("number of auto_inc intervals: %lu", + thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements())); /* If the auto_increment was second in a table's index (possible with MyISAM or BDB) (table->next_number_key_offset != 0), such event is in fact not necessary. We could avoid logging it. 
*/ - Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,thd->last_insert_id); + Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT, + thd->auto_inc_intervals_in_cur_stmt_for_binlog.minimum()); if (e.write(file)) goto err; } @@ -3404,6 +3410,9 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info) } } } + /* Forget those values, for next binlogger: */ + thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; + thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty(); } /* diff --git a/sql/log_event.cc b/sql/log_event.cc index 823fad1e8e2..ebd90446a7e 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1693,14 +1693,17 @@ void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) + int Query_log_event::exec_event(struct st_relay_log_info* rli) { return exec_event(rli, query, q_len); } -int Query_log_event::exec_event(struct st_relay_log_info* rli, const char *query_arg, uint32 q_len_arg) +int Query_log_event::exec_event(struct st_relay_log_info* rli, + const char *query_arg, uint32 q_len_arg) { + LEX_STRING new_db; int expected_error,actual_error= 0; /* Colleagues: please never free(thd->catalog) in MySQL. This would lead to @@ -1709,8 +1712,9 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, const char *query Thank you. */ thd->catalog= catalog_len ? (char *) catalog : (char *)""; - thd->db_length= db_len; - thd->db= (char *) rpl_filter->get_rewrite_db(db, &thd->db_length); + new_db.length= db_len; + new_db.str= (char *) rpl_filter->get_rewrite_db(db, &new_db.length); + thd->set_db(new_db.str, new_db.length); /* allocates a copy of 'db' */ thd->variables.auto_increment_increment= auto_increment_increment; thd->variables.auto_increment_offset= auto_increment_offset; @@ -1929,11 +1933,21 @@ end: TABLE uses the db.table syntax. */ thd->catalog= 0; - thd->reset_db(NULL, 0); // prevent db from being freed + thd->set_db(NULL, 0); /* will free the current database */ thd->query= 0; // just to be sure thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); close_thread_tables(thd); + /* + As a disk space optimization, future masters will not log an event for + LAST_INSERT_ID() if that function returned 0 (and thus they will be able + to replace the THD::stmt_depends_on_first_successful_insert_id_in_prev_stmt + variable by (THD->first_successful_insert_id_in_prev_stmt > 0) ; with the + resetting below we are ready to support that. + */ + thd->first_successful_insert_id_in_prev_stmt_for_binlog= 0; + thd->first_successful_insert_id_in_prev_stmt= 0; + thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); /* If there was an error we stop. Otherwise we increment positions. 
Note that @@ -2896,8 +2910,10 @@ void Load_log_event::set_fields(const char* affected_db, int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, bool use_rli_only_for_errors) { - thd->db_length= db_len; - thd->db= (char *) rpl_filter->get_rewrite_db(db, &thd->db_length); + LEX_STRING new_db; + new_db.length= db_len; + new_db.str= (char *) rpl_filter->get_rewrite_db(db, &new_db.length); + thd->set_db(new_db.str, new_db.length); DBUG_ASSERT(thd->query == 0); thd->query_length= 0; // Should not be needed thd->query_error= 0; @@ -3100,7 +3116,7 @@ error: const char *remember_db= thd->db; VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->catalog= 0; - thd->reset_db(NULL, 0); + thd->set_db(NULL, 0); /* will free the current database */ thd->query= 0; thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); @@ -3425,11 +3441,11 @@ int Intvar_log_event::exec_event(struct st_relay_log_info* rli) { switch (type) { case LAST_INSERT_ID_EVENT: - thd->last_insert_id_used = 1; - thd->last_insert_id = val; + thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 1; + thd->first_successful_insert_id_in_prev_stmt= val; break; case INSERT_ID_EVENT: - thd->next_insert_id = val; + thd->force_one_auto_inc_interval(val); break; } rli->inc_event_relay_log_pos(); @@ -5353,10 +5369,10 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) /* lock_tables() reads the contents of thd->lex, so they must be - initialized, so we should call lex_start(); to be even safer, we - call mysql_init_query() which does a more complete set of inits. + initialized. Contrary to in Table_map_log_event::exec_event() we don't + call mysql_init_query() as that may reset the binlog format. */ - mysql_init_query(thd, NULL, 0); + lex_start(thd, NULL, 0); while ((error= lock_tables(thd, rli->tables_to_lock, rli->tables_to_lock_count, &need_reopen))) @@ -5860,6 +5876,12 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli) else { /* + open_tables() reads the contents of thd->lex, so they must be + initialized, so we should call lex_start(); to be even safer, we + call mysql_init_query() which does a more complete set of inits. + */ + mysql_init_query(thd, NULL, 0); + /* Check if the slave is set to use SBR. If so, it should switch to using RBR until the end of the "statement", i.e., next STMT_END_F or next error. @@ -5875,12 +5897,6 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli) Note that for any table that should not be replicated, a filter is needed. */ uint count; - /* - open_tables() reads the contents of thd->lex, so they must be - initialized, so we should call lex_start(); to be even safer, we - call mysql_init_query() which does a more complete set of inits. - */ - mysql_init_query(thd, NULL, 0); if ((error= open_tables(thd, &table_list, &count, 0))) { if (thd->query_error || thd->is_fatal_error) @@ -6138,6 +6154,7 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table) thd->lex->sql_command= SQLCOM_REPLACE; table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); // Needed for ndbcluster + table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); // Needed for ndbcluster table->file->extra(HA_EXTRA_IGNORE_NO_KEY); // Needed for ndbcluster /* TODO: the cluster team (Tomas?) 
says that it's better if the engine knows diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 7cff7d7531c..55958bae061 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -872,9 +872,7 @@ bool mysql_alter_table(THD *thd, char *new_db, char *new_name, TABLE_LIST *table_list, List<create_field> &fields, List<Key> &keys, - uint order_num, ORDER *order, - enum enum_duplicates handle_duplicates, - bool ignore, + uint order_num, ORDER *order, bool ignore, ALTER_INFO *alter_info, bool do_send_ok); bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok); bool mysql_create_like_table(THD *thd, TABLE_LIST *table, @@ -1972,6 +1970,17 @@ inline int hexchar_to_int(char c) } /* + is_user_table() + return true if the table was created explicitly +*/ + +inline bool is_user_table(TABLE * table) +{ + const char *name= table->s->table_name.str; + return strncmp(name, tmp_file_prefix, tmp_file_prefix_length); +} + +/* Some functions that are different in the embedded library and the normal server */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 6e57993a61a..93ed663ae06 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -323,7 +323,9 @@ static char *opt_init_slave, *language_ptr, *opt_init_connect; static char *default_character_set_name; static char *character_set_filesystem_name; static char *my_bind_addr_str; -static char *default_collation_name, *default_storage_engine_str; +static char *default_collation_name; +static char *default_storage_engine_str; +static char compiled_default_collation_name[]= MYSQL_DEFAULT_COLLATION_NAME; static char mysql_data_home_buff[2]; static struct passwd *user_info; static I_List<THD> thread_cache; @@ -7047,7 +7049,7 @@ static void mysql_init_variables(void) /* Variables in libraries */ charsets_dir= 0; default_character_set_name= (char*) MYSQL_DEFAULT_CHARSET_NAME; - default_collation_name= (char*) MYSQL_DEFAULT_COLLATION_NAME; + default_collation_name= compiled_default_collation_name; sys_charset_system.value= (char*) system_charset_info->csname; character_set_filesystem_name= (char*) "binary"; @@ -7181,7 +7183,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), strmake(mysql_home,argument,sizeof(mysql_home)-1); break; case 'C': - default_collation_name= 0; + if (default_collation_name == compiled_default_collation_name) + default_collation_name= 0; break; case 'l': opt_log=1; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 8da96497985..d62cb134209 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -85,18 +85,119 @@ static int sel_cmp(Field *f,char *a,char *b,uint8 a_flag,uint8 b_flag); static char is_null_string[2]= {1,0}; + +/* + A construction block of the SEL_ARG-graph. + + The following description only covers graphs of SEL_ARG objects with + sel_arg->type==KEY_RANGE: + + One SEL_ARG object represents an "elementary interval" in form + + min_value <=? table.keypartX <=? max_value + + The interval is a non-empty interval of any kind: with[out] minimum/maximum + bound, [half]open/closed, single-point interval, etc. + + 1. SEL_ARG GRAPH STRUCTURE + + SEL_ARG objects are linked together in a graph. 
The meaning of the graph + is better demostrated by an example: + + tree->keys[i] + | + | $ $ + | part=1 $ part=2 $ part=3 + | $ $ + | +-------+ $ +-------+ $ +--------+ + | | kp1<1 |--$-->| kp2=5 |--$-->| kp3=10 | + | +-------+ $ +-------+ $ +--------+ + | | $ $ | + | | $ $ +--------+ + | | $ $ | kp3=12 | + | | $ $ +--------+ + | +-------+ $ $ + \->| kp1=2 |--$--------------$-+ + +-------+ $ $ | +--------+ + | $ $ ==>| kp3=11 | + +-------+ $ $ | +--------+ + | kp1=3 |--$--------------$-+ | + +-------+ $ $ +--------+ + | $ $ | kp3=14 | + ... $ $ +--------+ + + The entire graph is partitioned into "interval lists". + + An interval list is a sequence of ordered disjoint intervals over the same + key part. SEL_ARG are linked via "next" and "prev" pointers. Additionally, + all intervals in the list form an RB-tree, linked via left/right/parent + pointers. The RB-tree root SEL_ARG object will be further called "root of the + interval list". + + In the example pic, there are 4 interval lists: + "kp<1 OR kp1=2 OR kp1=3", "kp2=5", "kp3=10 OR kp3=12", "kp3=11 OR kp3=13". + The vertical lines represent SEL_ARG::next/prev pointers. + + In an interval list, each member X may have SEL_ARG::next_key_part pointer + pointing to the root of another interval list Y. The pointed interval list + must cover a key part with greater number (i.e. Y->part > X->part). + + In the example pic, the next_key_part pointers are represented by + horisontal lines. + + 2. SEL_ARG GRAPH SEMANTICS + + It represents a condition in a special form (we don't have a name for it ATM) + The SEL_ARG::next/prev is "OR", and next_key_part is "AND". + + For example, the picture represents the condition in form: + (kp1 < 1 AND kp2=5 AND (kp3=10 OR kp3=12)) OR + (kp1=2 AND (kp3=11 OR kp3=14)) OR + (kp1=3 AND (kp3=11 OR kp3=14)) + + + 3. SEL_ARG GRAPH USE + + Use get_mm_tree() to construct SEL_ARG graph from WHERE condition. + Then walk the SEL_ARG graph and get a list of dijsoint ordered key + intervals (i.e. intervals in form + + (constA1, .., const1_K) < (keypart1,.., keypartK) < (constB1, .., constB_K) + + Those intervals can be used to access the index. The uses are in: + - check_quick_select() - Walk the SEL_ARG graph and find an estimate of + how many table records are contained within all + intervals. + - get_quick_select() - Walk the SEL_ARG, materialize the key intervals, + and create QUICK_RANGE_SELECT object that will + read records within these intervals. +*/ + class SEL_ARG :public Sql_alloc { public: uint8 min_flag,max_flag,maybe_flag; uint8 part; // Which key part uint8 maybe_null; - uint16 elements; // Elements in tree - ulong use_count; // use of this sub_tree + /* + Number of children of this element in the RB-tree, plus 1 for this + element itself. 
+ */ + uint16 elements; + /* + Valid only for elements which are RB-tree roots: Number of times this + RB-tree is referred to (it is referred by SEL_ARG::next_key_part or by + SEL_TREE::keys[i] or by a temporary SEL_ARG* variable) + */ + ulong use_count; + Field *field; char *min_value,*max_value; // Pointer to range - SEL_ARG *left,*right,*next,*prev,*parent,*next_key_part; + SEL_ARG *left,*right; /* R-B tree children */ + SEL_ARG *next,*prev; /* Links for bi-directional interval list */ + SEL_ARG *parent; /* R-B tree parent */ + SEL_ARG *next_key_part; enum leaf_color { BLACK,RED } color; enum Type { IMPOSSIBLE, MAYBE, MAYBE_KEY, KEY_RANGE } type; @@ -1428,6 +1529,7 @@ SEL_ARG *SEL_ARG::clone(SEL_ARG *new_parent,SEL_ARG **next_arg) } increment_use_count(1); tmp->color= color; + tmp->elements= this->elements; return tmp; } @@ -5795,8 +5897,21 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) } +/* + Produce a SEL_ARG graph that represents "key1 AND key2" + + SYNOPSIS + key_and() + key1 First argument, root of its RB-tree + key2 Second argument, root of its RB-tree + + RETURN + RB-tree root of the resulting SEL_ARG graph. + NULL if the result of AND operation is an empty interval {0}. +*/ + static SEL_ARG * -key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) +key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) { if (!key1) return key2; @@ -5859,6 +5974,7 @@ key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) if ((key1->min_flag | key2->min_flag) & GEOM_FLAG) { + /* TODO: why not leave one of the trees? */ key1->free_tree(); key2->free_tree(); return 0; // Can't optimize this @@ -6581,6 +6697,51 @@ int test_rb_tree(SEL_ARG *element,SEL_ARG *parent) return -1; // Error, no more warnings } + +/* + Count how many times SEL_ARG graph "root" refers to its part "key" + + SYNOPSIS + count_key_part_usage() + root An RB-Root node in a SEL_ARG graph. + key Another RB-Root node in that SEL_ARG graph. + + DESCRIPTION + The passed "root" node may refer to "key" node via root->next_key_part, + root->next->n + + This function counts how many times the node "key" is referred (via + SEL_ARG::next_key_part) by + - intervals of RB-tree pointed by "root", + - intervals of RB-trees that are pointed by SEL_ARG::next_key_part from + intervals of RB-tree pointed by "root", + - and so on. + + Here is an example (horizontal links represent next_key_part pointers, + vertical links - next/prev prev pointers): + + +----+ $ + |root|-----------------+ + +----+ $ | + | $ | + | $ | + +----+ +---+ $ | +---+ Here the return value + | |- ... -| |---$-+--+->|key| will be 4. + +----+ +---+ $ | | +---+ + | $ | | + ... $ | | + | $ | | + +----+ +---+ $ | | + | |---| |---------+ | + +----+ +---+ $ | + | | $ | + ... +---+ $ | + | |------------+ + +---+ $ + RETURN + Number of links to "key" from nodes reachable from "root". +*/ + static ulong count_key_part_usage(SEL_ARG *root, SEL_ARG *key) { ulong count= 0; @@ -6598,6 +6759,20 @@ static ulong count_key_part_usage(SEL_ARG *root, SEL_ARG *key) } +/* + Check if SEL_ARG::use_count value is correct + + SYNOPSIS + SEL_ARG::test_use_count() + root The root node of the SEL_ARG graph (an RB-tree root node that + has the least value of sel_arg->part in the entire graph, and + thus is the "origin" of the graph) + + DESCRIPTION + Check if SEL_ARG::use_count value is correct. See the definition of + use_count for what is "correct". 
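Concretely, the check amounts to re-deriving the reference counts by walking the graph and comparing them with the cached values. A minimal sketch with simplified, assumed types (ListRoot is not the server's SEL_ARG; the walk assumes an acyclic graph, which holds because next_key_part only points to greater key part numbers):

    #include <map>
    #include <vector>

    /* Simplified stand-in: an interval-list root that may reference other
       list roots and carries a cached incoming-reference count. */
    struct ListRoot
    {
      std::vector<ListRoot*> next_key_parts;  /* outgoing references */
      unsigned long use_count;                /* cached incoming-reference count */
    };

    /* Recount references reachable from 'root', counting every path, in the
       spirit of count_key_part_usage() above. */
    static void recount(const ListRoot *root,
                        std::map<const ListRoot*, unsigned long> &seen)
    {
      for (const ListRoot *child : root->next_key_parts)
      {
        ++seen[child];
        recount(child, seen);
      }
    }

    /* Consistency check in the spirit of test_use_count(): every cached
       use_count should match the recomputed number of references. */
    static bool use_counts_ok(const ListRoot *root)
    {
      std::map<const ListRoot*, unsigned long> seen;
      recount(root, seen);
      for (const auto &p : seen)
        if (p.first->use_count != p.second)
          return false;
      return true;
    }

Any mismatch indicates a node was freed or relinked without its counter being updated.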
+*/ + void SEL_ARG::test_use_count(SEL_ARG *root) { uint e_count=0; diff --git a/sql/set_var.cc b/sql/set_var.cc index bb9ef4d453f..1176a98713d 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -1343,9 +1343,9 @@ bool sys_var_thd_binlog_format::is_readonly() const return 1; } /* - if in a stored function, it's too late to change mode + if in a stored function/trigger, it's too late to change mode */ - if (thd->spcont && thd->prelocked_mode) + if (thd->in_sub_stmt) { my_error(ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT, MYF(0)); return 1; @@ -2794,7 +2794,8 @@ byte *sys_var_timestamp::value_ptr(THD *thd, enum_var_type type, bool sys_var_last_insert_id::update(THD *thd, set_var *var) { - thd->insert_id(var->save_result.ulonglong_value); + thd->first_successful_insert_id_in_prev_stmt= + var->save_result.ulonglong_value; return 0; } @@ -2802,14 +2803,19 @@ bool sys_var_last_insert_id::update(THD *thd, set_var *var) byte *sys_var_last_insert_id::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { - thd->sys_var_tmp.long_value= (long) thd->insert_id(); - return (byte*) &thd->last_insert_id; + /* + this tmp var makes it robust againt change of type of + read_first_successful_insert_id_in_prev_stmt(). + */ + thd->sys_var_tmp.ulonglong_value= + thd->read_first_successful_insert_id_in_prev_stmt(); + return (byte*) &thd->sys_var_tmp.ulonglong_value; } bool sys_var_insert_id::update(THD *thd, set_var *var) { - thd->next_insert_id= var->save_result.ulonglong_value; + thd->force_one_auto_inc_interval(var->save_result.ulonglong_value); return 0; } @@ -2817,7 +2823,9 @@ bool sys_var_insert_id::update(THD *thd, set_var *var) byte *sys_var_insert_id::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { - return (byte*) &thd->current_insert_id; + thd->sys_var_tmp.ulonglong_value= + thd->auto_inc_intervals_forced.minimum(); + return (byte*) &thd->sys_var_tmp.ulonglong_value; } diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index 8d2822370f2..75aba522543 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -5839,3 +5839,5 @@ ER_CANT_ACTIVATE_LOG eng "Cannot activate '%-.64s' log." 
ER_RBR_NOT_AVAILABLE eng "The server was not built with row-based replication" +ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA + eng "Triggers can not be created on system tables" diff --git a/sql/slave.cc b/sql/slave.cc index 2bc3b1c4bfd..19060eba2d4 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -63,14 +63,14 @@ static int count_relay_log_space(RELAY_LOG_INFO* rli); static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type); static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi); static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi, - bool suppress_warnings); + bool suppress_warnings); static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi, - bool reconnect, bool suppress_warnings); + bool reconnect, bool suppress_warnings); static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed, - void* thread_killed_arg); + void* thread_killed_arg); static int request_table_dump(MYSQL* mysql, const char* db, const char* table); static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, - const char* table_name, bool overwrite); + const char* table_name, bool overwrite); static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi); /* @@ -78,17 +78,17 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi); SYNOPSIS init_thread_mask() - mask Return value here - mi master_info for slave - inverse If set, returns which threads are not running + mask Return value here + mi master_info for slave + inverse If set, returns which threads are not running IMPLEMENTATION Get a bit mask for which threads are running so that we can later restart these threads. RETURN - mask If inverse == 0, running threads - If inverse == 1, stopped threads + mask If inverse == 0, running threads + If inverse == 1, stopped threads */ void init_thread_mask(int* mask,MASTER_INFO* mi,bool inverse) @@ -168,7 +168,7 @@ int init_slave() } if (init_master_info(active_mi,master_info_file,relay_log_info_file, - !master_host, (SLAVE_IO | SLAVE_SQL))) + !master_host, (SLAVE_IO | SLAVE_SQL))) { sql_print_error("Failed to initialize the master info structure"); goto err; @@ -182,11 +182,11 @@ int init_slave() if (master_host && !opt_skip_slave_start) { if (start_slave_threads(1 /* need mutex */, - 0 /* no wait for start*/, - active_mi, - master_info_file, - relay_log_info_file, - SLAVE_IO | SLAVE_SQL)) + 0 /* no wait for start*/, + active_mi, + master_info_file, + relay_log_info_file, + SLAVE_IO | SLAVE_SQL)) { sql_print_error("Failed to create slave threads"); goto err; @@ -206,12 +206,12 @@ err: SYNOPSIS init_relay_log_pos() - rli Relay information (will be initialized) - log Name of relay log file to read from. NULL = First log - pos Position in relay log file - need_data_lock Set to 1 if this functions should do mutex locks - errmsg Store pointer to error message here - look_for_description_event + rli Relay information (will be initialized) + log Name of relay log file to read from. NULL = First log + pos Position in relay log file + need_data_lock Set to 1 if this functions should do mutex locks + errmsg Store pointer to error message here + look_for_description_event 1 if we should look for such an event. We only need this when the SQL thread starts and opens an existing relay log and has to execute it (possibly from an @@ -229,13 +229,13 @@ err: - check proper initialization of group_master_log_name/group_master_log_pos RETURN VALUES - 0 ok - 1 error. errmsg is set to point to the error message + 0 ok + 1 error. 
errmsg is set to point to the error message */ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, - ulonglong pos, bool need_data_lock, - const char** errmsg, + ulonglong pos, bool need_data_lock, + const char** errmsg, bool look_for_description_event) { DBUG_ENTER("init_relay_log_pos"); @@ -243,7 +243,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, *errmsg=0; pthread_mutex_t *log_lock=rli->relay_log.get_log_lock(); - + if (need_data_lock) pthread_mutex_lock(&rli->data_lock); @@ -260,13 +260,13 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, By default the relay log is in binlog format 3 (4.0). Even if format is 4, this will work enough to read the first event (Format_desc) (remember that format 4 is just lenghtened compared to format - 3; format 3 is a prefix of format 4). + 3; format 3 is a prefix of format 4). */ rli->relay_log.description_event_for_exec= new Format_description_log_event(3); - + pthread_mutex_lock(log_lock); - + /* Close log file and free buffers if it's already open */ if (rli->cur_log_fd >= 0) { @@ -274,7 +274,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, my_close(rli->cur_log_fd, MYF(MY_WME)); rli->cur_log_fd = -1; } - + rli->group_relay_log_pos = rli->event_relay_log_pos = pos; /* @@ -293,9 +293,9 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, goto err; } strmake(rli->group_relay_log_name,rli->linfo.log_file_name, - sizeof(rli->group_relay_log_name)-1); + sizeof(rli->group_relay_log_name)-1); strmake(rli->event_relay_log_name,rli->linfo.log_file_name, - sizeof(rli->event_relay_log_name)-1); + sizeof(rli->event_relay_log_name)-1); if (rli->relay_log.is_active(rli->linfo.log_file_name)) { /* @@ -314,7 +314,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, Open the relay log and set rli->cur_log to point at this one */ if ((rli->cur_log_fd=open_binlog(&rli->cache_buf, - rli->linfo.log_file_name,errmsg)) < 0) + rli->linfo.log_file_name,errmsg)) < 0) goto err; rli->cur_log = &rli->cache_buf; } @@ -325,7 +325,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, if (pos > BIN_LOG_HEADER_SIZE) /* If pos<=4, we stay at 4 */ { Log_event* ev; - while (look_for_description_event) + while (look_for_description_event) { /* Read the possible Format_description_log_event; if position @@ -378,7 +378,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, or Format_desc. 
*/ } - else + else { DBUG_PRINT("info",("found event of another type=%d", ev->get_type_code())); @@ -391,7 +391,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, { char llbuf1[22], llbuf2[22]; DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s", - llstr(my_b_tell(rli->cur_log),llbuf1), + llstr(my_b_tell(rli->cur_log),llbuf1), llstr(rli->event_relay_log_pos,llbuf2))); } #endif @@ -406,7 +406,7 @@ err: if (!relay_log_purge) rli->log_space_limit= 0; pthread_cond_broadcast(&rli->data_cond); - + pthread_mutex_unlock(log_lock); if (need_data_lock) @@ -423,7 +423,7 @@ err: SYNOPSIS init_slave_skip_errors() - arg List of errors numbers to skip, separated with ',' + arg List of errors numbers to skip, separated with ',' NOTES Called from get_options() in mysqld.cc on start-up @@ -462,7 +462,7 @@ void init_slave_skip_errors(const char* arg) void st_relay_log_info::inc_group_relay_log_pos(ulonglong log_pos, - bool skip_lock) + bool skip_lock) { DBUG_ENTER("st_relay_log_info::inc_group_relay_log_pos"); @@ -471,10 +471,10 @@ void st_relay_log_info::inc_group_relay_log_pos(ulonglong log_pos, inc_event_relay_log_pos(); group_relay_log_pos= event_relay_log_pos; strmake(group_relay_log_name,event_relay_log_name, - sizeof(group_relay_log_name)-1); + sizeof(group_relay_log_name)-1); notify_group_relay_log_name_update(); - + /* If the slave does not support transactions and replicates a transaction, users should not trust group_master_log_pos (which they can display with @@ -506,7 +506,7 @@ void st_relay_log_info::inc_group_relay_log_pos(ulonglong log_pos, With the end_log_pos solution, we avoid computations involving lengthes. */ DBUG_PRINT("info", ("log_pos: %lu group_master_log_pos: %lu", - (long) log_pos, (long) group_master_log_pos)); + (long) log_pos, (long) group_master_log_pos)); if (log_pos) // 3.23 binlogs don't have log_posx { group_master_log_pos= log_pos; @@ -546,7 +546,7 @@ void st_relay_log_info::close_temporary_tables() */ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, - const char** errmsg) + const char** errmsg) { int error=0; DBUG_ENTER("purge_relay_logs"); @@ -584,10 +584,10 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, rli->slave_skip_counter=0; pthread_mutex_lock(&rli->data_lock); - /* - we close the relay log fd possibly left open by the slave SQL thread, + /* + we close the relay log fd possibly left open by the slave SQL thread, to be able to delete it; the relay log fd possibly left open by the slave - I/O thread will be closed naturally in reset_logs() by the + I/O thread will be closed naturally in reset_logs() by the close(LOG_CLOSE_TO_BE_OPENED) call */ if (rli->cur_log_fd >= 0) @@ -605,9 +605,9 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, } /* Save name of used relay log file */ strmake(rli->group_relay_log_name, rli->relay_log.get_log_fname(), - sizeof(rli->group_relay_log_name)-1); + sizeof(rli->group_relay_log_name)-1); strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(), - sizeof(rli->event_relay_log_name)-1); + sizeof(rli->event_relay_log_name)-1); rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE; if (count_relay_log_space(rli)) { @@ -617,12 +617,12 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, if (!just_reset) error= init_relay_log_pos(rli, rli->group_relay_log_name, rli->group_relay_log_pos, - 0 /* do not need data lock */, errmsg, 0); - + 0 /* do not need data lock */, errmsg, 0); + err: #ifndef 
DBUG_OFF char buf[22]; -#endif +#endif DBUG_PRINT("info",("log_space_total: %s",llstr(rli->log_space_total,buf))); pthread_mutex_unlock(&rli->data_lock); DBUG_RETURN(error); @@ -641,7 +641,7 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock) sql_cond_lock=sql_lock; io_cond_lock=io_lock; - + if (skip_lock) { sql_lock = io_lock = 0; @@ -651,10 +651,10 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock) DBUG_PRINT("info",("Terminating IO thread")); mi->abort_slave=1; if ((error=terminate_slave_thread(mi->io_thd,io_lock, - io_cond_lock, - &mi->stop_cond, - &mi->slave_running)) && - !force_all) + io_cond_lock, + &mi->stop_cond, + &mi->slave_running)) && + !force_all) DBUG_RETURN(error); } if ((thread_mask & (SLAVE_SQL|SLAVE_FORCE_ALL)) && mi->rli.slave_running) @@ -663,10 +663,10 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock) DBUG_ASSERT(mi->rli.sql_thd != 0) ; mi->rli.abort_slave=1; if ((error=terminate_slave_thread(mi->rli.sql_thd,sql_lock, - sql_cond_lock, - &mi->rli.stop_cond, - &mi->rli.slave_running)) && - !force_all) + sql_cond_lock, + &mi->rli.stop_cond, + &mi->rli.slave_running)) && + !force_all) DBUG_RETURN(error); } DBUG_RETURN(0); @@ -674,9 +674,9 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock) int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock, - pthread_mutex_t *cond_lock, - pthread_cond_t* term_cond, - volatile uint *slave_running) + pthread_mutex_t *cond_lock, + pthread_cond_t* term_cond, + volatile uint *slave_running) { DBUG_ENTER("terminate_slave_thread"); if (term_lock) @@ -695,7 +695,7 @@ int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock, be referening freed memory trying to kick it */ - while (*slave_running) // Should always be true + while (*slave_running) // Should always be true { DBUG_PRINT("loop", ("killing slave thread")); KICK_SLAVE(thd); @@ -714,11 +714,11 @@ int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock, int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock, - pthread_mutex_t *cond_lock, - pthread_cond_t *start_cond, - volatile uint *slave_running, - volatile ulong *slave_run_id, - MASTER_INFO* mi, + pthread_mutex_t *cond_lock, + pthread_cond_t *start_cond, + volatile uint *slave_running, + volatile ulong *slave_run_id, + MASTER_INFO* mi, bool high_priority) { pthread_t th; @@ -738,7 +738,7 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock, sql_print_error("Server id not set, will not start slave"); DBUG_RETURN(ER_BAD_SLAVE); } - + if (*slave_running) { if (start_cond) @@ -764,12 +764,12 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock, { DBUG_PRINT("sleep",("Waiting for slave thread to start")); const char* old_msg = thd->enter_cond(start_cond,cond_lock, - "Waiting for slave thread to start"); + "Waiting for slave thread to start"); pthread_cond_wait(start_cond,cond_lock); thd->exit_cond(old_msg); pthread_mutex_lock(cond_lock); // re-acquire it as exit_cond() released if (thd->killed) - DBUG_RETURN(thd->killed_errno()); + DBUG_RETURN(thd->killed_errno()); } } if (start_lock) @@ -788,14 +788,14 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock, */ int start_slave_threads(bool need_slave_mutex, bool wait_for_start, - MASTER_INFO* mi, const char* master_info_fname, - const char* slave_info_fname, int thread_mask) + MASTER_INFO* mi, const char* master_info_fname, + const char* slave_info_fname, int 
thread_mask) { pthread_mutex_t *lock_io=0,*lock_sql=0,*lock_cond_io=0,*lock_cond_sql=0; pthread_cond_t* cond_io=0,*cond_sql=0; int error=0; DBUG_ENTER("start_slave_threads"); - + if (need_slave_mutex) { lock_io = &mi->run_lock; @@ -811,15 +811,15 @@ int start_slave_threads(bool need_slave_mutex, bool wait_for_start, if (thread_mask & SLAVE_IO) error=start_slave_thread(handle_slave_io,lock_io,lock_cond_io, - cond_io, - &mi->slave_running, &mi->slave_run_id, - mi, 1); //high priority, to read the most possible + cond_io, + &mi->slave_running, &mi->slave_run_id, + mi, 1); //high priority, to read the most possible if (!error && (thread_mask & SLAVE_SQL)) { error=start_slave_thread(handle_slave_sql,lock_sql,lock_cond_sql, - cond_sql, - &mi->rli.slave_running, &mi->rli.slave_run_id, - mi, 0); + cond_sql, + &mi->rli.slave_running, &mi->rli.slave_run_id, + mi, 0); if (error) terminate_slave_threads(mi, thread_mask & SLAVE_IO, 0); } @@ -997,8 +997,8 @@ void skip_load_data_infile(NET *net) DBUG_ENTER("skip_load_data_infile"); (void)net_request_file(net, "/dev/null"); - (void)my_net_read(net); // discard response - (void)net_write_command(net, 0, "", 0, "", 0); // Send ok + (void)my_net_read(net); // discard response + (void)net_write_command(net, 0, "", 0, "", 0); // Send ok DBUG_VOID_RETURN; } @@ -1024,7 +1024,7 @@ const char *print_slave_db_safe(const char* db) } static int init_strvar_from_file(char *var, int max_size, IO_CACHE *f, - const char *default_val) + const char *default_val) { uint length; DBUG_ENTER("init_strvar_from_file"); @@ -1037,8 +1037,8 @@ static int init_strvar_from_file(char *var, int max_size, IO_CACHE *f, else { /* - If we truncated a line or stopped on last char, remove all chars - up to and including newline. + If we truncated a line or stopped on last char, remove all chars + up to and including newline. */ int c; while (((c=my_b_get(f)) != '\n' && c != my_b_EOF)); @@ -1059,8 +1059,8 @@ static int init_intvar_from_file(int* var, IO_CACHE* f, int default_val) char buf[32]; DBUG_ENTER("init_intvar_from_file"); - - if (my_b_gets(f, buf, sizeof(buf))) + + if (my_b_gets(f, buf, sizeof(buf))) { *var = atoi(buf); DBUG_RETURN(0); @@ -1081,7 +1081,7 @@ static int init_intvar_from_file(int* var, IO_CACHE* f, int default_val) when people upgrade a 3.23 master to 4.0 without doing RESET MASTER: 4.0 slaves are fooled. So we do this only to distinguish between 3.23 and more recent masters (it's too late to change things for 3.23). 
- + RETURNS 0 ok 1 error @@ -1098,7 +1098,7 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi) */ delete mi->rli.relay_log.description_event_for_queue; mi->rli.relay_log.description_event_for_queue= 0; - + if (!my_isdigit(&my_charset_bin,*mysql->server_version)) errmsg = "Master reported unrecognized MySQL version"; else @@ -1106,7 +1106,7 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi) /* Note the following switch will bug when we have MySQL branch 30 ;) */ - switch (*mysql->server_version) + switch (*mysql->server_version) { case '0': case '1': @@ -1115,13 +1115,13 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi) break; case '3': mi->rli.relay_log.description_event_for_queue= new - Format_description_log_event(1, mysql->server_version); + Format_description_log_event(1, mysql->server_version); break; case '4': mi->rli.relay_log.description_event_for_queue= new - Format_description_log_event(3, mysql->server_version); + Format_description_log_event(3, mysql->server_version); break; - default: + default: /* Master is MySQL >=5.0. Give a default Format_desc event, so that we can take the early steps (like tests for "is this a 3.23 master") which we @@ -1131,18 +1131,18 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi) master is 3.23, 4.0, etc. */ mi->rli.relay_log.description_event_for_queue= new - Format_description_log_event(4, mysql->server_version); + Format_description_log_event(4, mysql->server_version); break; } } - - /* + + /* This does not mean that a 5.0 slave will be able to read a 6.0 master; but as we don't know yet, we don't want to forbid this for now. If a 5.0 slave can't read a 6.0 master, this will show up when the slave can't read some events sent by the master, and there will be error messages. */ - + if (errmsg) { sql_print_error(errmsg); @@ -1162,12 +1162,12 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi) */ MYSQL_RES *master_res= 0; MYSQL_ROW master_row; - + if (!mysql_real_query(mysql, STRING_WITH_LEN("SELECT UNIX_TIMESTAMP()")) && (master_res= mysql_store_result(mysql)) && (master_row= mysql_fetch_row(master_res))) { - mi->clock_diff_with_master= + mi->clock_diff_with_master= (long) (time((time_t*) 0) - strtoul(master_row[0], 0, 10)); } else @@ -1177,8 +1177,8 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi) do not trust column Seconds_Behind_Master of SHOW SLAVE STATUS"); } if (master_res) - mysql_free_result(master_res); - + mysql_free_result(master_res); + /* Check that the master's server id and ours are different. Because if they are equal (which can result from a simple copy of master's datadir to slave, @@ -1245,9 +1245,9 @@ be equal for replication to work"; time and so could differ for slave and master even if they are really in the same system time zone. So we are omiting this check and just relying on documentation. Also according to Monty there are many users - who are using replication between servers in various time zones. Hence - such check will broke everything for them. (And now everything will - work for them because by default both their master and slave will have + who are using replication between servers in various time zones. Hence + such check will broke everything for them. (And now everything will + work for them because by default both their master and slave will have 'SYSTEM' time zone). This check is only necessary for 4.x masters (and < 5.0.4 masters but those were alpha). 
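Two steps above reduce to small, self-contained computations: picking a starting binlog format description from the first digit of the master's version string, and recording the master/slave clock difference. A compact sketch (the function names are assumptions, not the server's):

    #include <ctime>

    /* First digit of the master's version string decides which binlog format
       description to start from ('3' => format 1, '4' => format 3, newer => 4),
       mirroring the switch in get_master_version_and_clock() above. */
    static int binlog_format_for_master(const char *server_version)
    {
      switch (*server_version)
      {
      case '3': return 1;
      case '4': return 3;
      case '0': case '1': case '2': return -1;   /* too old to replicate from */
      default:  return 4;                        /* 5.0 and up */
      }
    }

    /* Clock difference: the slave's own clock minus the master's
       UNIX_TIMESTAMP() answer. */
    static long clock_diff_with_master(unsigned long master_unix_ts)
    {
      return (long) (std::time(nullptr) - (time_t) master_unix_ts);
    }

A positive difference means the slave's clock runs ahead of the master's; Seconds_Behind_Master compensates with this value, which is why the code above warns not to trust that column when the timestamp query fails.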
@@ -1257,7 +1257,7 @@ be equal for replication to work"; (master_res= mysql_store_result(mysql))) { if ((master_row= mysql_fetch_row(master_res)) && - strcmp(master_row[0], + strcmp(master_row[0], global_system_variables.time_zone->get_name()->ptr())) errmsg= "The slave I/O thread stops because master and slave have \ different values for the TIME_ZONE global variable. The values must \ @@ -1287,7 +1287,7 @@ err: */ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, - const char* table_name, bool overwrite) + const char* table_name, bool overwrite) { ulong packet_len; char *query, *save_db; @@ -1309,10 +1309,10 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, } if (net->read_pos[0] == 255) // error from master { - char *err_msg; + char *err_msg; err_msg= (char*) net->read_pos + ((mysql->server_capabilities & - CLIENT_PROTOCOL_41) ? - 3+SQLSTATE_LENGTH+1 : 3); + CLIENT_PROTOCOL_41) ? + 3+SQLSTATE_LENGTH+1 : 3); my_error(ER_MASTER, MYF(0), err_msg); DBUG_RETURN(1); } @@ -1347,15 +1347,16 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, // save old db in case we are creating in a different database save_db = thd->db; save_db_length= thd->db_length; - DBUG_ASSERT(db != 0); - thd->reset_db((char*)db, strlen(db)); + thd->db = (char*)db; + DBUG_ASSERT(thd->db != 0); + thd->db_length= strlen(thd->db); mysql_parse(thd, thd->query, packet_len); // run create table - thd->db = save_db; // leave things the way the were before + thd->db = save_db; // leave things the way the were before thd->db_length= save_db_length; thd->options = save_options; - + if (thd->query_error) - goto err; // mysql_parse took care of the error send + goto err; // mysql_parse took care of the error send thd->proc_info = "Opening master dump table"; tables.lock_type = TL_WRITE; @@ -1364,7 +1365,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, sql_print_error("create_table_from_dump: could not open created table"); goto err; } - + file = tables.table->file; thd->proc_info = "Reading master dump table data"; /* Copy the data file */ @@ -1395,22 +1396,22 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, err: close_thread_tables(thd); thd->net.no_send_ok = 0; - DBUG_RETURN(error); + DBUG_RETURN(error); } int fetch_master_table(THD *thd, const char *db_name, const char *table_name, - MASTER_INFO *mi, MYSQL *mysql, bool overwrite) + MASTER_INFO *mi, MYSQL *mysql, bool overwrite) { int error= 1; const char *errmsg=0; bool called_connected= (mysql != NULL); DBUG_ENTER("fetch_master_table"); DBUG_PRINT("enter", ("db_name: '%s' table_name: '%s'", - db_name,table_name)); + db_name,table_name)); if (!called_connected) - { + { if (!(mysql = mysql_init(NULL))) { DBUG_RETURN(1); @@ -1441,7 +1442,7 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name, goto err; } if (create_table_from_dump(thd, mysql, db_name, - table_name, overwrite)) + table_name, overwrite)) goto err; // create_table_from_dump have sent the error already error = 0; @@ -1451,7 +1452,7 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name, mysql_close(mysql); if (errmsg && thd->vio_ok()) my_message(error, errmsg, MYF(0)); - DBUG_RETURN(test(error)); // Return 1 on error + DBUG_RETURN(test(error)); // Return 1 on error } @@ -1567,44 +1568,44 @@ file '%s', errno %d)", fname, my_errno); goto err; } if (init_io_cache(&rli->info_file, info_fd, IO_SIZE*2, READ_CACHE, 0L,0, - MYF(MY_WME))) + 
MYF(MY_WME))) { sql_print_error("Failed to create a cache on relay log info file '%s'", - fname); + fname); msg= current_thd->net.last_error; goto err; } /* Init relay log with first entry in the relay index file */ if (init_relay_log_pos(rli,NullS,BIN_LOG_HEADER_SIZE,0 /* no data lock */, - &msg, 0)) + &msg, 0)) { sql_print_error("Failed to open the relay log 'FIRST' (relay_log_pos 4)"); goto err; } rli->group_master_log_name[0]= 0; - rli->group_master_log_pos= 0; + rli->group_master_log_pos= 0; rli->info_fd= info_fd; } else // file exists { if (info_fd >= 0) reinit_io_cache(&rli->info_file, READ_CACHE, 0L,0,0); - else + else { int error=0; if ((info_fd = my_open(fname, O_RDWR|O_BINARY, MYF(MY_WME))) < 0) { sql_print_error("\ Failed to open the existing relay log info file '%s' (errno %d)", - fname, my_errno); + fname, my_errno); error= 1; } else if (init_io_cache(&rli->info_file, info_fd, IO_SIZE*2, READ_CACHE, 0L, 0, MYF(MY_WME))) { sql_print_error("Failed to create a cache on relay log info file '%s'", - fname); + fname); error= 1; } if (error) @@ -1617,16 +1618,16 @@ Failed to open the existing relay log info file '%s' (errno %d)", DBUG_RETURN(1); } } - + rli->info_fd = info_fd; int relay_log_pos, master_log_pos; if (init_strvar_from_file(rli->group_relay_log_name, - sizeof(rli->group_relay_log_name), + sizeof(rli->group_relay_log_name), &rli->info_file, "") || init_intvar_from_file(&relay_log_pos, - &rli->info_file, BIN_LOG_HEADER_SIZE) || + &rli->info_file, BIN_LOG_HEADER_SIZE) || init_strvar_from_file(rli->group_master_log_name, - sizeof(rli->group_master_log_name), + sizeof(rli->group_master_log_name), &rli->info_file, "") || init_intvar_from_file(&master_log_pos, &rli->info_file, 0)) { @@ -1639,15 +1640,15 @@ Failed to open the existing relay log info file '%s' (errno %d)", rli->group_master_log_pos= master_log_pos; if (init_relay_log_pos(rli, - rli->group_relay_log_name, - rli->group_relay_log_pos, - 0 /* no data lock*/, - &msg, 0)) + rli->group_relay_log_name, + rli->group_relay_log_pos, + 0 /* no data lock*/, + &msg, 0)) { char llbuf[22]; sql_print_error("Failed to open the relay log '%s' (relay_log_pos %s)", - rli->group_relay_log_name, - llstr(rli->group_relay_log_pos, llbuf)); + rli->group_relay_log_name, + llstr(rli->group_relay_log_pos, llbuf)); goto err; } } @@ -1656,7 +1657,7 @@ Failed to open the existing relay log info file '%s' (errno %d)", { char llbuf1[22], llbuf2[22]; DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s", - llstr(my_b_tell(rli->cur_log),llbuf1), + llstr(my_b_tell(rli->cur_log),llbuf1), llstr(rli->event_relay_log_pos,llbuf2))); DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); @@ -1698,14 +1699,14 @@ static inline int add_relay_log(RELAY_LOG_INFO* rli,LOG_INFO* linfo) if (!my_stat(linfo->log_file_name,&s,MYF(0))) { sql_print_error("log %s listed in the index, but failed to stat", - linfo->log_file_name); + linfo->log_file_name); DBUG_RETURN(1); } rli->log_space_total += s.st_size; #ifndef DBUG_OFF char buf[22]; DBUG_PRINT("info",("log_space_total: %s", llstr(rli->log_space_total,buf))); -#endif +#endif DBUG_RETURN(0); } @@ -1720,11 +1721,11 @@ static bool wait_for_relay_log_space(RELAY_LOG_INFO* rli) pthread_mutex_lock(&rli->log_space_lock); save_proc_info= thd->enter_cond(&rli->log_space_cond, - &rli->log_space_lock, - "\ + &rli->log_space_lock, + "\ Waiting for the slave SQL thread to free enough relay log space"); while (rli->log_space_limit < 
rli->log_space_total && - !(slave_killed=io_slave_killed(thd,mi)) && + !(slave_killed=io_slave_killed(thd,mi)) && !rli->ignore_log_space_limit) pthread_cond_wait(&rli->log_space_cond, &rli->log_space_lock); thd->exit_cond(save_proc_info); @@ -1747,9 +1748,9 @@ static int count_relay_log_space(RELAY_LOG_INFO* rli) if (add_relay_log(rli,&linfo)) DBUG_RETURN(1); } while (!rli->relay_log.find_next_log(&linfo, 1)); - /* + /* As we have counted everything, including what may have written in a - preceding write, we must reset bytes_written, or we may count some space + preceding write, we must reset bytes_written, or we may count some space twice. */ rli->relay_log.reset_bytes_written(); @@ -1815,8 +1816,8 @@ void init_master_info_with_options(MASTER_INFO* mi) DBUG_ENTER("init_master_info_with_options"); mi->master_log_name[0] = 0; - mi->master_log_pos = BIN_LOG_HEADER_SIZE; // skip magic number - + mi->master_log_pos = BIN_LOG_HEADER_SIZE; // skip magic number + if (master_host) strmake(mi->host, master_host, sizeof(mi->host) - 1); if (master_user) @@ -1825,7 +1826,7 @@ void init_master_info_with_options(MASTER_INFO* mi) strmake(mi->password, master_password, MAX_PASSWORD_LENGTH); mi->port = master_port; mi->connect_retry = master_connect_retry; - + mi->ssl= master_ssl; if (master_ssl_ca) strmake(mi->ssl_ca, master_ssl_ca, sizeof(mi->ssl_ca)-1); @@ -1934,7 +1935,7 @@ file '%s', errno %d)", fname, my_errno); goto err; } if (init_io_cache(&mi->file, fd, IO_SIZE*2, READ_CACHE, 0L,0, - MYF(MY_WME))) + MYF(MY_WME))) { sql_print_error("Failed to create a cache on master info file (\ file '%s')", fname); @@ -1994,8 +1995,8 @@ file '%s')", fname); overwritten by the second row later. */ if (init_strvar_from_file(mi->master_log_name, - sizeof(mi->master_log_name), &mi->file, - "")) + sizeof(mi->master_log_name), &mi->file, + "")) goto errwithmsg; lines= strtoul(mi->master_log_name, &first_non_digit, 10); @@ -2011,15 +2012,15 @@ file '%s')", fname); lines= 7; if (init_intvar_from_file(&master_log_pos, &mi->file, 4) || - init_strvar_from_file(mi->host, sizeof(mi->host), &mi->file, - master_host) || - init_strvar_from_file(mi->user, sizeof(mi->user), &mi->file, - master_user) || + init_strvar_from_file(mi->host, sizeof(mi->host), &mi->file, + master_host) || + init_strvar_from_file(mi->user, sizeof(mi->user), &mi->file, + master_user) || init_strvar_from_file(mi->password, SCRAMBLED_PASSWORD_CHAR_LENGTH+1, &mi->file, master_password) || - init_intvar_from_file(&port, &mi->file, master_port) || - init_intvar_from_file(&connect_retry, &mi->file, - master_connect_retry)) + init_intvar_from_file(&port, &mi->file, master_port) || + init_intvar_from_file(&connect_retry, &mi->file, + master_connect_retry)) goto errwithmsg; /* @@ -2058,8 +2059,8 @@ file '%s')", fname); mi->ssl= (my_bool) ssl; } DBUG_PRINT("master_info",("log_file_name: %s position: %ld", - mi->master_log_name, - (ulong) mi->master_log_pos)); + mi->master_log_name, + (ulong) mi->master_log_pos)); mi->rli.mi = mi; if (init_relay_log_info(&mi->rli, slave_info_fname)) @@ -2104,23 +2105,23 @@ int register_slave_on_master(MYSQL* mysql) /* 30 is a good safety margin */ if (report_host_len + report_user_len + report_password_len + 30 > sizeof(buf)) - DBUG_RETURN(0); // safety + DBUG_RETURN(0); // safety int4store(pos, server_id); pos+= 4; - pos= net_store_data(pos, report_host, report_host_len); + pos= net_store_data(pos, report_host, report_host_len); pos= net_store_data(pos, report_user, report_user_len); pos= net_store_data(pos, report_password, 
report_password_len); int2store(pos, (uint16) report_port); pos+= 2; - int4store(pos, rpl_recovery_rank); pos+= 4; + int4store(pos, rpl_recovery_rank); pos+= 4; /* The master will fill in master_id */ - int4store(pos, 0); pos+= 4; + int4store(pos, 0); pos+= 4; if (simple_command(mysql, COM_REGISTER_SLAVE, (char*) buf, - (uint) (pos- buf), 0)) + (uint) (pos- buf), 0)) { sql_print_error("Error on COM_REGISTER_SLAVE: %d '%s'", - mysql_errno(mysql), - mysql_error(mysql)); + mysql_errno(mysql), + mysql_error(mysql)); DBUG_RETURN(1); } DBUG_RETURN(0); @@ -2135,25 +2136,25 @@ bool show_master_info(THD* thd, MASTER_INFO* mi) DBUG_ENTER("show_master_info"); field_list.push_back(new Item_empty_string("Slave_IO_State", - 14)); + 14)); field_list.push_back(new Item_empty_string("Master_Host", - sizeof(mi->host))); + sizeof(mi->host))); field_list.push_back(new Item_empty_string("Master_User", - sizeof(mi->user))); + sizeof(mi->user))); field_list.push_back(new Item_return_int("Master_Port", 7, - MYSQL_TYPE_LONG)); + MYSQL_TYPE_LONG)); field_list.push_back(new Item_return_int("Connect_Retry", 10, - MYSQL_TYPE_LONG)); + MYSQL_TYPE_LONG)); field_list.push_back(new Item_empty_string("Master_Log_File", - FN_REFLEN)); + FN_REFLEN)); field_list.push_back(new Item_return_int("Read_Master_Log_Pos", 10, - MYSQL_TYPE_LONGLONG)); + MYSQL_TYPE_LONGLONG)); field_list.push_back(new Item_empty_string("Relay_Log_File", - FN_REFLEN)); + FN_REFLEN)); field_list.push_back(new Item_return_int("Relay_Log_Pos", 10, - MYSQL_TYPE_LONGLONG)); + MYSQL_TYPE_LONGLONG)); field_list.push_back(new Item_empty_string("Relay_Master_Log_File", - FN_REFLEN)); + FN_REFLEN)); field_list.push_back(new Item_empty_string("Slave_IO_Running", 3)); field_list.push_back(new Item_empty_string("Slave_SQL_Running", 3)); field_list.push_back(new Item_empty_string("Replicate_Do_DB", 20)); @@ -2162,33 +2163,33 @@ bool show_master_info(THD* thd, MASTER_INFO* mi) field_list.push_back(new Item_empty_string("Replicate_Ignore_Table", 23)); field_list.push_back(new Item_empty_string("Replicate_Wild_Do_Table", 24)); field_list.push_back(new Item_empty_string("Replicate_Wild_Ignore_Table", - 28)); + 28)); field_list.push_back(new Item_return_int("Last_Errno", 4, MYSQL_TYPE_LONG)); field_list.push_back(new Item_empty_string("Last_Error", 20)); field_list.push_back(new Item_return_int("Skip_Counter", 10, - MYSQL_TYPE_LONG)); + MYSQL_TYPE_LONG)); field_list.push_back(new Item_return_int("Exec_Master_Log_Pos", 10, - MYSQL_TYPE_LONGLONG)); + MYSQL_TYPE_LONGLONG)); field_list.push_back(new Item_return_int("Relay_Log_Space", 10, - MYSQL_TYPE_LONGLONG)); + MYSQL_TYPE_LONGLONG)); field_list.push_back(new Item_empty_string("Until_Condition", 6)); field_list.push_back(new Item_empty_string("Until_Log_File", FN_REFLEN)); - field_list.push_back(new Item_return_int("Until_Log_Pos", 10, + field_list.push_back(new Item_return_int("Until_Log_Pos", 10, MYSQL_TYPE_LONGLONG)); field_list.push_back(new Item_empty_string("Master_SSL_Allowed", 7)); field_list.push_back(new Item_empty_string("Master_SSL_CA_File", sizeof(mi->ssl_ca))); - field_list.push_back(new Item_empty_string("Master_SSL_CA_Path", + field_list.push_back(new Item_empty_string("Master_SSL_CA_Path", sizeof(mi->ssl_capath))); - field_list.push_back(new Item_empty_string("Master_SSL_Cert", + field_list.push_back(new Item_empty_string("Master_SSL_Cert", sizeof(mi->ssl_cert))); - field_list.push_back(new Item_empty_string("Master_SSL_Cipher", + field_list.push_back(new Item_empty_string("Master_SSL_Cipher", 
sizeof(mi->ssl_cipher))); - field_list.push_back(new Item_empty_string("Master_SSL_Key", + field_list.push_back(new Item_empty_string("Master_SSL_Key", sizeof(mi->ssl_key))); field_list.push_back(new Item_return_int("Seconds_Behind_Master", 10, MYSQL_TYPE_LONGLONG)); - + if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(TRUE); @@ -2198,7 +2199,7 @@ bool show_master_info(THD* thd, MASTER_INFO* mi) DBUG_PRINT("info",("host is set: '%s'", mi->host)); String *packet= &thd->packet; protocol->prepare_for_resend(); - + /* TODO: we read slave_running without run_lock, whereas these variables are updated under run_lock and not data_lock. In 5.0 we should lock @@ -2215,8 +2216,8 @@ bool show_master_info(THD* thd, MASTER_INFO* mi) protocol->store(mi->master_log_name, &my_charset_bin); protocol->store((ulonglong) mi->master_log_pos); protocol->store(mi->rli.group_relay_log_name + - dirname_length(mi->rli.group_relay_log_name), - &my_charset_bin); + dirname_length(mi->rli.group_relay_log_name), + &my_charset_bin); protocol->store((ulonglong) mi->rli.group_relay_log_pos); protocol->store(mi->rli.group_master_log_name, &my_charset_bin); protocol->store(mi->slave_running == MYSQL_SLAVE_RUN_CONNECT ? @@ -2243,13 +2244,13 @@ bool show_master_info(THD* thd, MASTER_INFO* mi) protocol->store((ulonglong) mi->rli.log_space_total); protocol->store( - mi->rli.until_condition==RELAY_LOG_INFO::UNTIL_NONE ? "None": + mi->rli.until_condition==RELAY_LOG_INFO::UNTIL_NONE ? "None": ( mi->rli.until_condition==RELAY_LOG_INFO::UNTIL_MASTER_POS? "Master": "Relay"), &my_charset_bin); protocol->store(mi->rli.until_log_name, &my_charset_bin); protocol->store((ulonglong) mi->rli.until_log_pos); - -#ifdef HAVE_OPENSSL + +#ifdef HAVE_OPENSSL protocol->store(mi->ssl? "Yes":"No", &my_charset_bin); #else protocol->store(mi->ssl? "Ignored":"No", &my_charset_bin); @@ -2354,10 +2355,10 @@ int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) my_b_seek(file, 0L); my_b_printf(file, "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n", - LINES_IN_MASTER_INFO_WITH_SSL, + LINES_IN_MASTER_INFO_WITH_SSL, mi->master_log_name, llstr(mi->master_log_pos, lbuf), - mi->host, mi->user, - mi->password, mi->port, mi->connect_retry, + mi->host, mi->user, + mi->password, mi->port, mi->connect_retry, (int)(mi->ssl), mi->ssl_ca, mi->ssl_capath, mi->ssl_cert, mi->ssl_cipher, mi->ssl_key); DBUG_RETURN(-flush_io_cache(file)); @@ -2418,7 +2419,7 @@ st_relay_log_info::~st_relay_log_info() wait_for_pos() thd client thread that sent SELECT MASTER_POS_WAIT log_name log name to wait for - log_pos position to wait for + log_pos position to wait for timeout timeout in seconds before giving up waiting NOTES @@ -2457,7 +2458,7 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, msg= thd->enter_cond(&data_cond, &data_lock, "Waiting for the slave SQL thread to " "advance position"); - /* + /* This function will abort when it notices that some CHANGE MASTER or RESET MASTER has changed the master info. To catch this, these commands modify abort_pos_wait ; We just monitor @@ -2503,7 +2504,7 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, { error= -2; goto err; - } + } /* The "compare and wait" main loop */ while (!thd->killed && @@ -2563,7 +2564,7 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, } //wait for master update, with optional timeout. 
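The waiting itself follows the standard condition-variable pattern: take the lock, re-check the predicate, and sleep with an absolute deadline. A minimal sketch with assumed names (the real loop additionally handles the killed, improper-argument and slave-stopped cases explained below):

    #include <errno.h>
    #include <pthread.h>
    #include <time.h>

    /* Wait until pos_reached() becomes true or timeout_sec elapses.
       Returns 0 on success, -1 on timeout. */
    static int wait_until(pthread_mutex_t *lock, pthread_cond_t *cond,
                          int (*pos_reached)(void), long timeout_sec)
    {
      struct timespec abstime;
      clock_gettime(CLOCK_REALTIME, &abstime);
      abstime.tv_sec+= timeout_sec;

      int err= 0;
      pthread_mutex_lock(lock);
      while (!pos_reached() && err != ETIMEDOUT)
        err= pthread_cond_timedwait(cond, lock, &abstime); /* may wake spuriously */
      pthread_mutex_unlock(lock);
      return err == ETIMEDOUT ? -1 : 0;
    }

Re-testing the predicate on every wakeup is what makes spurious wakeups and broadcasts from unrelated state changes harmless.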
- + DBUG_PRINT("info",("Waiting for master update")); /* We are going to pthread_cond_(timed)wait(); if the SQL thread stops it @@ -2573,7 +2574,7 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, { /* Note that pthread_cond_timedwait checks for the timeout - before for the condition ; i.e. it returns ETIMEDOUT + before for the condition ; i.e. it returns ETIMEDOUT if the system time equals or exceeds the time specified by abstime before the condition variable is signaled or broadcast, _or_ if the absolute time specified by abstime has already passed at the time @@ -2607,7 +2608,7 @@ improper_arguments: %d timed_out: %d", (int) (error == -2), (int) (error == -1))); if (thd->killed || init_abort_pos_wait != abort_pos_wait || - !slave_running) + !slave_running) { error= -2; } @@ -2647,13 +2648,13 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type) { DBUG_ENTER("init_slave_thread"); thd->system_thread = (thd_type == SLAVE_THD_SQL) ? - SYSTEM_THREAD_SLAVE_SQL : SYSTEM_THREAD_SLAVE_IO; + SYSTEM_THREAD_SLAVE_SQL : SYSTEM_THREAD_SLAVE_IO; thd->security_ctx->skip_grants(); my_net_init(&thd->net, 0); thd->net.read_timeout = slave_net_timeout; thd->slave_thread = 1; set_slave_thread_options(thd); - /* + /* It's nonsense to constrain the slave threads with max_join_size; if a query succeeded on master, we HAVE to execute it. So set OPTION_BIG_SELECTS. Setting max_join_size to HA_POS_ERROR is not enough @@ -2679,7 +2680,7 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type) #if !defined(__WIN__) && !defined(__NETWARE__) sigset_t set; - VOID(sigemptyset(&set)); // Get mask in use + VOID(sigemptyset(&set)); // Get mask in use VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals)); #endif @@ -2694,7 +2695,7 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type) static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed, - void* thread_killed_arg) + void* thread_killed_arg) { int nap_time; thr_alarm_t alarmed; @@ -2715,7 +2716,7 @@ static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed, thr_alarm(&alarmed, 2 * nap_time, &alarm_buff); sleep(nap_time); thr_end_alarm(&alarmed); - + if ((*thread_killed)(thd,thread_killed_arg)) DBUG_RETURN(1); start_time=time((time_t*) 0); @@ -2725,7 +2726,7 @@ static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed, static int request_dump(MYSQL* mysql, MASTER_INFO* mi, - bool *suppress_warnings) + bool *suppress_warnings) { char buf[FN_REFLEN + 10]; int len; @@ -2747,11 +2748,11 @@ static int request_dump(MYSQL* mysql, MASTER_INFO* mi, now we just fill up the error log :-) */ if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED) - *suppress_warnings= 1; // Suppress reconnect warning + *suppress_warnings= 1; // Suppress reconnect warning else sql_print_error("Error on COM_BINLOG_DUMP: %d %s, will retry in %d secs", - mysql_errno(mysql), mysql_error(mysql), - master_connect_retry); + mysql_errno(mysql), mysql_error(mysql), + master_connect_retry); DBUG_RETURN(1); } @@ -2771,14 +2772,14 @@ static int request_table_dump(MYSQL* mysql, const char* db, const char* table) { sql_print_error("request_table_dump: Buffer overrun"); DBUG_RETURN(1); - } - + } + *p++ = db_len; memcpy(p, db, db_len); p += db_len; *p++ = table_len; memcpy(p, table, table_len); - + if (simple_command(mysql, COM_TABLE_DUMP, buf, p - buf + table_len, 1)) { sql_print_error("request_table_dump: Error sending the table dump \ @@ -2792,19 +2793,19 @@ command"); /* Read one event from the master - + SYNOPSIS 
read_event() - mysql MySQL connection - mi Master connection information - suppress_warnings TRUE when a normal net read timeout has caused us to - try a reconnect. We do not want to print anything to - the error log in this case because this a anormal - event in an idle server. + mysql MySQL connection + mi Master connection information + suppress_warnings TRUE when a normal net read timeout has caused us to + try a reconnect. We do not want to print anything to + the error log in this case because this a anormal + event in an idle server. RETURN VALUES - 'packet_error' Error - number Length of packet + 'packet_error' Error + number Length of packet */ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) @@ -2819,24 +2820,24 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) */ #ifndef DBUG_OFF if (disconnect_slave_event_count && !(mi->events_till_disconnect--)) - DBUG_RETURN(packet_error); + DBUG_RETURN(packet_error); #endif - + len = net_safe_read(mysql); if (len == packet_error || (long) len < 1) { if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED) { /* - We are trying a normal reconnect after a read timeout; - we suppress prints to .err file as long as the reconnect - happens without problems + We are trying a normal reconnect after a read timeout; + we suppress prints to .err file as long as the reconnect + happens without problems */ *suppress_warnings= TRUE; } else sql_print_error("Error reading packet from server: %s ( server_errno=%d)", - mysql_error(mysql), mysql_errno(mysql)); + mysql_error(mysql), mysql_errno(mysql)); DBUG_RETURN(packet_error); } @@ -2845,13 +2846,13 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) { sql_print_information("Slave: received end packet from server, apparent " "master shutdown: %s", - mysql_error(mysql)); + mysql_error(mysql)); DBUG_RETURN(packet_error); } - + DBUG_PRINT("info",( "len=%u, net->read_pos[4] = %d\n", - len, mysql->net.read_pos[4])); - DBUG_RETURN(len - 1); + len, mysql->net.read_pos[4])); + DBUG_RETURN(len - 1); } @@ -2861,8 +2862,8 @@ int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int expected_error) switch (expected_error) { case ER_NET_READ_ERROR: - case ER_NET_ERROR_ON_WRITE: - case ER_SERVER_SHUTDOWN: + case ER_NET_ERROR_ON_WRITE: + case ER_SERVER_SHUTDOWN: case ER_NEW_ABORTING_CONNECTION: DBUG_RETURN(1); default: @@ -2875,25 +2876,25 @@ int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int expected_error) SYNOPSYS st_relay_log_info::is_until_satisfied() DESCRIPTION - Checks if UNTIL condition is reached. Uses caching result of last - comparison of current log file name and target log file name. So cached - value should be invalidated if current log file name changes + Checks if UNTIL condition is reached. Uses caching result of last + comparison of current log file name and target log file name. So cached + value should be invalidated if current log file name changes (see st_relay_log_info::notify_... functions). - - This caching is needed to avoid of expensive string comparisons and + + This caching is needed to avoid of expensive string comparisons and strtol() conversions needed for log names comparison. We don't need to - compare them each time this function is called, we only need to do this - when current log name changes. 
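The cached comparison itself reduces to the numeric extensions of the two log file names ("mysql-bin.000042" style); a small illustrative sketch with assumed helper names, ahead of the real code further below:

    #include <cstdlib>
    #include <cstring>

    /* Illustrative only: compare two log names of the form "<base>.<NNNNNN>"
       by their numeric extension. Returns -1, 0 or 1. */
    static int compare_log_names(const char *a, const char *b)
    {
      const char *ext_a= std::strrchr(a, '.');
      const char *ext_b= std::strrchr(b, '.');
      if (!ext_a || !ext_b)
        return 0;                               /* malformed name: treat as equal */
      unsigned long na= std::strtoul(ext_a + 1, nullptr, 10);
      unsigned long nb= std::strtoul(ext_b + 1, nullptr, 10);
      return na < nb ? -1 : (na > nb ? 1 : 0);
    }

Because this result only changes when the current log name changes, it can be cached and invalidated from the notify_... functions rather than recomputed per event group.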
If we have UNTIL_MASTER_POS condition we - need to do this only after Rotate_log_event::exec_event() (which is - rare, so caching gives real benifit), and if we have UNTIL_RELAY_POS - condition then we should invalidate cached comarison value after + compare them each time this function is called, we only need to do this + when current log name changes. If we have UNTIL_MASTER_POS condition we + need to do this only after Rotate_log_event::exec_event() (which is + rare, so caching gives real benifit), and if we have UNTIL_RELAY_POS + condition then we should invalidate cached comarison value after inc_group_relay_log_pos() which called for each group of events (so we - have some benefit if we have something like queries that use + have some benefit if we have something like queries that use autoincrement or if we have transactions). - + Should be called ONLY if until_condition != UNTIL_NONE ! RETURN VALUE - true - condition met or error happened (condition seems to have + true - condition met or error happened (condition seems to have bad log file name) false - condition not met */ @@ -2905,7 +2906,7 @@ bool st_relay_log_info::is_until_satisfied() DBUG_ENTER("st_relay_log_info::is_until_satisfied"); DBUG_ASSERT(until_condition != UNTIL_NONE); - + if (until_condition == UNTIL_MASTER_POS) { log_name= group_master_log_name; @@ -2916,7 +2917,7 @@ bool st_relay_log_info::is_until_satisfied() log_name= group_relay_log_name; log_pos= group_relay_log_pos; } - + if (until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_UNKNOWN) { /* @@ -2930,7 +2931,7 @@ bool st_relay_log_info::is_until_satisfied() if (*log_name) { const char *basename= log_name + dirname_length(log_name); - + const char *q= (const char*)(fn_ext(basename)+1); if (strncmp(basename, until_log_name, (int)(q-basename)) == 0) { @@ -2940,11 +2941,11 @@ bool st_relay_log_info::is_until_satisfied() if (log_name_extension < until_log_name_extension) until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_LESS; else - until_log_names_cmp_result= - (log_name_extension > until_log_name_extension) ? + until_log_names_cmp_result= + (log_name_extension > until_log_name_extension) ? 
UNTIL_LOG_NAMES_CMP_GREATER : UNTIL_LOG_NAMES_CMP_EQUAL ; } - else + else { /* Probably error so we aborting */ sql_print_error("Slave SQL thread is stopped because UNTIL " @@ -2955,8 +2956,8 @@ bool st_relay_log_info::is_until_satisfied() else DBUG_RETURN(until_log_pos == 0); } - - DBUG_RETURN(((until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_EQUAL && + + DBUG_RETURN(((until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_EQUAL && log_pos >= until_log_pos) || until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_GREATER)); } @@ -3142,7 +3143,7 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) pthread_mutex_unlock(&rli->data_lock); thd->server_id = ev->server_id; // use the original server id for logging - thd->set_time(); // time the query + thd->set_time(); // time the query thd->lex->current_select= 0; if (!ev->when) ev->when = time(NULL); @@ -3191,16 +3192,16 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) { exec_res= 0; end_trans(thd, ROLLBACK); - /* chance for concurrent connection to get more locks */ + /* chance for concurrent connection to get more locks */ safe_sleep(thd, min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE), - (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli); + (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli); pthread_mutex_lock(&rli->data_lock); // because of SHOW STATUS - rli->trans_retries++; + rli->trans_retries++; rli->retried_trans++; pthread_mutex_unlock(&rli->data_lock); DBUG_PRINT("info", ("Slave retries transaction " "rli->trans_retries: %lu", rli->trans_retries)); - } + } } else sql_print_error("Slave SQL thread retried transaction %lu time(s) " @@ -3284,8 +3285,8 @@ pthread_handler_t handle_slave_io(void *arg) pthread_cond_broadcast(&mi->start_cond); DBUG_PRINT("master_info",("log_file_name: '%s' position: %s", - mi->master_log_name, - llstr(mi->master_log_pos,llbuff))); + mi->master_log_name, + llstr(mi->master_log_pos,llbuff))); if (!(mi->mysql = mysql = mysql_init(NULL))) { @@ -3298,9 +3299,9 @@ pthread_handler_t handle_slave_io(void *arg) if (!safe_connect(thd, mysql, mi)) sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',\ replication started in log '%s' at position %s", mi->user, - mi->host, mi->port, - IO_RPL_LOG_NAME, - llstr(mi->master_log_pos,llbuff)); + mi->host, mi->port, + IO_RPL_LOG_NAME, + llstr(mi->master_log_pos,llbuff)); else { sql_print_information("Slave I/O thread killed while connecting to master"); @@ -3338,9 +3339,9 @@ connected: sql_print_error("Failed on request_dump()"); if (io_slave_killed(thd,mi)) { - sql_print_information("Slave I/O thread killed while requesting master \ + sql_print_information("Slave I/O thread killed while requesting master \ dump"); - goto err; + goto err; } mi->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT; @@ -3350,35 +3351,35 @@ dump"); #endif end_server(mysql); /* - First time retry immediately, assuming that we can recover - right away - if first time fails, sleep between re-tries - hopefuly the admin can fix the problem sometime + First time retry immediately, assuming that we can recover + right away - if first time fails, sleep between re-tries + hopefuly the admin can fix the problem sometime */ if (retry_count++) { - if (retry_count > master_retry_count) - goto err; // Don't retry forever - safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed, - (void*)mi); + if (retry_count > master_retry_count) + goto err; // Don't retry forever + safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed, + (void*)mi); } if 
(io_slave_killed(thd,mi)) { - sql_print_information("Slave I/O thread killed while retrying master \ + sql_print_information("Slave I/O thread killed while retrying master \ dump"); - goto err; + goto err; } thd->proc_info = "Reconnecting after a failed binlog dump request"; if (!suppress_warnings) - sql_print_error("Slave I/O thread: failed dump request, \ + sql_print_error("Slave I/O thread: failed dump request, \ reconnecting to try again, log '%s' at postion %s", IO_RPL_LOG_NAME, - llstr(mi->master_log_pos,llbuff)); + llstr(mi->master_log_pos,llbuff)); if (safe_reconnect(thd, mysql, mi, suppress_warnings) || - io_slave_killed(thd,mi)) + io_slave_killed(thd,mi)) { - sql_print_information("Slave I/O thread killed during or \ + sql_print_information("Slave I/O thread killed during or \ after reconnect"); - goto err; + goto err; } goto connected; @@ -3397,72 +3398,72 @@ after reconnect"); ulong event_len = read_event(mysql, mi, &suppress_warnings); if (io_slave_killed(thd,mi)) { - if (global_system_variables.log_warnings) - sql_print_information("Slave I/O thread killed while reading event"); - goto err; + if (global_system_variables.log_warnings) + sql_print_information("Slave I/O thread killed while reading event"); + goto err; } if (event_len == packet_error) { - uint mysql_error_number= mysql_errno(mysql); - if (mysql_error_number == ER_NET_PACKET_TOO_LARGE) - { - sql_print_error("\ + uint mysql_error_number= mysql_errno(mysql); + if (mysql_error_number == ER_NET_PACKET_TOO_LARGE) + { + sql_print_error("\ Log entry on master is longer than max_allowed_packet (%ld) on \ slave. If the entry is correct, restart the server with a higher value of \ max_allowed_packet", - thd->variables.max_allowed_packet); - goto err; - } - if (mysql_error_number == ER_MASTER_FATAL_ERROR_READING_BINLOG) - { - sql_print_error(ER(mysql_error_number), mysql_error_number, - mysql_error(mysql)); - goto err; - } + thd->variables.max_allowed_packet); + goto err; + } + if (mysql_error_number == ER_MASTER_FATAL_ERROR_READING_BINLOG) + { + sql_print_error(ER(mysql_error_number), mysql_error_number, + mysql_error(mysql)); + goto err; + } mi->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT; - thd->proc_info = "Waiting to reconnect after a failed master event read"; + thd->proc_info = "Waiting to reconnect after a failed master event read"; #ifdef SIGNAL_WITH_VIO_CLOSE thd->clear_active_vio(); #endif - end_server(mysql); - if (retry_count++) - { - if (retry_count > master_retry_count) - goto err; // Don't retry forever - safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed, - (void*) mi); - } - if (io_slave_killed(thd,mi)) - { - if (global_system_variables.log_warnings) - sql_print_information("Slave I/O thread killed while waiting to \ + end_server(mysql); + if (retry_count++) + { + if (retry_count > master_retry_count) + goto err; // Don't retry forever + safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed, + (void*) mi); + } + if (io_slave_killed(thd,mi)) + { + if (global_system_variables.log_warnings) + sql_print_information("Slave I/O thread killed while waiting to \ reconnect after a failed read"); - goto err; - } - thd->proc_info = "Reconnecting after a failed master event read"; - if (!suppress_warnings) - sql_print_information("Slave I/O thread: Failed reading log event, \ + goto err; + } + thd->proc_info = "Reconnecting after a failed master event read"; + if (!suppress_warnings) + sql_print_information("Slave I/O thread: Failed reading log event, \ reconnecting to retry, log '%s' 
position %s", IO_RPL_LOG_NAME, - llstr(mi->master_log_pos, llbuff)); - if (safe_reconnect(thd, mysql, mi, suppress_warnings) || - io_slave_killed(thd,mi)) - { - if (global_system_variables.log_warnings) - sql_print_information("Slave I/O thread killed during or after a \ + llstr(mi->master_log_pos, llbuff)); + if (safe_reconnect(thd, mysql, mi, suppress_warnings) || + io_slave_killed(thd,mi)) + { + if (global_system_variables.log_warnings) + sql_print_information("Slave I/O thread killed during or after a \ reconnect done to recover from failed read"); - goto err; - } - goto connected; + goto err; + } + goto connected; } // if (event_len == packet_error) - retry_count=0; // ok event, reset retry counter + retry_count=0; // ok event, reset retry counter thd->proc_info = "Queueing master event to the relay log"; if (queue_event(mi,(const char*)mysql->net.read_pos + 1, - event_len)) + event_len)) { - sql_print_error("Slave I/O thread could not queue event from master"); - goto err; + sql_print_error("Slave I/O thread could not queue event from master"); + goto err; } if (flush_master_info(mi, 1)) { @@ -3488,31 +3489,30 @@ reconnect done to recover from failed read"); ignore_log_space_limit=%d", llstr(rli->log_space_limit,llbuf1), llstr(rli->log_space_total,llbuf2), - (int) rli->ignore_log_space_limit)); + (int) rli->ignore_log_space_limit)); } #endif if (rli->log_space_limit && rli->log_space_limit < - rli->log_space_total && + rli->log_space_total && !rli->ignore_log_space_limit) - if (wait_for_relay_log_space(rli)) - { - sql_print_error("Slave I/O thread aborted while waiting for relay \ + if (wait_for_relay_log_space(rli)) + { + sql_print_error("Slave I/O thread aborted while waiting for relay \ log space"); - goto err; - } - } + goto err; + } + } } // error = 0; err: // print the current replication position sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s", - IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff)); + IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff)); VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query= 0; // extra safety - thd->query_length= 0; - thd->reset_db(NULL, 0); + thd->query = thd->db = 0; // extra safety + thd->query_length= thd->db_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); if (mysql) { @@ -3533,15 +3533,12 @@ err: write_ignored_events_info_to_relay_log(thd, mi); thd->proc_info = "Waiting for slave mutex on exit"; pthread_mutex_lock(&mi->run_lock); - mi->slave_running = 0; - mi->io_thd = 0; /* Forget the relay log's format */ delete mi->rli.relay_log.description_event_for_queue; mi->rli.relay_log.description_event_for_queue= 0; // TODO: make rpl_status part of MASTER_INFO change_rpl_status(RPL_ACTIVE_SLAVE,RPL_IDLE_SLAVE); - mi->abort_slave = 0; // TODO: check if this is needed DBUG_ASSERT(thd->net.buff != 0); net_end(&thd->net); // destructor will not free it, because net.vio is 0 close_thread_tables(thd, 0); @@ -3549,11 +3546,14 @@ err: THD_CHECK_SENTRY(thd); delete thd; pthread_mutex_unlock(&LOCK_thread_count); - pthread_cond_broadcast(&mi->stop_cond); // tell the world we are done + mi->abort_slave = 0; + mi->slave_running = 0; + mi->io_thd = 0; pthread_mutex_unlock(&mi->run_lock); + pthread_cond_broadcast(&mi->stop_cond); // tell the world we are done my_thread_end(); pthread_exit(0); - DBUG_RETURN(0); // Can't return anything here + DBUG_RETURN(0); // Can't return anything here } @@ -3561,7 +3561,7 @@ err: pthread_handler_t handle_slave_sql(void *arg) { - THD *thd; /* needs to be first for thread_stack 
*/ + THD *thd; /* needs to be first for thread_stack */ char llbuff[22],llbuff1[22]; RELAY_LOG_INFO* rli = &((MASTER_INFO*)arg)->rli; @@ -3575,13 +3575,13 @@ pthread_handler_t handle_slave_sql(void *arg) pthread_mutex_lock(&rli->run_lock); DBUG_ASSERT(!rli->slave_running); errmsg= 0; -#ifndef DBUG_OFF +#ifndef DBUG_OFF rli->events_till_abort = abort_slave_event_count; -#endif +#endif thd = new THD; // note that contructor of THD uses DBUG_ ! thd->thread_stack = (char*)&thd; // remember where our stack is - + /* Inform waiting threads that slave has started */ rli->slave_run_id++; @@ -3635,13 +3635,13 @@ pthread_handler_t handle_slave_sql(void *arg) rli->trans_retries= 0; // start from "no error" if (init_relay_log_pos(rli, - rli->group_relay_log_name, - rli->group_relay_log_pos, - 1 /*need data lock*/, &errmsg, + rli->group_relay_log_name, + rli->group_relay_log_pos, + 1 /*need data lock*/, &errmsg, 1 /*look for a description_event*/)) { sql_print_error("Error initializing relay log position: %s", - errmsg); + errmsg); goto err; } THD_CHECK_SENTRY(thd); @@ -3649,7 +3649,7 @@ pthread_handler_t handle_slave_sql(void *arg) { char llbuf1[22], llbuf2[22]; DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s", - llstr(my_b_tell(rli->cur_log),llbuf1), + llstr(my_b_tell(rli->cur_log),llbuf1), llstr(rli->event_relay_log_pos,llbuf2))); DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); /* @@ -3672,13 +3672,13 @@ pthread_handler_t handle_slave_sql(void *arg) DBUG_ASSERT(rli->sql_thd == thd); DBUG_PRINT("master_info",("log_file_name: %s position: %s", - rli->group_master_log_name, - llstr(rli->group_master_log_pos,llbuff))); + rli->group_master_log_name, + llstr(rli->group_master_log_pos,llbuff))); if (global_system_variables.log_warnings) sql_print_information("Slave SQL thread initialized, starting replication in \ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME, - llstr(rli->group_master_log_pos,llbuff),rli->group_relay_log_name, - llstr(rli->group_relay_log_pos,llbuff1)); + llstr(rli->group_master_log_pos,llbuff),rli->group_relay_log_name, + llstr(rli->group_relay_log_pos,llbuff1)); /* execute init_slave variable */ if (sys_init_slave.value_length) @@ -3742,8 +3742,8 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ /* Thread stopped. Print the current replication position to the log */ sql_print_information("Slave SQL thread exiting, replication stopped in log " - "'%s' at position %s", - RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff)); + "'%s' at position %s", + RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff)); err: @@ -3760,10 +3760,8 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ should already have done these assignments (each event which sets these variables is supposed to set them to 0 before terminating)). */ - thd->catalog= 0; - thd->reset_db(NULL, 0); - thd->query= 0; - thd->query_length= 0; + thd->query= thd->db= thd->catalog= 0; + thd->query_length= thd->db_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); thd->proc_info = "Waiting for slave mutex on exit"; pthread_mutex_lock(&rli->run_lock); @@ -3771,7 +3769,7 @@ the slave SQL thread with \"SLAVE START\". 
We stopped at log \ pthread_mutex_lock(&rli->data_lock); DBUG_ASSERT(rli->slave_running == 1); // tracking buffer overrun /* When master_pos_wait() wakes up it will check this and terminate */ - rli->slave_running= 0; + rli->slave_running= 0; /* Forget the relay log's format */ delete rli->relay_log.description_event_for_exec; rli->relay_log.description_event_for_exec= 0; @@ -3803,7 +3801,7 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ pthread_mutex_unlock(&rli->run_lock); my_thread_end(); pthread_exit(0); - DBUG_RETURN(0); // Can't return anything here + DBUG_RETURN(0); // Can't return anything here } @@ -3832,11 +3830,11 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev) thd->file_id = cev->file_id = mi->file_id++; thd->server_id = cev->server_id; cev_not_written = 1; - + if (unlikely(net_request_file(net,cev->fname))) { sql_print_error("Slave I/O: failed requesting download of '%s'", - cev->fname); + cev->fname); goto err; } @@ -3847,18 +3845,18 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev) */ { Append_block_log_event aev(thd,0,0,0,0); - + for (;;) { if (unlikely((num_bytes=my_net_read(net)) == packet_error)) { - sql_print_error("Network read error downloading '%s' from master", - cev->fname); - goto err; + sql_print_error("Network read error downloading '%s' from master", + cev->fname); + goto err; } if (unlikely(!num_bytes)) /* eof */ { - net_write_command(net, 0, "", 0, "", 0);/* 3.23 master wants it */ + net_write_command(net, 0, "", 0, "", 0);/* 3.23 master wants it */ /* If we wrote Create_file_log_event, then we need to write Execute_load_log_event. If we did not write Create_file_log_event, @@ -3866,43 +3864,43 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev) INFILE had not existed, i.e. write nothing. 
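The hunk above is mostly a reindentation of process_io_create_file(), whose control flow is easy to lose in the whitespace noise: the first network block becomes a Create_file event, every later block an Append_block event, and an Execute_load event is written only if a Create_file was written at all. A minimal stand-alone sketch of that pattern follows; RelaySink, stream_file and the read callback are invented names for illustration, not server APIs.

#include <cstdio>
#include <functional>
#include <string>
#include <vector>

// Hypothetical sink receiving "create", "append" and "execute" records,
// standing in for Create_file / Append_block / Execute_load in the relay log.
struct RelaySink {
  void create(const std::string &name, const std::string &block)
  { printf("Create_file(%s, %zu bytes)\n", name.c_str(), block.size()); }
  void append(const std::string &block)
  { printf("Append_block(%zu bytes)\n", block.size()); }
  void execute(const std::string &name)
  { printf("Execute_load(%s)\n", name.c_str()); }
};

// Read blocks until the source returns an empty block (EOF); the first block
// is written as "create", later ones as "append", and "execute" is emitted
// only if something was actually written, as in process_io_create_file().
void stream_file(const std::string &name,
                 const std::function<std::string()> &read_block,
                 RelaySink &sink)
{
  bool create_not_written = true;
  for (;;)
  {
    std::string block = read_block();
    if (block.empty())                       // EOF from the master
    {
      if (!create_not_written)
        sink.execute(name);                  // only if the file creation was logged
      return;
    }
    if (create_not_written)
    {
      sink.create(name, block);
      create_not_written = false;
    }
    else
      sink.append(block);
  }
}

int main()
{
  std::vector<std::string> blocks = {"abc", "defg", ""};
  size_t i = 0;
  RelaySink sink;
  stream_file("data.txt", [&]() { return blocks[i++]; }, sink);
  return 0;
}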
*/ if (unlikely(cev_not_written)) - break; - Execute_load_log_event xev(thd,0,0); - xev.log_pos = cev->log_pos; - if (unlikely(mi->rli.relay_log.append(&xev))) - { - sql_print_error("Slave I/O: error writing Exec_load event to \ + break; + Execute_load_log_event xev(thd,0,0); + xev.log_pos = cev->log_pos; + if (unlikely(mi->rli.relay_log.append(&xev))) + { + sql_print_error("Slave I/O: error writing Exec_load event to \ relay log"); - goto err; - } - mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total); - break; + goto err; + } + mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total); + break; } if (unlikely(cev_not_written)) { - cev->block = (char*)net->read_pos; - cev->block_len = num_bytes; - if (unlikely(mi->rli.relay_log.append(cev))) - { - sql_print_error("Slave I/O: error writing Create_file event to \ + cev->block = (char*)net->read_pos; + cev->block_len = num_bytes; + if (unlikely(mi->rli.relay_log.append(cev))) + { + sql_print_error("Slave I/O: error writing Create_file event to \ relay log"); - goto err; - } - cev_not_written=0; - mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total); + goto err; + } + cev_not_written=0; + mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total); } else { - aev.block = (char*)net->read_pos; - aev.block_len = num_bytes; - aev.log_pos = cev->log_pos; - if (unlikely(mi->rli.relay_log.append(&aev))) - { - sql_print_error("Slave I/O: error writing Append_block event to \ + aev.block = (char*)net->read_pos; + aev.block_len = num_bytes; + aev.log_pos = cev->log_pos; + if (unlikely(mi->rli.relay_log.append(&aev))) + { + sql_print_error("Slave I/O: error writing Append_block event to \ relay log"); - goto err; - } - mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total) ; + goto err; + } + mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total) ; } } } @@ -3917,8 +3915,8 @@ err: SYNOPSIS process_io_rotate() - mi master_info for the slave - rev The rotate log event read from the binary log + mi master_info for the slave + rev The rotate log event read from the binary log DESCRIPTION Updates the master info with the place in the next binary @@ -3929,8 +3927,8 @@ err: We assume we already locked mi->data_lock RETURN VALUES - 0 ok - 1 Log event is illegal + 0 ok + 1 Log event is illegal */ @@ -3946,7 +3944,7 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev) memcpy(mi->master_log_name, rev->new_log_ident, rev->ident_len+1); mi->master_log_pos= rev->pos; DBUG_PRINT("info", ("master_log_pos: '%s' %d", - mi->master_log_name, (ulong) mi->master_log_pos)); + mi->master_log_name, (ulong) mi->master_log_pos)); #ifndef DBUG_OFF /* If we do not do this, we will be getting the first @@ -3983,7 +3981,7 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev) copied from MySQL 4.0. 
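process_io_rotate(), documented just above, only has to re-point the I/O thread's notion of where it is in the master's binlog. The following stand-alone sketch shows that bookkeeping with invented types (MasterCoord, RotateEvent); the real function works on MASTER_INFO and Rotate_log_event and carries extra debugging logic.

#include <cstdio>
#include <cstring>

// Simplified stand-in for the (log name, position) pair kept in MASTER_INFO.
struct MasterCoord {
  char log_name[512];
  unsigned long long pos;
};

// Simplified Rotate event payload: the next binlog file and starting position.
struct RotateEvent {
  const char *new_log_ident;
  size_t ident_len;
  unsigned long long pos;
};

// After a Rotate event the I/O thread continues counting positions in the
// next master binlog file.  Returns false when the name would not fit,
// the sketch's analogue of the "Log event is illegal" case.
bool apply_rotate(MasterCoord *mi, const RotateEvent *rev)
{
  if (rev->ident_len + 1 > sizeof(mi->log_name))
    return false;
  memcpy(mi->log_name, rev->new_log_ident, rev->ident_len);
  mi->log_name[rev->ident_len] = '\0';
  mi->pos = rev->pos;
  return true;
}

int main()
{
  MasterCoord mi = {"master-bin.000001", 4711};
  RotateEvent rev = {"master-bin.000002", 17, 4};
  if (apply_rotate(&mi, &rev))
    printf("now at '%s' pos %llu\n", mi.log_name, mi.pos);
  return 0;
}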
*/ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, - ulong event_len) + ulong event_len) { const char *errmsg = 0; ulong inc_pos; @@ -4027,7 +4025,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, { sql_print_error("Read invalid event from master: '%s',\ master could be corrupt but a more likely cause of this is a bug", - errmsg); + errmsg); my_free((char*) tmp_buf, MYF(MY_ALLOW_ZERO_PTR)); DBUG_RETURN(1); } @@ -4073,8 +4071,8 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, } if (likely(!ignore_event)) { - if (ev->log_pos) - /* + if (ev->log_pos) + /* Don't do it for fake Rotate events (see comment in Log_event::Log_event(const char* buf...) in log_event.cc). */ @@ -4099,7 +4097,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, from queue_binlog_ver_1_event(), with some affordable simplifications. */ static int queue_binlog_ver_3_event(MASTER_INFO *mi, const char *buf, - ulong event_len) + ulong event_len) { const char *errmsg = 0; ulong inc_pos; @@ -4114,7 +4112,7 @@ static int queue_binlog_ver_3_event(MASTER_INFO *mi, const char *buf, { sql_print_error("Read invalid event from master: '%s',\ master could be corrupt but a more likely cause of this is a bug", - errmsg); + errmsg); my_free((char*) tmp_buf, MYF(MY_ALLOW_ZERO_PTR)); DBUG_RETURN(1); } @@ -4157,13 +4155,13 @@ err: (exactly, slave's) format. To do the conversion, we create a 5.0 event from the 3.23/4.0 bytes, then write this event to the relay log. - TODO: + TODO: Test this code before release - it has to be tested on a separate setup with 3.23 master or 4.0 master */ static int queue_old_event(MASTER_INFO *mi, const char *buf, - ulong event_len) + ulong event_len) { DBUG_ENTER("queue_old_event"); @@ -4175,7 +4173,7 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, DBUG_RETURN(queue_binlog_ver_3_event(mi,buf,event_len)); default: /* unsupported format; eg version 2 */ DBUG_PRINT("info",("unsupported binlog format %d in queue_old_event()", - mi->rli.relay_log.description_event_for_queue->binlog_version)); + mi->rli.relay_log.description_event_for_queue->binlog_version)); DBUG_RETURN(1); } } @@ -4213,7 +4211,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) cleaning is already done on a per-master-thread basis (as the master server is shutting down cleanly, it has written all DROP TEMPORARY TABLE prepared statements' deletion are TODO only when we binlog prep stmts). - + We don't even increment mi->master_log_pos, because we may be just after a Rotate event. Btw, in a few milliseconds we are going to have a Start event from the next binlog (unless the master is presently running @@ -4222,7 +4220,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) goto err; case ROTATE_EVENT: { - Rotate_log_event rev(buf,event_len,mi->rli.relay_log.description_event_for_queue); + Rotate_log_event rev(buf,event_len,mi->rli.relay_log.description_event_for_queue); if (unlikely(process_io_rotate(mi,&rev))) { error= 1; @@ -4257,17 +4255,17 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) } delete mi->rli.relay_log.description_event_for_queue; mi->rli.relay_log.description_event_for_queue= tmp; - /* + /* Though this does some conversion to the slave's format, this will - preserve the master's binlog format version, and number of event types. + preserve the master's binlog format version, and number of event types. 
*/ - /* + /* If the event was not requested by the slave (the slave did not ask for - it), i.e. has end_log_pos=0, we do not increment mi->master_log_pos + it), i.e. has end_log_pos=0, we do not increment mi->master_log_pos */ inc_pos= uint4korr(buf+LOG_POS_OFFSET) ? event_len : 0; DBUG_PRINT("info",("binlog format is now %d", - mi->rli.relay_log.description_event_for_queue->binlog_version)); + mi->rli.relay_log.description_event_for_queue->binlog_version)); } break; @@ -4276,8 +4274,8 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) break; } - /* - If this event is originating from this server, don't queue it. + /* + If this event is originating from this server, don't queue it. We don't check this for 3.23 events because it's simpler like this; 3.23 will be filtered anyway by the SQL slave thread which also tests the server id (we must also keep this test in the SQL thread, in case somebody @@ -4319,7 +4317,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) } rli->relay_log.signal_update(); // the slave SQL thread needs to re-check DBUG_PRINT("info", ("master_log_pos: %d, event originating from the same server, ignored", (ulong) mi->master_log_pos)); - } + } else { /* write the event to the relay log */ @@ -4378,13 +4376,13 @@ void end_relay_log_info(RELAY_LOG_INFO* rli) SYNPOSIS safe_connect() - thd Thread handler for slave - mysql MySQL connection handle - mi Replication handle + thd Thread handler for slave + mysql MySQL connection handle + mi Replication handle RETURN - 0 ok - # Error + 0 ok + # Error */ static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi) @@ -4405,10 +4403,10 @@ static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi) */ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi, - bool reconnect, bool suppress_warnings) + bool reconnect, bool suppress_warnings) { int slave_was_killed; - int last_errno= -2; // impossible error + int last_errno= -2; // impossible error ulong err_count=0; char llbuff[22]; DBUG_ENTER("connect_to_master"); @@ -4418,16 +4416,16 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi, #endif ulong client_flag= CLIENT_REMEMBER_OPTIONS; if (opt_slave_compressed_protocol) - client_flag=CLIENT_COMPRESS; /* We will use compression */ + client_flag=CLIENT_COMPRESS; /* We will use compression */ mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, (char *) &slave_net_timeout); mysql_options(mysql, MYSQL_OPT_READ_TIMEOUT, (char *) &slave_net_timeout); - + #ifdef HAVE_OPENSSL if (mi->ssl) - mysql_ssl_set(mysql, + mysql_ssl_set(mysql, mi->ssl_key[0]?mi->ssl_key:0, - mi->ssl_cert[0]?mi->ssl_cert:0, + mi->ssl_cert[0]?mi->ssl_cert:0, mi->ssl_ca[0]?mi->ssl_ca:0, mi->ssl_capath[0]?mi->ssl_capath:0, mi->ssl_cipher[0]?mi->ssl_cipher:0); @@ -4438,9 +4436,9 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi, mysql_options(mysql, MYSQL_SET_CHARSET_DIR, (char *) charsets_dir); while (!(slave_was_killed = io_slave_killed(thd,mi)) && - (reconnect ? mysql_reconnect(mysql) != 0 : - mysql_real_connect(mysql, mi->host, mi->user, mi->password, 0, - mi->port, 0, client_flag) == 0)) + (reconnect ? 
mysql_reconnect(mysql) != 0 : + mysql_real_connect(mysql, mi->host, mi->user, mi->password, 0, + mi->port, 0, client_flag) == 0)) { /* Don't repeat last error */ if ((int)mysql_errno(mysql) != last_errno) @@ -4450,11 +4448,11 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi, sql_print_error("Slave I/O thread: error %s to master \ '%s@%s:%d': \ Error: '%s' errno: %d retry-time: %d retries: %d", - (reconnect ? "reconnecting" : "connecting"), - mi->user,mi->host,mi->port, - mysql_error(mysql), last_errno, - mi->connect_retry, - master_retry_count); + (reconnect ? "reconnecting" : "connecting"), + mi->user,mi->host,mi->port, + mysql_error(mysql), last_errno, + mi->connect_retry, + master_retry_count); } /* By default we try forever. The reason is that failure will trigger @@ -4470,19 +4468,19 @@ Error: '%s' errno: %d retry-time: %d retries: %d", break; } safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed, - (void*)mi); + (void*)mi); } if (!slave_was_killed) { if (reconnect) - { + { if (!suppress_warnings && global_system_variables.log_warnings) - sql_print_information("Slave: connected to master '%s@%s:%d',\ + sql_print_information("Slave: connected to master '%s@%s:%d',\ replication resumed in log '%s' at position %s", mi->user, - mi->host, mi->port, - IO_RPL_LOG_NAME, - llstr(mi->master_log_pos,llbuff)); + mi->host, mi->port, + IO_RPL_LOG_NAME, + llstr(mi->master_log_pos,llbuff)); } else { @@ -4492,7 +4490,7 @@ replication resumed in log '%s' at position %s", mi->user, } #ifdef SIGNAL_WITH_VIO_CLOSE thd->set_active_vio(mysql->net.vio); -#endif +#endif } mysql->reconnect= 1; DBUG_PRINT("exit",("slave_was_killed: %d", slave_was_killed)); @@ -4509,7 +4507,7 @@ replication resumed in log '%s' at position %s", mi->user, */ static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi, - bool suppress_warnings) + bool suppress_warnings) { DBUG_ENTER("safe_reconnect"); DBUG_RETURN(connect_to_master(thd, mysql, mi, 1, suppress_warnings)); @@ -4522,7 +4520,7 @@ static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi, SYNOPSIS flush_relay_log_info() - rli Relay log information + rli Relay log information NOTES - As this is only called by the slave thread, we don't need to @@ -4541,8 +4539,8 @@ static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi, longlong2str. 
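The connect_to_master() hunk above is again mostly re-indentation, but the retry policy it preserves is worth spelling out: keep trying until connected or killed, never print the same error twice in a row, sleep mi->connect_retry seconds between attempts, and give up once the retry budget (master_retry_count) is spent. Below is a stand-alone sketch of that policy with invented names (connect_with_retry and its callbacks); the caller is assumed to supply the actual connection attempt.

#include <chrono>
#include <cstdio>
#include <functional>
#include <thread>

// Retry policy sketch: 0 from try_connect means connected, anything else is
// treated as an errno-like code.  Only a change in the error code is logged.
bool connect_with_retry(const std::function<int()> &try_connect,
                        const std::function<bool()> &killed,
                        unsigned max_retries,
                        unsigned retry_delay_seconds)
{
  int last_errno = -2;                       // impossible error code
  unsigned err_count = 0;
  while (!killed())
  {
    int err = try_connect();
    if (err == 0)
      return true;                           // connected
    if (err != last_errno)                   // don't repeat the last error
    {
      last_errno = err;
      fprintf(stderr, "connect failed: error %d, retry in %us (%u/%u)\n",
              err, retry_delay_seconds, err_count + 1, max_retries);
    }
    if (++err_count >= max_retries)
      return false;                          // give up, let the caller decide
    std::this_thread::sleep_for(std::chrono::seconds(retry_delay_seconds));
  }
  return false;                              // killed while retrying
}

int main()
{
  int attempts = 0;
  // 2003 is used here only as an example error code for a failed attempt.
  bool ok = connect_with_retry([&] { return ++attempts < 3 ? 2003 : 0; },
                               [] { return false; },
                               /*max_retries=*/10, /*retry_delay_seconds=*/0);
  printf("connected: %d after %d attempts\n", ok, attempts);
  return 0;
}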
RETURN VALUES - 0 ok - 1 write error + 0 ok + 1 write error */ bool flush_relay_log_info(RELAY_LOG_INFO* rli) @@ -4587,12 +4585,12 @@ static IO_CACHE *reopen_relay_log(RELAY_LOG_INFO *rli, const char **errmsg) IO_CACHE *cur_log = rli->cur_log=&rli->cache_buf; if ((rli->cur_log_fd=open_binlog(cur_log,rli->event_relay_log_name, - errmsg)) <0) + errmsg)) <0) DBUG_RETURN(0); /* We want to start exactly where we was before: - relay_log_pos Current log pos - pending Number of bytes already processed from the event + relay_log_pos Current log pos + pending Number of bytes already processed from the event */ rli->event_relay_log_pos= max(rli->event_relay_log_pos, BIN_LOG_HEADER_SIZE); my_b_seek(cur_log,rli->event_relay_log_pos); @@ -4624,7 +4622,7 @@ static Log_event* next_event(RELAY_LOG_INFO* rli) pthread_cond_wait() with the non-data_lock mutex */ safe_mutex_assert_owner(&rli->data_lock); - + while (!sql_slave_killed(thd,rli)) { /* @@ -4645,17 +4643,17 @@ static Log_event* next_event(RELAY_LOG_INFO* rli) pthread_mutex_lock(log_lock); /* - Reading xxx_file_id is safe because the log will only - be rotated when we hold relay_log.LOCK_log + Reading xxx_file_id is safe because the log will only + be rotated when we hold relay_log.LOCK_log */ if (rli->relay_log.get_open_count() != rli->cur_log_old_open_count) { - // The master has switched to a new log file; Reopen the old log file - cur_log=reopen_relay_log(rli, &errmsg); - pthread_mutex_unlock(log_lock); - if (!cur_log) // No more log files - goto err; - hot_log=0; // Using old binary log + // The master has switched to a new log file; Reopen the old log file + cur_log=reopen_relay_log(rli, &errmsg); + pthread_mutex_unlock(log_lock); + if (!cur_log) // No more log files + goto err; + hot_log=0; // Using old binary log } } @@ -4693,25 +4691,25 @@ static Log_event* next_event(RELAY_LOG_INFO* rli) */ rli->future_event_relay_log_pos= my_b_tell(cur_log); if (hot_log) - pthread_mutex_unlock(log_lock); + pthread_mutex_unlock(log_lock); DBUG_RETURN(ev); } DBUG_ASSERT(thd==rli->sql_thd); - if (opt_reckless_slave) // For mysql-test + if (opt_reckless_slave) // For mysql-test cur_log->error = 0; if (cur_log->error < 0) { errmsg = "slave SQL thread aborted because of I/O error"; if (hot_log) - pthread_mutex_unlock(log_lock); + pthread_mutex_unlock(log_lock); goto err; } if (!cur_log->error) /* EOF */ { /* - On a hot log, EOF means that there are no more updates to - process and we must block until I/O thread adds some and - signals us to continue + On a hot log, EOF means that there are no more updates to + process and we must block until I/O thread adds some and + signals us to continue */ if (hot_log) { @@ -4730,7 +4728,7 @@ static Log_event* next_event(RELAY_LOG_INFO* rli) time_t save_timestamp= rli->last_master_timestamp; rli->last_master_timestamp= 0; - DBUG_ASSERT(rli->relay_log.get_open_count() == + DBUG_ASSERT(rli->relay_log.get_open_count() == rli->cur_log_old_open_count); if (rli->ign_master_log_name_end[0]) @@ -4752,14 +4750,14 @@ static Log_event* next_event(RELAY_LOG_INFO* rli) DBUG_RETURN(ev); } - /* - We can, and should release data_lock while we are waiting for - update. If we do not, show slave status will block - */ - pthread_mutex_unlock(&rli->data_lock); + /* + We can, and should release data_lock while we are waiting for + update. 
If we do not, show slave status will block + */ + pthread_mutex_unlock(&rli->data_lock); /* - Possible deadlock : + Possible deadlock : - the I/O thread has reached log_space_limit - the SQL thread has read all relay logs, but cannot purge for some reason: @@ -4771,10 +4769,10 @@ static Log_event* next_event(RELAY_LOG_INFO* rli) the I/O thread to temporarily ignore the log_space_limit constraint, because we do not want the I/O thread to block because of space (it's ok if it blocks for any other reason (e.g. because the - master does not send anything). Then the I/O thread stops waiting + master does not send anything). Then the I/O thread stops waiting and reads more events. The SQL thread decides when the I/O thread should take log_space_limit - into account again : ignore_log_space_limit is reset to 0 + into account again : ignore_log_space_limit is reset to 0 in purge_first_log (when the SQL thread purges the just-read relay log), and also when the SQL thread starts. We should also reset ignore_log_space_limit to 0 when the user does RESET SLAVE, but in @@ -4784,7 +4782,7 @@ static Log_event* next_event(RELAY_LOG_INFO* rli) */ pthread_mutex_lock(&rli->log_space_lock); // prevent the I/O thread from blocking next times - rli->ignore_log_space_limit= 1; + rli->ignore_log_space_limit= 1; /* If the I/O thread is blocked, unblock it. Ok to broadcast after unlock, because the mutex is only destroyed in @@ -4798,21 +4796,21 @@ static Log_event* next_event(RELAY_LOG_INFO* rli) // re-acquire data lock since we released it earlier pthread_mutex_lock(&rli->data_lock); rli->last_master_timestamp= save_timestamp; - continue; + continue; } /* - If the log was not hot, we need to move to the next log in - sequence. The next log could be hot or cold, we deal with both - cases separately after doing some common initialization + If the log was not hot, we need to move to the next log in + sequence. The next log could be hot or cold, we deal with both + cases separately after doing some common initialization */ end_io_cache(cur_log); DBUG_ASSERT(rli->cur_log_fd >= 0); my_close(rli->cur_log_fd, MYF(MY_WME)); rli->cur_log_fd = -1; - + if (relay_log_purge) { - /* + /* purge_first_log will properly set up relay log coordinates in rli. If the group's coordinates are equal to the event's coordinates (i.e. the relay log was not rotated in the middle of a group), @@ -4823,33 +4821,33 @@ static Log_event* next_event(RELAY_LOG_INFO* rli) - I see no better detection method - purge_first_log is not called that often */ - if (rli->relay_log.purge_first_log + if (rli->relay_log.purge_first_log (rli, rli->group_relay_log_pos == rli->event_relay_log_pos && !strcmp(rli->group_relay_log_name,rli->event_relay_log_name))) - { - errmsg = "Error purging processed logs"; - goto err; - } + { + errmsg = "Error purging processed logs"; + goto err; + } } else { - /* - If hot_log is set, then we already have a lock on - LOCK_log. If not, we have to get the lock. - - According to Sasha, the only time this code will ever be executed - is if we are recovering from a bug. - */ - if (rli->relay_log.find_next_log(&rli->linfo, !hot_log)) - { - errmsg = "error switching to the next log"; - goto err; - } - rli->event_relay_log_pos = BIN_LOG_HEADER_SIZE; - strmake(rli->event_relay_log_name,rli->linfo.log_file_name, - sizeof(rli->event_relay_log_name)-1); - flush_relay_log_info(rli); + /* + If hot_log is set, then we already have a lock on + LOCK_log. If not, we have to get the lock. 
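The long comment above describes the one genuinely subtle piece of this hunk: the SQL thread must release data_lock while waiting, and, to avoid a deadlock with log_space_limit, it temporarily tells the I/O thread to ignore that limit and wakes it up. The pattern is easier to see in isolation; the sketch below uses std::mutex and std::condition_variable in place of the server's pthread primitives, and RelaySpace and its members are illustrative names only.

#include <condition_variable>
#include <mutex>

// The producer (I/O thread) waits while the space budget is exceeded; the
// consumer (SQL thread) can tell it to ignore the budget for a while and wake it.
struct RelaySpace {
  std::mutex lock;                 // plays the role of rli->log_space_lock
  std::condition_variable cond;    // plays the role of rli->log_space_cond
  unsigned long long limit = 0;    // 0 means "no limit"
  unsigned long long total = 0;
  bool ignore_limit = false;

  // Producer side: block while we are over the configured budget.
  void wait_for_space()
  {
    std::unique_lock<std::mutex> guard(lock);
    cond.wait(guard, [this] {
      return ignore_limit || limit == 0 || total < limit;
    });
  }

  // Consumer side: nothing left to execute but purging is not possible yet,
  // so let the producer exceed the budget temporarily.
  void unblock_producer()
  {
    {
      std::lock_guard<std::mutex> guard(lock);
      ignore_limit = true;
    }
    cond.notify_all();             // ok to broadcast after unlocking
  }

  // Consumer side again: once a relay log has been purged, re-arm the limit.
  void rearm_limit(unsigned long long freed)
  {
    std::lock_guard<std::mutex> guard(lock);
    total -= freed;
    ignore_limit = false;
  }
};

int main()
{
  RelaySpace s;
  s.limit = 10; s.total = 42;      // over budget
  s.unblock_producer();            // SQL-thread side
  s.wait_for_space();              // I/O-thread side: returns immediately now
  return 0;
}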
+ + According to Sasha, the only time this code will ever be executed + is if we are recovering from a bug. + */ + if (rli->relay_log.find_next_log(&rli->linfo, !hot_log)) + { + errmsg = "error switching to the next log"; + goto err; + } + rli->event_relay_log_pos = BIN_LOG_HEADER_SIZE; + strmake(rli->event_relay_log_name,rli->linfo.log_file_name, + sizeof(rli->event_relay_log_name)-1); + flush_relay_log_info(rli); } /* @@ -4868,66 +4866,66 @@ static Log_event* next_event(RELAY_LOG_INFO* rli) if (rli->relay_log.is_active(rli->linfo.log_file_name)) { #ifdef EXTRA_DEBUG - if (global_system_variables.log_warnings) - sql_print_information("next log '%s' is currently active", + if (global_system_variables.log_warnings) + sql_print_information("next log '%s' is currently active", rli->linfo.log_file_name); -#endif - rli->cur_log= cur_log= rli->relay_log.get_log_file(); - rli->cur_log_old_open_count= rli->relay_log.get_open_count(); - DBUG_ASSERT(rli->cur_log_fd == -1); - - /* - Read pointer has to be at the start since we are the only - reader. +#endif + rli->cur_log= cur_log= rli->relay_log.get_log_file(); + rli->cur_log_old_open_count= rli->relay_log.get_open_count(); + DBUG_ASSERT(rli->cur_log_fd == -1); + + /* + Read pointer has to be at the start since we are the only + reader. We must keep the LOCK_log to read the 4 first bytes, as this is a hot log (same as when we call read_log_event() above: for a hot log we take the mutex). - */ - if (check_binlog_magic(cur_log,&errmsg)) + */ + if (check_binlog_magic(cur_log,&errmsg)) { if (!hot_log) pthread_mutex_unlock(log_lock); - goto err; + goto err; } if (!hot_log) pthread_mutex_unlock(log_lock); - continue; + continue; } if (!hot_log) pthread_mutex_unlock(log_lock); /* - if we get here, the log was not hot, so we will have to open it - ourselves. We are sure that the log is still not hot now (a log can get - from hot to cold, but not from cold to hot). No need for LOCK_log. + if we get here, the log was not hot, so we will have to open it + ourselves. We are sure that the log is still not hot now (a log can get + from hot to cold, but not from cold to hot). No need for LOCK_log. */ #ifdef EXTRA_DEBUG if (global_system_variables.log_warnings) - sql_print_information("next log '%s' is not active", + sql_print_information("next log '%s' is not active", rli->linfo.log_file_name); -#endif +#endif // open_binlog() will check the magic header if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name, - &errmsg)) <0) - goto err; + &errmsg)) <0) + goto err; } else { /* - Read failed with a non-EOF error. - TODO: come up with something better to handle this error + Read failed with a non-EOF error. 
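When the SQL thread switches to the next relay log, check_binlog_magic() and open_binlog() in the hunk above refuse any file that does not begin with the 4-byte binary-log magic (0xfe 'b' 'i' 'n'). A minimal self-contained version of that check, using plain stdio instead of IO_CACHE and simplified error reporting:

#include <cstdio>
#include <cstring>

static const unsigned char BINLOG_MAGIC[4] = {0xfe, 'b', 'i', 'n'};

// Returns true when the file starts with the binlog magic; otherwise sets a
// static error message, roughly what check_binlog_magic() reports.
bool has_binlog_magic(const char *path, const char **errmsg)
{
  FILE *f = fopen(path, "rb");
  if (!f)
  {
    *errmsg = "could not open log file";
    return false;
  }
  unsigned char header[4];
  bool ok = fread(header, 1, sizeof(header), f) == sizeof(header) &&
            memcmp(header, BINLOG_MAGIC, sizeof(header)) == 0;
  fclose(f);
  if (!ok)
    *errmsg = "binlog magic number missing or wrong";
  return ok;
}

int main(int argc, char **argv)
{
  const char *errmsg = 0;
  if (argc > 1 && !has_binlog_magic(argv[1], &errmsg))
    fprintf(stderr, "%s: %s\n", argv[1], errmsg);
  return 0;
}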
+ TODO: come up with something better to handle this error */ if (hot_log) - pthread_mutex_unlock(log_lock); + pthread_mutex_unlock(log_lock); sql_print_error("Slave SQL thread: I/O error reading \ event(errno: %d cur_log->error: %d)", - my_errno,cur_log->error); + my_errno,cur_log->error); // set read position to the beginning of the event my_b_seek(cur_log,rli->event_relay_log_pos); /* otherwise, we have had a partial read */ errmsg = "Aborting slave SQL thread because of partial event read"; - break; // To end of function + break; // To end of function } } if (!errmsg && global_system_variables.log_warnings) { - sql_print_information("Error reading relay log event: %s", + sql_print_information("Error reading relay log event: %s", "slave SQL thread was killed"); DBUG_RETURN(0); } @@ -4941,7 +4939,7 @@ err: /* Rotate a relay log (this is used only by FLUSH LOGS; the automatic rotation because of size is simpler because when we do it we already have all relevant - locks; here we don't, so this function is mainly taking locks). + locks; here we don't, so this function is mainly taking locks). Returns nothing as we cannot catch any error (MYSQL_BIN_LOG::new_file() is void). */ @@ -4954,7 +4952,7 @@ void rotate_relay_log(MASTER_INFO* mi) /* We don't lock rli->run_lock. This would lead to deadlocks. */ pthread_mutex_lock(&mi->run_lock); - /* + /* We need to test inited because otherwise, new_file() will attempt to lock LOCK_log, which may not be inited (if we're not a slave). */ @@ -5003,7 +5001,7 @@ struct st_reload_entry Sorted array of table names, please keep it sorted since we are using bsearch() on it below. */ -static st_reload_entry s_mysql_tables[] = +static st_reload_entry s_mysql_tables[] = { { "columns_priv", st_relay_log_info::RELOAD_GRANT_F }, { "db", st_relay_log_info::RELOAD_ACCESS_F }, @@ -5026,7 +5024,7 @@ static int reload_entry_compare(const void *lhs, const void *rhs) } void st_relay_log_info::touching_table(char const* db, char const* table, - ulong table_id) + ulong table_id) { DBUG_ENTER("st_relay_log_info::touching_table"); @@ -5053,7 +5051,7 @@ void st_relay_log_info::touching_table(char const* db, char const* table, DBUG_VOID_RETURN; } -void st_relay_log_info::transaction_end(THD* thd) +void st_relay_log_info::transaction_end(THD* thd) { DBUG_ENTER("st_relay_log_info::transaction_end"); @@ -5108,4 +5106,3 @@ template class I_List_iterator<i_string_pair>; #endif #endif /* HAVE_REPLICATION */ - diff --git a/sql/sp.cc b/sql/sp.cc index e5c565150d8..4c8cc6156b7 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -1630,6 +1630,7 @@ sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex, sp->add_used_tables_to_table_list(thd, &lex->query_tables_last, rt->belong_to_view); } + sp->propagate_attributes(lex); } first= FALSE; } @@ -1727,14 +1728,16 @@ sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex, { for (int j= 0; j < (int)TRG_ACTION_MAX; j++) { - if (triggers->bodies[i][j]) + sp_head *trigger_body= triggers->bodies[i][j]; + if (trigger_body) { - (void)triggers->bodies[i][j]-> - add_used_tables_to_table_list(thd, &lex->query_tables_last, - table->belong_to_view); + (void)trigger_body-> + add_used_tables_to_table_list(thd, &lex->query_tables_last, + table->belong_to_view); sp_update_stmt_used_routines(thd, lex, - &triggers->bodies[i][j]->m_sroutines, + &trigger_body->m_sroutines, table->belong_to_view); + trigger_body->propagate_attributes(lex); } } } diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 81f5d502ec9..029c6fa22a9 100644 --- a/sql/sp_head.cc +++ 
b/sql/sp_head.cc @@ -1661,6 +1661,16 @@ sp_head::restore_lex(THD *thd) oldlex->next_state= sublex->next_state; oldlex->trg_table_fields.push_back(&sublex->trg_table_fields); +#ifdef HAVE_ROW_BASED_REPLICATION + /* + If this substatement needs row-based, the entire routine does too (we + cannot switch from statement-based to row-based only for this + substatement). + */ + if (sublex->binlog_row_based_if_mixed) + m_flags|= BINLOG_ROW_BASED_IF_MIXED; +#endif + /* Add routines which are used by statement to respective set for this routine. diff --git a/sql/sp_head.h b/sql/sp_head.h index 2fd38b7176c..4712647b6f4 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -117,7 +117,8 @@ public: /* Is set if a procedure with COMMIT (implicit or explicit) | ROLLBACK */ HAS_COMMIT_OR_ROLLBACK= 128, LOG_SLOW_STATEMENTS= 256, // Used by events - LOG_GENERAL_LOG= 512 // Used by events + LOG_GENERAL_LOG= 512, // Used by events + BINLOG_ROW_BASED_IF_MIXED= 1024 }; /* TYPE_ENUM_FUNCTION, TYPE_ENUM_PROCEDURE or TYPE_ENUM_TRIGGER */ @@ -342,6 +343,25 @@ public: int show_routine_code(THD *thd); #endif + /* + This method is intended for attributes of a routine which need + to propagate upwards to the LEX of the caller (when a property of a + sp_head needs to "taint" the caller). + */ + void propagate_attributes(LEX *lex) + { +#ifdef HAVE_ROW_BASED_REPLICATION + /* + If this routine needs row-based binary logging, the entire top statement + too (we cannot switch from statement-based to row-based only for this + routine, as in statement-based the top-statement may be binlogged and + the substatements not). + */ + if (m_flags & BINLOG_ROW_BASED_IF_MIXED) + lex->binlog_row_based_if_mixed= TRUE; +#endif + } + private: diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index deb7fe2d2b8..e82ad611427 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -5749,25 +5749,30 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name, TABLE_LIST tables[1]; List<LEX_USER> user_list; bool result; + ACL_USER *au; + char passwd_buff[SCRAMBLED_PASSWORD_CHAR_LENGTH+1]; DBUG_ENTER("sp_grant_privileges"); if (!(combo=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) DBUG_RETURN(TRUE); combo->user.str= sctx->user; - + VOID(pthread_mutex_lock(&acl_cache->lock)); - if (!find_acl_user(combo->host.str=(char*)sctx->host_or_ip, combo->user.str, - FALSE) && - !find_acl_user(combo->host.str=(char*)sctx->host, combo->user.str, - FALSE) && - !find_acl_user(combo->host.str=(char*)sctx->ip, combo->user.str, - FALSE) && - !find_acl_user(combo->host.str=(char*)"%", combo->user.str, FALSE)) - { - VOID(pthread_mutex_unlock(&acl_cache->lock)); - DBUG_RETURN(TRUE); - } + + if ((au= find_acl_user(combo->host.str=(char*)sctx->host_or_ip,combo->user.str,FALSE))) + goto found_acl; + if ((au= find_acl_user(combo->host.str=(char*)sctx->host, combo->user.str,FALSE))) + goto found_acl; + if ((au= find_acl_user(combo->host.str=(char*)sctx->ip, combo->user.str,FALSE))) + goto found_acl; + if((au= find_acl_user(combo->host.str=(char*)"%", combo->user.str, FALSE))) + goto found_acl; + + VOID(pthread_mutex_unlock(&acl_cache->lock)); + DBUG_RETURN(TRUE); + + found_acl: VOID(pthread_mutex_unlock(&acl_cache->lock)); bzero((char*)tables, sizeof(TABLE_LIST)); @@ -5775,13 +5780,37 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name, tables->db= (char*)sp_db; tables->table_name= tables->alias= (char*)sp_name; - + combo->host.length= strlen(combo->host.str); combo->user.length= strlen(combo->user.str); combo->host.str= 
thd->strmake(combo->host.str,combo->host.length); combo->user.str= thd->strmake(combo->user.str,combo->user.length); - combo->password.str= (char*)""; - combo->password.length= 0; + + + if(au && au->salt_len) + { + if (au->salt_len == SCRAMBLE_LENGTH) + { + make_password_from_salt(passwd_buff, au->salt); + combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH; + } + else if (au->salt_len == SCRAMBLE_LENGTH_323) + { + make_password_from_salt_323(passwd_buff, (ulong *) au->salt); + combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323; + } + else + { + my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH); + return -1; + } + combo->password.str= passwd_buff; + } + else + { + combo->password.str= (char*)""; + combo->password.length= 0; + } if (user_list.push_back(combo)) DBUG_RETURN(TRUE); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index c2ebd140720..28393e4b964 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -49,6 +49,8 @@ static bool open_new_frm(THD *thd, TABLE_SHARE *share, const char *alias, static void close_old_data_files(THD *thd, TABLE *table, bool abort_locks, bool send_refresh); static bool reopen_table(TABLE *table); +static bool +has_two_write_locked_tables_with_auto_increment(TABLE_LIST *tables); extern "C" byte *table_cache_key(const byte *record,uint *length, @@ -1179,135 +1181,134 @@ static inline uint tmpkeyval(THD *thd, TABLE *table) void close_temporary_tables(THD *thd) { - TABLE *next, *prev_table, *table; - char *query= 0, *end; - uint query_buf_size, max_names_len; - bool found_user_tables; - + TABLE *table; if (!thd->temporary_tables) return; - - LINT_INIT(end); - query_buf_size= 50; // Enough for DROP ... TABLE IF EXISTS - /* - insertion sort of temp tables by pseudo_thread_id to build ordered list + if (!mysql_bin_log.is_open() || thd->current_stmt_binlog_row_based) + { + TABLE *next; + for (table= thd->temporary_tables; table; table= next) + { + next=table->next; + close_temporary(table, 1, 1); + } + thd->temporary_tables= 0; + return; + } + + TABLE *next, + *prev_table /* TODO: 5.1 maintaines prev link in temporary_tables + double-linked list so we could fix it. But it is not necessary + at this time when the list is being destroyed */; + bool was_quote_show= true; /* to assume thd->options has OPTION_QUOTE_SHOW_CREATE */ + // Better add "if exists", in case a RESET MASTER has been done + const char stub[]= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS "; + uint stub_len= sizeof(stub) - 1; + char buf[256]; + memcpy(buf, stub, stub_len); + String s_query= String(buf, sizeof(buf), system_charset_info); + bool found_user_tables= false; + LINT_INIT(next); + + /* + insertion sort of temp tables by pseudo_thread_id to build ordered list of sublists of equal pseudo_thread_id */ - for (prev_table= thd->temporary_tables, - table= prev_table->next, - found_user_tables= (prev_table->s->table_name.str[0] != '#'); + + for (prev_table= thd->temporary_tables, table= prev_table->next; table; prev_table= table, table= table->next) { - TABLE *prev_sorted /* same as for prev_table */, - *sorted; - /* - table not created directly by the user is moved to the tail. 
- Fixme/todo: nothing (I checked the manual) prevents user to create temp - with `#' - */ - if (table->s->table_name.str[0] == '#') - continue; - else + TABLE *prev_sorted /* same as for prev_table */, *sorted; + if (is_user_table(table)) { - found_user_tables = 1; - } - for (prev_sorted= NULL, sorted= thd->temporary_tables; sorted != table; - prev_sorted= sorted, sorted= sorted->next) - { - if (sorted->s->table_name.str[0] == '#' || tmpkeyval(thd, sorted) > tmpkeyval(thd, table)) + if (!found_user_tables) + found_user_tables= true; + for (prev_sorted= NULL, sorted= thd->temporary_tables; sorted != table; + prev_sorted= sorted, sorted= sorted->next) { - /* move into the sorted part of the list from the unsorted */ - prev_table->next= table->next; - table->next= sorted; - if (prev_sorted) + if (!is_user_table(sorted) || + tmpkeyval(thd, sorted) > tmpkeyval(thd, table)) { - prev_sorted->next= table; - } - else - { - thd->temporary_tables= table; + /* move into the sorted part of the list from the unsorted */ + prev_table->next= table->next; + table->next= sorted; + if (prev_sorted) + { + prev_sorted->next= table; + } + else + { + thd->temporary_tables= table; + } + table= prev_table; + break; } - table= prev_table; - break; } } - } - /* - calc query_buf_size as max per sublists, one sublist per pseudo thread id. - Also stop at first occurence of `#'-named table that starts - all implicitly created temp tables - */ - for (max_names_len= 0, table=thd->temporary_tables; - table && table->s->table_name.str[0] != '#'; - table=table->next) - { - uint tmp_names_len; - for (tmp_names_len= table->s->table_cache_key.length + 1; - table->next && table->s->table_name.str[0] != '#' && - tmpkeyval(thd, table) == tmpkeyval(thd, table->next); - table=table->next) - { - /* - We are going to add 4 ` around the db/table names, so 1 might not look - enough; indeed it is enough, because table->s->table_cache_key.length is - greater (by 8, because of server_id and thread_id) than db||table. 
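The rewritten close_temporary_tables() in this hunk first orders the session's temporary tables so that tables created by the user form groups of equal pseudo_thread_id (internal '#'-prefixed tables are not binlogged); the old code did the same with an insertion sort plus the separate buffer-size pass removed above. One way to obtain the same grouping outside the server is sketched below; TmpTable and group_by_pseudo_thread_id are invented names, and std::stable_partition/std::stable_sort replace the in-place list sort purely for brevity.

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// Illustrative stand-in for one temporary table: the connection id it was
// created under and its name.
struct TmpTable {
  unsigned long pseudo_thread_id;
  std::string db;
  std::string name;
};

bool is_user_table(const TmpTable &t)
{ return !t.name.empty() && t.name[0] != '#'; }

// User tables first, grouped by pseudo_thread_id, so one DROP statement can
// later be emitted per group.
void group_by_pseudo_thread_id(std::vector<TmpTable> &tables)
{
  std::stable_partition(tables.begin(), tables.end(), is_user_table);
  auto user_end = std::find_if_not(tables.begin(), tables.end(), is_user_table);
  std::stable_sort(tables.begin(), user_end,
                   [](const TmpTable &a, const TmpTable &b) {
                     return a.pseudo_thread_id < b.pseudo_thread_id;
                   });
}

int main()
{
  std::vector<TmpTable> t = {{7, "test", "t2"}, {3, "test", "t1"},
                             {7, "test", "#sql123"}, {3, "test", "t3"}};
  group_by_pseudo_thread_id(t);
  for (const TmpTable &x : t)
    printf("%lu %s.%s\n", x.pseudo_thread_id, x.db.c_str(), x.name.c_str());
  return 0;
}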
- */ - tmp_names_len += table->next->s->table_cache_key.length + 1; - } - if (tmp_names_len > max_names_len) max_names_len= tmp_names_len; } - - /* allocate */ - if (found_user_tables && mysql_bin_log.is_open() && - !thd->current_stmt_binlog_row_based && - (query = alloc_root(thd->mem_root, query_buf_size+= max_names_len))) - // Better add "if exists", in case a RESET MASTER has been done - end= strmov(query, "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS "); + + /* We always quote db,table names though it is slight overkill */ + if (found_user_tables && + !(was_quote_show= (thd->options & OPTION_QUOTE_SHOW_CREATE))) + { + thd->options |= OPTION_QUOTE_SHOW_CREATE; + } /* scan sorted tmps to generate sequence of DROP */ - for (table=thd->temporary_tables; table; table= next) + for (table= thd->temporary_tables; table; table= next) { - if (query // we might be out of memory, but this is not fatal - && table->s->table_name.str[0] != '#') + if (is_user_table(table)) { - char *end_cur; /* Set pseudo_thread_id to be that of the processed table */ thd->variables.pseudo_thread_id= tmpkeyval(thd, table); /* Loop forward through all tables within the sublist of common pseudo_thread_id to create single DROP query */ - for (end_cur= end; - table && table->s->table_name.str[0] != '#' && + for (s_query.length(stub_len); + table && is_user_table(table) && tmpkeyval(thd, table) == thd->variables.pseudo_thread_id; table= next) { - end_cur= strxmov(end_cur, "`", table->s->db.str, "`.`", - table->s->table_name.str, "`,", NullS); + /* + We are going to add 4 ` around the db/table names and possible more + due to special characters in the names + */ + append_identifier(thd, &s_query, table->s->db.str, strlen(table->s->db.str)); + s_query.q_append('.'); + append_identifier(thd, &s_query, table->s->table_name.str, + strlen(table->s->table_name.str)); + s_query.q_append(','); next= table->next; close_temporary(table, 1, 1); } thd->clear_error(); - /* The -1 is to remove last ',' */ - Query_log_event qinfo(thd, query, (ulong)(end_cur - query) - 1, 0, FALSE); + CHARSET_INFO *cs_save= thd->variables.character_set_client; + thd->variables.character_set_client= system_charset_info; + Query_log_event qinfo(thd, s_query.ptr(), + s_query.length() - 1 /* to remove trailing ',' */, + 0, FALSE); + thd->variables.character_set_client= cs_save; /* - Imagine the thread had created a temp table, then was doing a SELECT, - and the SELECT was killed. Then it's not clever to mark the statement - above as "killed", because it's not really a statement updating data, - and there are 99.99% chances it will succeed on slave. If a real update - (one updating a persistent table) was killed on the master, then this - real update will be logged with error_code=killed, rightfully causing - the slave to stop. + Imagine the thread had created a temp table, then was doing a SELECT, and + the SELECT was killed. Then it's not clever to mark the statement above as + "killed", because it's not really a statement updating data, and there + are 99.99% chances it will succeed on slave. + If a real update (one updating a persistent table) was killed on the + master, then this real update will be logged with error_code=killed, + rightfully causing the slave to stop. 
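For each group of equal pseudo_thread_id, the code above then binlogs one statement of the form DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `db`.`t1`,`db`.`t2`, using append_identifier() so unusual characters in names survive, and trimming the trailing comma before writing the Query_log_event. A stand-alone approximation of the quoting and assembly; quote_identifier and build_drop_statement are illustrative names, and the backtick doubling mirrors what backtick quoting requires rather than the server's exact routine.

#include <cstdio>
#include <string>
#include <vector>

// Backtick-quote an identifier: wrap in ` and double any embedded `.
std::string quote_identifier(const std::string &name)
{
  std::string out = "`";
  for (char c : name)
  {
    if (c == '`')
      out += "``";
    else
      out += c;
  }
  out += '`';
  return out;
}

// One statement per group of temporary tables: append `db`.`table` separated
// by commas, then strip the trailing comma, as the loop above does.
std::string build_drop_statement(const std::vector<std::pair<std::string, std::string> > &tables)
{
  std::string q = "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ";
  for (const auto &t : tables)
    q += quote_identifier(t.first) + "." + quote_identifier(t.second) + ",";
  if (!tables.empty())
    q.erase(q.size() - 1);        // remove the trailing ','
  return q;
}

int main()
{
  std::vector<std::pair<std::string, std::string> > tabs =
      {{"test", "t1"}, {"test", "weird`name"}};
  printf("%s\n", build_drop_statement(tabs).c_str());
  return 0;
}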
*/ qinfo.error_code= 0; mysql_bin_log.write(&qinfo); } - else + else { next= table->next; close_temporary(table, 1, 1); } } + if (!was_quote_show) + thd->options &= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */ thd->temporary_tables=0; } @@ -3317,6 +3318,14 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen) *need_reopen= FALSE; +#ifdef HAVE_ROW_BASED_REPLICATION + /* + CREATE ... SELECT UUID() locks no tables, we have to test here. + */ + if (thd->lex->binlog_row_based_if_mixed) + thd->set_current_stmt_binlog_row_based_if_mixed(); +#endif /*HAVE_ROW_BASED_REPLICATION*/ + if (!tables) DBUG_RETURN(0); @@ -3347,6 +3356,19 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen) { thd->in_lock_tables=1; thd->options|= OPTION_TABLE_LOCK; +#ifdef HAVE_ROW_BASED_REPLICATION + /* + If we have >= 2 different tables to update with auto_inc columns, + statement-based binlogging won't work. We can solve this problem in + mixed mode by switching to row-based binlogging: + */ + if (thd->variables.binlog_format == BINLOG_FORMAT_MIXED && + has_two_write_locked_tables_with_auto_increment(tables)) + { + thd->lex->binlog_row_based_if_mixed= TRUE; + thd->set_current_stmt_binlog_row_based_if_mixed(); + } +#endif } if (! (thd->lock= mysql_lock_tables(thd, start, (uint) (ptr - start), @@ -6479,3 +6501,46 @@ void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table DBUG_VOID_RETURN; } + +/* + Tells if two (or more) tables have auto_increment columns and we want to + lock those tables with a write lock. + + SYNOPSIS + has_two_write_locked_tables_with_auto_increment + tables Table list + + NOTES: + Call this function only when you have established the list of all tables + which you'll want to update (including stored functions, triggers, views + inside your statement). 
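The lock_tables() change above, together with the helper whose synopsis begins here (its body follows in the next hunk), decides when mixed-format binlogging has to fall back to row events: as soon as a statement write-locks two different tables that both carry an auto_increment column, because their id generation cannot be replayed deterministically statement by statement. The same decision in miniature, with an invented TableRef in place of TABLE_LIST:

#include <string>
#include <vector>

struct TableRef {
  std::string db;
  std::string name;
  bool write_locked;        // lock_type >= TL_WRITE_ALLOW_WRITE in the server
  bool has_auto_increment;  // found_next_number_field != NULL in the server
};

// True as soon as two *different* tables are both write-locked and have an
// auto_increment column, mirroring
// has_two_write_locked_tables_with_auto_increment().
bool needs_row_based_for_autoinc(const std::vector<TableRef> &tables)
{
  const TableRef *first = nullptr;
  for (const TableRef &t : tables)
  {
    if (!t.write_locked || !t.has_auto_increment)
      continue;
    if (!first)
      first = &t;
    else if (first->db != t.db || first->name != t.name)
      return true;
  }
  return false;
}

int main()
{
  std::vector<TableRef> tl = {{"test", "t1", true, true},
                              {"test", "t2", true, true}};
  return needs_row_based_for_autoinc(tl) ? 0 : 1;
}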
+ + RETURN + 0 No + 1 Yes +*/ + +static bool +has_two_write_locked_tables_with_auto_increment(TABLE_LIST *tables) +{ + char *first_table_name= NULL, *first_db; + for (TABLE_LIST *table= tables; table; table= table->next_global) + { + /* we must do preliminary checks as table->table may be NULL */ + if (!table->placeholder() && !table->schema_table && + table->table->found_next_number_field && + (table->lock_type >= TL_WRITE_ALLOW_WRITE)) + { + if (first_table_name == NULL) + { + first_table_name= table->table_name; + first_db= table->db; + DBUG_ASSERT(first_db); + } + else if (strcmp(first_db, table->db) || + strcmp(first_table_name, table->table_name)) + return 1; + } + } + return 0; +} diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 1a7d5d4b89a..33069e84809 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -208,8 +208,12 @@ THD::THD() #endif /*HAVE_ROW_BASED_REPLICATION*/ global_read_lock(0), is_fatal_error(0), rand_used(0), time_zone_used(0), - last_insert_id_used(0), insert_id_used(0), clear_next_insert_id(0), + arg_of_last_insert_id_function(FALSE), + first_successful_insert_id_in_prev_stmt(0), + first_successful_insert_id_in_prev_stmt_for_binlog(0), + first_successful_insert_id_in_cur_stmt(0), in_lock_tables(0), bootstrap(0), derived_tables_processing(FALSE), + stmt_depends_on_first_successful_insert_id_in_prev_stmt(FALSE), spcont(NULL) { stmt_arena= this; @@ -224,7 +228,6 @@ THD::THD() killed= NOT_KILLED; db_length= col_access=0; query_error= tmp_table_used= 0; - next_insert_id=last_insert_id=0; hash_clear(&handler_tables_hash); tmp_table=0; used_tables=0; @@ -628,11 +631,14 @@ bool THD::store_globals() void THD::cleanup_after_query() { - if (clear_next_insert_id) + if (first_successful_insert_id_in_cur_stmt > 0) { - clear_next_insert_id= 0; - next_insert_id= 0; + /* set what LAST_INSERT_ID() will return */ + first_successful_insert_id_in_prev_stmt= + first_successful_insert_id_in_cur_stmt; + first_successful_insert_id_in_cur_stmt= 0; } + arg_of_last_insert_id_function= 0; /* Free Items that were created during this execution */ free_items(); /* Reset where. 
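The THD changes in this hunk replace the old next_insert_id/last_insert_id bookkeeping with the first_successful_insert_id_in_* pair, whose lifecycle is simple: the first value successfully generated during the current statement is remembered, and cleanup_after_query() promotes it to what LAST_INSERT_ID() reports from then on. A toy model of just that lifecycle follows; the field names follow the patch, everything else is simplified.

#include <cstdio>

struct InsertIdState {
  unsigned long long first_successful_insert_id_in_prev_stmt = 0;
  unsigned long long first_successful_insert_id_in_cur_stmt = 0;

  // Called whenever a row successfully receives an autogenerated value.
  void record_first_successful_insert_id_in_cur_stmt(unsigned long long id)
  {
    if (first_successful_insert_id_in_cur_stmt == 0)
      first_successful_insert_id_in_cur_stmt = id;
  }

  // What LAST_INSERT_ID() returns inside a statement.
  unsigned long long last_insert_id() const
  { return first_successful_insert_id_in_prev_stmt; }

  // Analogue of THD::cleanup_after_query(): promote cur to prev.
  void cleanup_after_query()
  {
    if (first_successful_insert_id_in_cur_stmt > 0)
    {
      first_successful_insert_id_in_prev_stmt =
          first_successful_insert_id_in_cur_stmt;
      first_successful_insert_id_in_cur_stmt = 0;
    }
  }
};

int main()
{
  InsertIdState thd;
  thd.record_first_successful_insert_id_in_cur_stmt(42);   // INSERT generates 42, 43, ...
  thd.record_first_successful_insert_id_in_cur_stmt(43);   // only the first one counts
  thd.cleanup_after_query();
  printf("LAST_INSERT_ID() = %llu\n", thd.last_insert_id()); // prints 42
  return 0;
}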
*/ @@ -2139,18 +2145,16 @@ void THD::reset_sub_statement_state(Sub_statement_state *backup, backup->in_sub_stmt= in_sub_stmt; backup->no_send_ok= net.no_send_ok; backup->enable_slow_log= enable_slow_log; - backup->last_insert_id= last_insert_id; - backup->next_insert_id= next_insert_id; - backup->current_insert_id= current_insert_id; - backup->insert_id_used= insert_id_used; - backup->last_insert_id_used= last_insert_id_used; - backup->clear_next_insert_id= clear_next_insert_id; backup->limit_found_rows= limit_found_rows; backup->examined_row_count= examined_row_count; backup->sent_row_count= sent_row_count; backup->cuted_fields= cuted_fields; backup->client_capabilities= client_capabilities; backup->savepoints= transaction.savepoints; + backup->first_successful_insert_id_in_prev_stmt= + first_successful_insert_id_in_prev_stmt; + backup->first_successful_insert_id_in_cur_stmt= + first_successful_insert_id_in_cur_stmt; if ((!lex->requires_prelocking() || is_update_query(lex->sql_command)) && !current_stmt_binlog_row_based) @@ -2160,12 +2164,11 @@ void THD::reset_sub_statement_state(Sub_statement_state *backup, /* Disable result sets */ client_capabilities &= ~CLIENT_MULTI_RESULTS; in_sub_stmt|= new_state; - next_insert_id= 0; - insert_id_used= 0; examined_row_count= 0; sent_row_count= 0; cuted_fields= 0; transaction.savepoints= 0; + first_successful_insert_id_in_cur_stmt= 0; /* Surpress OK packets in case if we will execute statements */ net.no_send_ok= TRUE; @@ -2193,12 +2196,10 @@ void THD::restore_sub_statement_state(Sub_statement_state *backup) in_sub_stmt= backup->in_sub_stmt; net.no_send_ok= backup->no_send_ok; enable_slow_log= backup->enable_slow_log; - last_insert_id= backup->last_insert_id; - next_insert_id= backup->next_insert_id; - current_insert_id= backup->current_insert_id; - insert_id_used= backup->insert_id_used; - last_insert_id_used= backup->last_insert_id_used; - clear_next_insert_id= backup->clear_next_insert_id; + first_successful_insert_id_in_prev_stmt= + backup->first_successful_insert_id_in_prev_stmt; + first_successful_insert_id_in_cur_stmt= + backup->first_successful_insert_id_in_cur_stmt; limit_found_rows= backup->limit_found_rows; sent_row_count= backup->sent_row_count; client_capabilities= backup->client_capabilities; @@ -2712,6 +2713,7 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, bool is_trans, bool suppress_use) { DBUG_ENTER("THD::binlog_query"); + DBUG_PRINT("enter", ("qtype=%d, query='%s'", qtype, query)); DBUG_ASSERT(query && mysql_bin_log.is_open()); switch (qtype) { @@ -2779,4 +2781,26 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, DBUG_RETURN(0); } +bool Discrete_intervals_list::append(ulonglong start, ulonglong val, + ulonglong incr) +{ + DBUG_ENTER("Discrete_intervals_list::append"); + /* first, see if this can be merged with previous */ + if ((head == NULL) || tail->merge_if_contiguous(start, val, incr)) + { + /* it cannot, so need to add a new interval */ + Discrete_interval *new_interval= new Discrete_interval(start, val, incr); + if (unlikely(new_interval == NULL)) // out of memory + DBUG_RETURN(1); + DBUG_PRINT("info",("adding new auto_increment interval")); + if (head == NULL) + head= current= new_interval; + else + tail->next= new_interval; + tail= new_interval; + elements++; + } + DBUG_RETURN(0); +} + #endif /* !defined(MYSQL_CLIENT) */ diff --git a/sql/sql_class.h b/sql/sql_class.h index b79f0753603..0e8ef66bd4e 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -770,12 +770,14 @@ class Sub_statement_state { 
public: ulonglong options; - ulonglong last_insert_id, next_insert_id, current_insert_id; + ulonglong first_successful_insert_id_in_prev_stmt; + ulonglong first_successful_insert_id_in_cur_stmt, insert_id_for_cur_row; + Discrete_interval auto_inc_interval_for_cur_row; ulonglong limit_found_rows; ha_rows cuted_fields, sent_row_count, examined_row_count; ulong client_capabilities; uint in_sub_stmt; - bool enable_slow_log, insert_id_used, clear_next_insert_id; + bool enable_slow_log; bool last_insert_id_used; my_bool no_send_ok; SAVEPOINT *savepoints; @@ -1071,24 +1073,138 @@ public: Note: in the parser, stmt_arena == thd, even for PS/SP. */ Query_arena *stmt_arena; + /* Tells if LAST_INSERT_ID(#) was called for the current statement */ + bool arg_of_last_insert_id_function; /* - next_insert_id is set on SET INSERT_ID= #. This is used as the next - generated auto_increment value in handler.cc + ALL OVER THIS FILE, "insert_id" means "*automatically generated* value for + insertion into an auto_increment column". */ - ulonglong next_insert_id; - /* Remember last next_insert_id to reset it if something went wrong */ - ulonglong prev_insert_id; /* - The insert_id used for the last statement or set by SET LAST_INSERT_ID=# - or SELECT LAST_INSERT_ID(#). Used for binary log and returned by - LAST_INSERT_ID() + This is the first autogenerated insert id which was *successfully* + inserted by the previous statement (exactly, if the previous statement + didn't successfully insert an autogenerated insert id, then it's the one + of the statement before, etc). + It can also be set by SET LAST_INSERT_ID=# or SELECT LAST_INSERT_ID(#). + It is returned by LAST_INSERT_ID(). */ - ulonglong last_insert_id; + ulonglong first_successful_insert_id_in_prev_stmt; /* - Set to the first value that LAST_INSERT_ID() returned for the last - statement. When this is set, last_insert_id_used is set to true. + Variant of the above, used for storing in statement-based binlog. The + difference is that the one above can change as the execution of a stored + function progresses, while the one below is set once and then does not + change (which is the value which statement-based binlog needs). */ - ulonglong current_insert_id; + ulonglong first_successful_insert_id_in_prev_stmt_for_binlog; + /* + This is the first autogenerated insert id which was *successfully* + inserted by the current statement. It is maintained only to set + first_successful_insert_id_in_prev_stmt when statement ends. + */ + ulonglong first_successful_insert_id_in_cur_stmt; + /* + We follow this logic: + - when stmt starts, first_successful_insert_id_in_prev_stmt contains the + first insert id successfully inserted by the previous stmt. + - as stmt makes progress, handler::insert_id_for_cur_row changes; every + time get_auto_increment() is called, auto_inc_intervals_for_binlog is + augmented with the reserved interval (if statement-based binlogging). + - at first successful insertion of an autogenerated value, + first_successful_insert_id_in_cur_stmt is set to + handler::insert_id_for_cur_row. + - when stmt goes to binlog, auto_inc_intervals_for_binlog is + binlogged if non-empty. + - when stmt ends, first_successful_insert_id_in_prev_stmt is set to + first_successful_insert_id_in_cur_stmt. + */ + /* + stmt_depends_on_first_successful_insert_id_in_prev_stmt is set when + LAST_INSERT_ID() is used by a statement. + If it is set, first_successful_insert_id_in_prev_stmt_for_binlog will be + stored in the statement-based binlog. 
+ This variable is CUMULATIVE along the execution of a stored function or + trigger: if one substatement sets it to 1 it will stay 1 until the + function/trigger ends, thus making sure that + first_successful_insert_id_in_prev_stmt_for_binlog does not change anymore + and is propagated to the caller for binlogging. + */ + bool stmt_depends_on_first_successful_insert_id_in_prev_stmt; + /* + List of auto_increment intervals reserved by the thread so far, for + storage in the statement-based binlog. + Note that its minimum is not first_successful_insert_id_in_cur_stmt: + assuming a table with an autoinc column, and this happens: + INSERT INTO ... VALUES(3); + SET INSERT_ID=3; INSERT IGNORE ... VALUES (NULL); + then the latter INSERT will insert no rows + (first_successful_insert_id_in_cur_stmt == 0), but storing "INSERT_ID=3" + in the binlog is still needed; the list's minimum will contain 3. + */ + Discrete_intervals_list auto_inc_intervals_in_cur_stmt_for_binlog; + /* Used by replication and SET INSERT_ID */ + Discrete_intervals_list auto_inc_intervals_forced; + /* + There is BUG#19630 where statement-based replication of stored + functions/triggers with two auto_increment columns breaks. + We however ensure that it works when there is 0 or 1 auto_increment + column; our rules are + a) on master, while executing a top statement involving substatements, + first top- or sub- statement to generate auto_increment values wins the + exclusive right to see its values be written to binlog (the write + will be done by the statement or its caller), and the losers won't see + their values be written to binlog. + b) on slave, while replicating a top statement involving substatements, + first top- or sub- statement to need to read auto_increment values from + the master's binlog wins the exclusive right to read them (so the losers + won't read their values from binlog but instead generate on their own). + a) implies that we mustn't backup/restore + auto_inc_intervals_in_cur_stmt_for_binlog. + b) implies that we mustn't backup/restore auto_inc_intervals_forced. + + If there are more than 1 auto_increment columns, then intervals for + different columns may mix into the + auto_inc_intervals_in_cur_stmt_for_binlog list, which is logically wrong, + but there is no point in preventing this mixing by preventing intervals + from the secondly inserted column to come into the list, as such + prevention would be wrong too. + What will happen in the case of + INSERT INTO t1 (auto_inc) VALUES(NULL); + where t1 has a trigger which inserts into an auto_inc column of t2, is + that in binlog we'll store the interval of t1 and the interval of t2 (when + we store intervals, soon), then in slave, t1 will use both intervals, t2 + will use none; if t1 inserts the same number of rows as on master, + normally the 2nd interval will not be used by t1, which is fine. t2's + values will be wrong if t2's internal auto_increment counter is different + from what it was on master (which is likely). In 5.1, in mixed binlogging + mode, row-based binlogging is used for such cases where two + auto_increment columns are inserted. 
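The comment block ending here explains why reserved auto_increment intervals are collected per statement for the binlog. The container doing the collecting, Discrete_intervals_list (its append() appears a few hunks up in sql_class.cc), extends the tail interval when a new reservation continues it exactly and otherwise adds a new node. Below is a toy version of that merge-or-append behaviour; unlike the patch, merge_if_contiguous() here returns true on a successful merge, and a std::vector stands in for the linked list.

#include <cstdio>
#include <vector>

// Each interval is (start, number of values, increment).
struct Interval {
  unsigned long long start, values, increment;

  // Extend this interval if the new reservation continues it exactly.
  bool merge_if_contiguous(unsigned long long s, unsigned long long v,
                           unsigned long long incr)
  {
    if (incr == increment && s == start + values * increment)
    {
      values += v;
      return true;     // merged
    }
    return false;      // caller must append a new interval
  }
};

struct IntervalList {
  std::vector<Interval> intervals;

  void append(unsigned long long start, unsigned long long values,
              unsigned long long increment)
  {
    if (intervals.empty() ||
        !intervals.back().merge_if_contiguous(start, values, increment))
      intervals.push_back(Interval{start, values, increment});
  }
};

int main()
{
  IntervalList list;
  list.append(1, 3, 1);    // reserves 1,2,3
  list.append(4, 2, 1);    // contiguous: merged into 1..5
  list.append(10, 1, 1);   // gap: becomes a second interval
  for (const Interval &iv : list.intervals)
    printf("start=%llu values=%llu incr=%llu\n", iv.start, iv.values, iv.increment);
  return 0;
}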
+ */ + inline void record_first_successful_insert_id_in_cur_stmt(ulonglong id) + { + if (first_successful_insert_id_in_cur_stmt == 0) + first_successful_insert_id_in_cur_stmt= id; + } + inline ulonglong read_first_successful_insert_id_in_prev_stmt(void) + { + if (!stmt_depends_on_first_successful_insert_id_in_prev_stmt) + { + /* It's the first time we read it */ + first_successful_insert_id_in_prev_stmt_for_binlog= + first_successful_insert_id_in_prev_stmt; + stmt_depends_on_first_successful_insert_id_in_prev_stmt= 1; + } + return first_successful_insert_id_in_prev_stmt; + } + /* + Used by Intvar_log_event::exec_event() and by "SET INSERT_ID=#" + (mysqlbinlog). We'll soon add a variant which can take many intervals in + argument. + */ + inline void force_one_auto_inc_interval(ulonglong next_id) + { + auto_inc_intervals_forced.empty(); // in case of multiple SET INSERT_ID + auto_inc_intervals_forced.append(next_id, ULONGLONG_MAX, 0); + } + ulonglong limit_found_rows; ulonglong options; /* Bitmap of states */ longlong row_count_func; /* For the ROW_COUNT() function */ @@ -1157,7 +1273,6 @@ public: bool last_cuted_field; bool no_errors, password, is_fatal_error; bool query_start_used, rand_used, time_zone_used; - bool last_insert_id_used,insert_id_used, clear_next_insert_id; bool in_lock_tables; bool query_error, bootstrap, cleanup_done; bool tmp_table_used; @@ -1185,9 +1300,10 @@ public: /* Used by the sys_var class to store temporary values */ union { - my_bool my_bool_value; - long long_value; - ulong ulong_value; + my_bool my_bool_value; + long long_value; + ulong ulong_value; + ulonglong ulonglong_value; } sys_var_tmp; struct { @@ -1288,20 +1404,6 @@ public: inline void end_time() { time(&start_time); } inline void set_time(time_t t) { time_after_lock=start_time=user_time=t; } inline void lock_time() { time(&time_after_lock); } - inline void insert_id(ulonglong id_arg) - { - last_insert_id= id_arg; - insert_id_used=1; - } - inline ulonglong insert_id(void) - { - if (!last_insert_id_used) - { - last_insert_id_used=1; - current_insert_id=last_insert_id; - } - return last_insert_id; - } inline ulonglong found_rows(void) { return limit_found_rows; @@ -1418,7 +1520,17 @@ public: inline void set_current_stmt_binlog_row_based_if_mixed() { #ifdef HAVE_ROW_BASED_REPLICATION - if (variables.binlog_format == BINLOG_FORMAT_MIXED) + /* + If in a stored/function trigger, the caller should already have done the + change. We test in_sub_stmt to prevent introducing bugs where people + wouldn't ensure that, and would switch to row-based mode in the middle + of executing a stored function/trigger (which is too late, see also + reset_current_stmt_binlog_row_based()); this condition will make their + tests fail and so force them to propagate the + lex->binlog_row_based_if_mixed upwards to the caller. + */ + if ((variables.binlog_format == BINLOG_FORMAT_MIXED) && + (in_sub_stmt == 0)) current_stmt_binlog_row_based= TRUE; #endif } @@ -1437,8 +1549,26 @@ public: inline void reset_current_stmt_binlog_row_based() { #ifdef HAVE_ROW_BASED_REPLICATION - current_stmt_binlog_row_based= - test(variables.binlog_format == BINLOG_FORMAT_ROW); + /* + If there are temporary tables, don't reset back to + statement-based. Indeed it could be that: + CREATE TEMPORARY TABLE t SELECT UUID(); # row-based + # and row-based does not store updates to temp tables + # in the binlog. + INSERT INTO u SELECT * FROM t; # stmt-based + and then the INSERT will fail as data inserted into t was not logged. 
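read_first_successful_insert_id_in_prev_stmt() above snapshots the value for the binlog on its first use in a statement and then leaves that snapshot alone. A self-contained sketch of the same freeze-on-first-read idea, with simplified names:

#include <cassert>

// Minimal model: the first LAST_INSERT_ID() call in a statement freezes the
// value the statement-based binlog will carry, even if the session value
// changes later (for example inside a stored function). Stand-in names only.
struct Session {
  unsigned long long prev_stmt_id = 0;
  unsigned long long prev_stmt_id_for_binlog = 0;
  bool stmt_depends_on_prev_id = false;

  unsigned long long last_insert_id() {
    if (!stmt_depends_on_prev_id) {
      prev_stmt_id_for_binlog = prev_stmt_id;  // snapshot for the binlog
      stmt_depends_on_prev_id = true;          // stays set until stmt ends
    }
    return prev_stmt_id;
  }
};

int main() {
  Session s;
  s.prev_stmt_id = 42;
  assert(s.last_insert_id() == 42);
  s.prev_stmt_id = 100;                     // changed by a substatement
  assert(s.last_insert_id() == 100);        // live value is still returned
  assert(s.prev_stmt_id_for_binlog == 42);  // but the binlog copy stays frozen
  return 0;
}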
+ So we continue with row-based until the temp table is dropped. + If we are in a stored function or trigger, we mustn't reset in the + middle of its execution (as the binary logging way of a stored function + or trigger is decided when it starts executing, depending for example on + the caller (for a stored function: if caller is SELECT or + INSERT/UPDATE/DELETE...). + */ + if ((temporary_tables == NULL) && (in_sub_stmt == 0)) + { + current_stmt_binlog_row_based= + test(variables.binlog_format == BINLOG_FORMAT_ROW); + } #else current_stmt_binlog_row_based= FALSE; #endif @@ -1446,21 +1576,22 @@ public: /* Initialize the current database from a NULL-terminated string with length + If we run out of memory, we free the current database and return TRUE. + This way the user will notice the error as there will be no current + database selected (in addition to the error message set by malloc). */ - void set_db(const char *new_db, uint new_db_len) + bool set_db(const char *new_db, uint new_db_len) { - if (new_db) + /* Do not reallocate memory if current chunk is big enough. */ + if (db && new_db && db_length >= new_db_len) + memcpy(db, new_db, new_db_len+1); + else { - /* Do not reallocate memory if current chunk is big enough. */ - if (db && db_length >= new_db_len) - memcpy(db, new_db, new_db_len+1); - else - { - safeFree(db); - db= my_strndup(new_db, new_db_len, MYF(MY_WME)); - } - db_length= db ? new_db_len: 0; + x_free(db); + db= new_db ? my_strndup(new_db, new_db_len, MYF(MY_WME)) : NULL; } + db_length= db ? new_db_len : 0; + return new_db && !db; } void reset_db(char *new_db, uint new_db_len) { @@ -1630,7 +1761,7 @@ class select_insert :public select_result_interceptor { TABLE_LIST *table_list; TABLE *table; List<Item> *fields; - ulonglong last_insert_id; + ulonglong autoinc_value_of_last_inserted_row; // autogenerated or not COPY_INFO info; bool insert_into_view; @@ -1678,7 +1809,8 @@ public: virtual bool can_rollback_data() { return 1; } // Needed for access from local class MY_HOOKS in prepare(), since thd is proteted. - THD *get_thd(void) { return thd; } + const THD *get_thd(void) { return thd; } + const HA_CREATE_INFO *get_create_info() { return create_info; }; }; #include <myisam.h> diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 41e7e5df1f7..77d99862bf0 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -906,32 +906,13 @@ exit: (void)sp_drop_db_routines(thd, db); /* QQ Ignore errors for now */ error= Events::drop_schema_events(thd, db); /* - If this database was the client's selected database, we silently change the - client's selected database to nothing (to have an empty SELECT DATABASE() - in the future). For this we free() thd->db and set it to 0. But we don't do - free() for the slave thread. Indeed, doing a x_free() on it leads to nasty - problems (i.e. long painful debugging) because in this thread, thd->db is - the same as data_buf and db of the Query_log_event which is dropping the - database. So if you free() thd->db, you're freeing data_buf. You set - thd->db to 0 but not data_buf (thd->db and data_buf are two distinct - pointers which point to the same place). Then in ~Query_log_event(), we - have 'if (data_buf) free(data_buf)' data_buf is !=0 so this makes a - DOUBLE free(). - Side effects of this double free() are, randomly (depends on the machine), - when the slave is replicating a DROP DATABASE: - - garbage characters in the error message: - "Error 'Can't drop database 'test2'; database doesn't exist' on query - 'h4zI©'" - - segfault - - hang in "free(vio)" (yes!) 
in the I/O or SQL slave threads (so slave - server hangs at shutdown etc). + If this database was the client's selected database, we silently + change the client's selected database to nothing (to have an empty + SELECT DATABASE() in the future). For this we free() thd->db and set + it to 0. */ if (thd->db && !strcmp(thd->db, db)) - { - if (!(thd->slave_thread)) /* a slave thread will free it itself */ - x_free(thd->db); - thd->reset_db(NULL, 0); - } + thd->set_db(NULL, 0); VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); start_waiting_global_read_lock(thd); exit2: @@ -1227,38 +1208,52 @@ err: /* - Change default database. + Change the current database. SYNOPSIS mysql_change_db() - thd Thread handler - name Databasename - no_access_check True don't do access check. In this case name may be "" + thd thread handle + name database name + no_access_check if TRUE, don't do access check. In this + case name may be "" DESCRIPTION - Becasue the database name may have been given directly from the - communication packet (in case of 'connect' or 'COM_INIT_DB') - we have to do end space removal in this function. + Check that the database name corresponds to a valid and + existent database, check access rights (unless called with + no_access_check), and set the current database. This function + is called to change the current database upon user request + (COM_CHANGE_DB command) or temporarily, to execute a stored + routine. NOTES - Do as little as possible in this function, as it is not called for the - replication slave SQL thread (for that thread, setting of thd->db is done - in ::exec_event() methods of log_event.cc). - - This function does not send anything, including error messages to the - client, if that should be sent to the client, call net_send_error after - this function. + This function is not the only way to switch the database that + is currently employed. When the replication slave thread + switches the database before executing a query, it calls + thd->set_db directly. However, if the query, in turn, uses + a stored routine, the stored routine will use this function, + even if it's run on the slave. + + This function allocates the name of the database on the system + heap: this is necessary to be able to uniformly change the + database from any module of the server. Up to 5.0 different + modules were using different memory to store the name of the + database, and this led to memory corruption: a stack pointer + set by Stored Procedures was used by replication after the + stack address was long gone. + + This function does not send anything, including error + messages, to the client. If that should be sent to the client, + call net_send_error after this function. RETURN VALUES - 0 ok + 0 OK 1 error */ bool mysql_change_db(THD *thd, const char *name, bool no_access_check) { - int length, db_length; - char *dbname= thd->slave_thread ? 
(char *) name : - my_strdup((char *) name, MYF(MY_WME)); + int path_length, db_length; + char *db_name; char path[FN_REFLEN]; HA_CREATE_INFO create; bool system_db= 0; @@ -1270,32 +1265,35 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check) DBUG_ENTER("mysql_change_db"); DBUG_PRINT("enter",("name: '%s'",name)); - LINT_INIT(db_length); - - /* dbname can only be NULL if malloc failed */ - if (!dbname || !(db_length= strlen(dbname))) + if (name == NULL || name[0] == '\0' && no_access_check == FALSE) { - if (no_access_check && dbname) - { - /* Called from SP when orignal database was not set */ - system_db= 1; - goto end; - } - if (!(thd->slave_thread)) - x_free(dbname); /* purecov: inspected */ - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), - MYF(0)); /* purecov: inspected */ + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); DBUG_RETURN(1); /* purecov: inspected */ } - if (check_db_name(dbname)) + else if (name[0] == '\0') + { + /* Called from SP to restore the original database, which was NULL */ + DBUG_ASSERT(no_access_check); + system_db= 1; + db_name= NULL; + db_length= 0; + goto end; + } + /* + Now we need to make a copy because check_db_name requires a + non-constant argument. TODO: fix check_db_name. + */ + if ((db_name= my_strdup(name, MYF(MY_WME))) == NULL) + DBUG_RETURN(1); /* the error is set */ + db_length= strlen(db_name); + if (check_db_name(db_name)) { - my_error(ER_WRONG_DB_NAME, MYF(0), dbname); - if (!(thd->slave_thread)) - my_free(dbname, MYF(0)); + my_error(ER_WRONG_DB_NAME, MYF(0), db_name); + my_free(db_name, MYF(0)); DBUG_RETURN(1); } - DBUG_PRINT("info",("Use database: %s", dbname)); - if (!my_strcasecmp(system_charset_info, dbname, information_schema_name.str)) + DBUG_PRINT("info",("Use database: %s", db_name)); + if (!my_strcasecmp(system_charset_info, db_name, information_schema_name.str)) { system_db= 1; #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -1310,44 +1308,35 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check) if (test_all_bits(sctx->master_access, DB_ACLS)) db_access=DB_ACLS; else - db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, dbname, 0) | + db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, db_name, 0) | sctx->master_access); if (!(db_access & DB_ACLS) && (!grant_option || - check_grant_db(thd,dbname))) + check_grant_db(thd,db_name))) { my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), sctx->priv_user, sctx->priv_host, - dbname); + db_name); general_log_print(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR), - sctx->priv_user, sctx->priv_host, dbname); - if (!(thd->slave_thread)) - my_free(dbname,MYF(0)); + sctx->priv_user, sctx->priv_host, db_name); + my_free(db_name,MYF(0)); DBUG_RETURN(1); } } #endif - length= build_table_filename(path, sizeof(path), dbname, "", ""); - if (length && path[length-1] == FN_LIBCHAR) - path[length-1]=0; // remove ending '\' + path_length= build_table_filename(path, sizeof(path), db_name, "", ""); + if (path_length && path[path_length-1] == FN_LIBCHAR) + path[path_length-1]= '\0'; // remove ending '\' if (my_access(path,F_OK)) { - my_error(ER_BAD_DB_ERROR, MYF(0), dbname); - if (!(thd->slave_thread)) - my_free(dbname,MYF(0)); + my_error(ER_BAD_DB_ERROR, MYF(0), db_name); + my_free(db_name, MYF(0)); DBUG_RETURN(1); } end: - if (!(thd->slave_thread)) - x_free(thd->db); - if (dbname && dbname[0] == 0) - { - if (!(thd->slave_thread)) - my_free(dbname, MYF(0)); - thd->reset_db(NULL, 0); - } - else - thd->reset_db(dbname, db_length); // THD::~THD will free this + 
x_free(thd->db); + DBUG_ASSERT(db_name == NULL || db_name[0] != '\0'); + thd->reset_db(db_name, db_length); // THD::~THD will free this #ifndef NO_EMBEDDED_ACCESS_CHECKS if (!no_access_check) sctx->db_access= db_access; diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index b9ce1a53aaf..fae1d90cc9a 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -986,12 +986,12 @@ trunc_by_del: thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT); ha_enable_transaction(thd, FALSE); mysql_init_select(thd->lex); -#ifdef HAVE_ROW_BASED_REPLICATION + bool save_binlog_row_based= thd->current_stmt_binlog_row_based; thd->clear_current_stmt_binlog_row_based(); -#endif error= mysql_delete(thd, table_list, (COND*) 0, (SQL_LIST*) 0, HA_POS_ERROR, LL(0), TRUE); ha_enable_transaction(thd, TRUE); thd->options= save_options; + thd->current_stmt_binlog_row_based= save_binlog_row_based; DBUG_RETURN(error); } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 1f93bc99483..25ed03c4051 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -64,8 +64,8 @@ static int check_null_fields(THD *thd,TABLE *entry); #ifndef EMBEDDED_LIBRARY static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list); -static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, bool ignore, - char *query, uint query_length, bool log_on); +static int write_delayed(THD *thd, TABLE *table, enum_duplicates dup, + LEX_STRING query, bool ignore, bool log_on); static void end_delayed_insert(THD *thd); pthread_handler_t handle_delayed_insert(void *arg); static void unlink_blobs(register TABLE *table); @@ -219,9 +219,6 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, } } } - if (table->found_next_number_field) - table->mark_auto_increment_column(); - table->mark_columns_needed_for_insert(); // For the values we need select_priv #ifndef NO_EMBEDDED_ACCESS_CHECKS table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); @@ -448,10 +445,12 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, table->next_number_field=table->found_next_number_field; error=0; - id=0; thd->proc_info="update"; if (duplic != DUP_ERROR || ignore) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + if (duplic == DUP_REPLACE && + (!table->triggers || !table->triggers->has_delete_triggers())) + table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); /* let's *try* to start bulk inserts. It won't necessary start them as values_list.elements should be greater than @@ -480,6 +479,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, error= 1; } + table->mark_columns_needed_for_insert(); + if (table_list->prepare_where(thd, 0, TRUE) || table_list->prepare_check_option(thd)) error= 1; @@ -548,22 +549,13 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, #ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) { - error=write_delayed(thd, table, duplic, ignore, query, thd->query_length, log_on); + LEX_STRING const st_query = { query, thd->query_length }; + error=write_delayed(thd, table, duplic, st_query, ignore, log_on); query=0; } else #endif error=write_record(thd, table ,&info); - /* - If auto_increment values are used, save the first one - for LAST_INSERT_ID() and for the update log. - We can't use insert_id() as we don't want to touch the - last_insert_id_used flag. - */ - if (! 
id && thd->insert_id_used) - { // Get auto increment value - id= thd->last_insert_id; - } if (error) break; thd->row_count++; @@ -571,6 +563,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, free_underlaid_joins(thd, &thd->lex->select_lex); joins_freed= TRUE; + table->file->ha_release_auto_increment(); /* Now all rows are inserted. Time to update logs and sends response to @@ -581,7 +574,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, { if (!error) { - id=0; // No auto_increment id info.copied=values_list.elements; end_delayed_insert(thd); } @@ -595,11 +587,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, table->file->print_error(my_errno,MYF(0)); error=1; } - if (id && values_list.elements != 1) - thd->insert_id(id); // For update log - else if (table->next_number_field && info.copied) - id=table->next_number_field->val_int(); // Return auto_increment value - transactional_table= table->file->has_transactions(); if ((changed= (info.copied || info.deleted || info.updated))) @@ -648,18 +635,30 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, } } thd->proc_info="end"; + /* + We'll report to the client this id: + - if the table contains an autoincrement column and we successfully + inserted an autogenerated value, the autogenerated value. + - if the table contains no autoincrement column and LAST_INSERT_ID(X) was + called, X. + - if the table contains an autoincrement column, and some rows were + inserted, the id of the last "inserted" row (if IGNORE, that value may not + have been really inserted but ignored). + */ + id= (thd->first_successful_insert_id_in_cur_stmt > 0) ? + thd->first_successful_insert_id_in_cur_stmt : + (thd->arg_of_last_insert_id_function ? + thd->first_successful_insert_id_in_prev_stmt : + ((table->next_number_field && info.copied) ? 
+ table->next_number_field->val_int() : 0)); table->next_number_field=0; thd->count_cuted_fields= CHECK_FIELD_IGNORE; - thd->next_insert_id=0; // Reset this if wrongly used if (duplic != DUP_ERROR || ignore) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + if (duplic == DUP_REPLACE && + (!table->triggers || !table->triggers->has_delete_triggers())) + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); - /* Reset value of LAST_INSERT_ID if no rows where inserted */ - if (!info.copied && thd->insert_id_used) - { - thd->insert_id(0); - id=0; - } if (error) goto abort; if (values_list.elements == 1 && (!(thd->options & OPTION_WARNINGS) || @@ -681,8 +680,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, thd->row_count_func= info.copied+info.deleted+info.updated; ::send_ok(thd, (ulong) thd->row_count_func, id, buff); } - if (table != NULL) - table->file->release_auto_increment(); thd->abort_on_warning= 0; DBUG_RETURN(FALSE); @@ -692,7 +689,7 @@ abort: end_delayed_insert(thd); #endif if (table != NULL) - table->file->release_auto_increment(); + table->file->ha_release_auto_increment(); if (!joins_freed) free_underlaid_joins(thd, &thd->lex->select_lex); thd->abort_on_warning= 0; @@ -1001,6 +998,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) int error, trg_error= 0; char *key=0; MY_BITMAP *save_read_set, *save_write_set; + ulonglong prev_insert_id= table->file->next_insert_id; + ulonglong insert_id_for_cur_row= 0; DBUG_ENTER("write_record"); info->records++; @@ -1013,10 +1012,20 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) while ((error=table->file->ha_write_row(table->record[0]))) { uint key_nr; + /* + If we do more than one iteration of this loop, from the second one the + row will have an explicit value in the autoinc field, which was set at + the first call of handler::update_auto_increment(). So we must save + the autogenerated value to avoid thd->insert_id_for_cur_row to become + 0. + */ + if (table->file->insert_id_for_cur_row > 0) + insert_id_for_cur_row= table->file->insert_id_for_cur_row; + else + table->file->insert_id_for_cur_row= insert_id_for_cur_row; bool is_duplicate_key_error; if (table->file->is_fatal_error(error, HA_CHECK_DUP)) goto err; - table->file->restore_auto_increment(); // it's too early here! 
BUG#20188 is_duplicate_key_error= table->file->is_fatal_error(error, 0); if (!is_duplicate_key_error) { @@ -1044,7 +1053,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) if (info->handle_duplicates == DUP_REPLACE && table->next_number_field && key_nr == table->s->next_number_index && - table->file->auto_increment_column_changed) + (insert_id_for_cur_row > 0)) goto err; if (table->file->ha_table_flags() & HA_DUPLICATE_POS) { @@ -1103,22 +1112,29 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) if (res == VIEW_CHECK_ERROR) goto before_trg_err; - if (thd->clear_next_insert_id) - { - /* Reset auto-increment cacheing if we do an update */ - thd->clear_next_insert_id= 0; - thd->next_insert_id= 0; - } if ((error=table->file->ha_update_row(table->record[1], table->record[0]))) { if (info->ignore && !table->file->is_fatal_error(error, HA_CHECK_DUP_KEY)) + { + table->file->restore_auto_increment(prev_insert_id); goto ok_or_after_trg_err; + } goto err; } info->updated++; - + /* + If ON DUP KEY UPDATE updates a row instead of inserting one, and + there is an auto_increment column, then SELECT LAST_INSERT_ID() + returns the id of the updated row: + */ + if (table->next_number_field) + { + longlong field_val= table->next_number_field->val_int(); + thd->record_first_successful_insert_id_in_cur_stmt(field_val); + table->file->adjust_next_insert_id_after_explicit_value(field_val); + } trg_error= (table->triggers && table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, TRG_ACTION_AFTER, TRUE)); @@ -1147,16 +1163,11 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH) && (!table->triggers || !table->triggers->has_delete_triggers())) { - if (thd->clear_next_insert_id) - { - /* Reset auto-increment cacheing if we do an update */ - thd->clear_next_insert_id= 0; - thd->next_insert_id= 0; - } if ((error=table->file->ha_update_row(table->record[1], table->record[0]))) goto err; info->deleted++; + thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row); /* Since we pretend that we have done insert we should call its after triggers. @@ -1185,6 +1196,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) } } } + thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row); /* Restore column maps if they where replaced during an duplicate key problem. 
@@ -1198,12 +1210,13 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) if (!info->ignore || table->file->is_fatal_error(error, HA_CHECK_DUP)) goto err; - table->file->restore_auto_increment(); + table->file->restore_auto_increment(prev_insert_id); goto ok_or_after_trg_err; } after_trg_n_copied_inc: info->copied++; + thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row); trg_error= (table->triggers && table->triggers->process_triggers(thd, TRG_EVENT_INSERT, TRG_ACTION_AFTER, TRUE)); @@ -1223,6 +1236,7 @@ err: table->file->print_error(error,MYF(0)); before_trg_err: + table->file->restore_auto_increment(prev_insert_id); if (key) my_safe_afree(key, table->s->max_unique_length, MAX_KEY_LENGTH); table->column_bitmaps_set(save_read_set, save_write_set); @@ -1285,14 +1299,20 @@ public: char *record; enum_duplicates dup; time_t start_time; - bool query_start_used,last_insert_id_used,insert_id_used, ignore, log_query; - ulonglong last_insert_id; + bool query_start_used, ignore, log_query; + bool stmt_depends_on_first_successful_insert_id_in_prev_stmt; + ulonglong first_successful_insert_id_in_prev_stmt; timestamp_auto_set_type timestamp_field_type; + LEX_STRING query; - delayed_row(enum_duplicates dup_arg, bool ignore_arg, bool log_query_arg) - :record(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg) {} + delayed_row(LEX_STRING const query_arg, enum_duplicates dup_arg, + bool ignore_arg, bool log_query_arg) + : record(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg), + query(query_arg) + {} ~delayed_row() { + x_free(query.str); x_free(record); } }; @@ -1300,9 +1320,6 @@ public: class delayed_insert :public ilink { uint locks_in_memory; - char *query; - ulong query_length; - ulong query_allocated; public: THD thd; TABLE *table; @@ -1316,7 +1333,7 @@ public: TABLE_LIST table_list; // Argument delayed_insert() - :locks_in_memory(0), query(0), query_length(0), query_allocated(0), + :locks_in_memory(0), table(0),tables_in_use(0),stacked_inserts(0), status(0), dead(0), group_count(0) { @@ -1327,6 +1344,11 @@ public: thd.command=COM_DELAYED_INSERT; thd.lex->current_select= 0; // for my_message_sql thd.lex->sql_command= SQLCOM_INSERT; // For innodb::store_lock() + /* + Statement-based replication of INSERT DELAYED has problems with RAND() + and user vars, so in mixed mode we go to row-based. 
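The delayed_row change above makes each queued row own its own heap copy of the statement text and release it in its destructor, instead of sharing one buffer per delayed-insert thread. A rough standalone imitation of that ownership rule (QueuedRow and its members are invented names):

#include <cassert>
#include <cstdlib>
#include <cstring>

// Rough imitation of the ownership rule: the queued row, not the queue,
// owns a heap copy of the statement text.
struct QueuedRow {
  char *query = nullptr;
  size_t length = 0;

  bool set_query(const char *q, size_t len) {
    if (q == nullptr)
      return true;                 // nothing to log for this row
    query = static_cast<char *>(malloc(len + 1));
    if (query == nullptr)
      return false;
    memcpy(query, q, len);
    query[len] = '\0';
    length = len;
    return true;
  }
  ~QueuedRow() { free(query); }    // freed with the row, as in ~delayed_row()
};

int main() {
  const char *text = "insert delayed into t1 values (1)";
  QueuedRow row;
  if (!row.set_query(text, strlen(text)))
    return 1;
  assert(row.length == strlen(text));
  return 0;                        // ~QueuedRow releases the copy
}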
+ */ + thd.set_current_stmt_binlog_row_based_if_mixed(); bzero((char*) &thd.net, sizeof(thd.net)); // Safety bzero((char*) &table_list, sizeof(table_list)); // Safety @@ -1342,7 +1364,6 @@ public: } ~delayed_insert() { - my_free(query, MYF(MY_WME|MY_ALLOW_ZERO_PTR)); /* The following is not really needed, but just for safety */ delayed_row *row; while ((row=rows.get())) @@ -1362,25 +1383,6 @@ public: VOID(pthread_cond_broadcast(&COND_thread_count)); /* Tell main we are ready */ } - int set_query(char const *q, ulong qlen) { - if (q && qlen > 0) - { - if (query_allocated < qlen + 1) - { - ulong const flags(MY_WME|MY_FREE_ON_ERROR|MY_ALLOW_ZERO_PTR); - query= my_realloc(query, qlen + 1, MYF(flags)); - if (query == 0) - return HA_ERR_OUT_OF_MEM; - query_allocated= qlen; - } - query_length= qlen; - memcpy(query, q, qlen + 1); - } - else - query_length= 0; - return 0; - } - /* The following is for checking when we can delete ourselves */ inline void lock() { @@ -1658,13 +1660,14 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) /* Put a question in queue */ -static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, - bool ignore, char *query, uint query_length, - bool log_on) +static int +write_delayed(THD *thd,TABLE *table, enum_duplicates duplic, + LEX_STRING query, bool ignore, bool log_on) { - delayed_row *row=0; + delayed_row *row; delayed_insert *di=thd->di; DBUG_ENTER("write_delayed"); + DBUG_PRINT("enter", ("query = '%s' length %u", query.str, query.length)); thd->proc_info="waiting for handler insert"; pthread_mutex_lock(&di->mutex); @@ -1672,18 +1675,44 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, pthread_cond_wait(&di->cond_client,&di->mutex); thd->proc_info="storing row into queue"; - if (thd->killed || !(row= new delayed_row(duplic, ignore, log_on))) + if (thd->killed) goto err; + /* + Take a copy of the query string, if there is any. The string will + be free'ed when the row is destroyed. If there is no query string, + we don't do anything special. + */ + + if (query.str) + { + char *str; + if (!(str= my_strndup(query.str, query.length, MYF(MY_WME)))) + goto err; + query.str= str; + } + row= new delayed_row(query, duplic, ignore, log_on); + if (row == NULL) + { + my_free(query.str, MYF(MY_WME)); + goto err; + } + if (!(row->record= (char*) my_malloc(table->s->reclength, MYF(MY_WME)))) goto err; memcpy(row->record, table->record[0], table->s->reclength); - di->set_query(query, query_length); row->start_time= thd->start_time; row->query_start_used= thd->query_start_used; - row->last_insert_id_used= thd->last_insert_id_used; - row->insert_id_used= thd->insert_id_used; - row->last_insert_id= thd->last_insert_id; + /* + those are for the binlog: LAST_INSERT_ID() has been evaluated at this + time, so record does not need it, but statement-based binlogging of the + INSERT will need when the row is actually inserted. + As for SET INSERT_ID, DELAYED does not honour it (BUG#20830). 
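The assignments that follow this comment copy the LAST_INSERT_ID-related session state into the queued row so the delayed-insert handler thread can restore it later. A compact model of that hand-off, with plain structs standing in for THD and delayed_row:

#include <cassert>

// The client thread snapshots its LAST_INSERT_ID-related state into the
// queued row; the handler thread restores it before writing and binlogging.
struct SessionState {
  unsigned long long first_id_in_prev_stmt = 0;
  bool depends_on_prev_id = false;
};

struct RowSnapshot {
  unsigned long long first_id_in_prev_stmt;
  bool depends_on_prev_id;
};

RowSnapshot snapshot_for_row(const SessionState &client) {
  return {client.first_id_in_prev_stmt, client.depends_on_prev_id};
}

void restore_in_handler(SessionState &handler, const RowSnapshot &row) {
  handler.first_id_in_prev_stmt = row.first_id_in_prev_stmt;
  handler.depends_on_prev_id = row.depends_on_prev_id;
}

int main() {
  SessionState client, handler;
  client.first_id_in_prev_stmt = 7;   // LAST_INSERT_ID() was already evaluated
  client.depends_on_prev_id = true;
  RowSnapshot snap = snapshot_for_row(client);  // taken while queueing the row
  restore_in_handler(handler, snap);            // later, in the handler thread
  assert(handler.first_id_in_prev_stmt == 7 && handler.depends_on_prev_id);
  return 0;
}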
+ */ + row->stmt_depends_on_first_successful_insert_id_in_prev_stmt= + thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt; + row->first_successful_insert_id_in_prev_stmt= + thd->first_successful_insert_id_in_prev_stmt; row->timestamp_field_type= table->timestamp_field_type; di->rows.push_back(row); @@ -1937,6 +1966,7 @@ pthread_handler_t handle_delayed_insert(void *arg) MYSQL_LOCK *lock=thd->lock; thd->lock=0; pthread_mutex_unlock(&di->mutex); + di->table->file->ha_release_auto_increment(); mysql_unlock_tables(thd, lock); di->group_count=0; pthread_mutex_lock(&di->mutex); @@ -2012,9 +2042,8 @@ bool delayed_insert::handle_inserts(void) { int error; ulong max_rows; - bool using_ignore=0, - using_bin_log= mysql_bin_log.is_open(); - + bool using_ignore= 0, using_opt_replace= 0, + using_bin_log= mysql_bin_log.is_open(); delayed_row *row; DBUG_ENTER("handle_inserts"); @@ -2037,7 +2066,7 @@ bool delayed_insert::handle_inserts(void) if (thd.killed || table->s->version != refresh_version) { thd.killed= THD::KILL_CONNECTION; - max_rows= ~(ulong)0; // Do as much as possible + max_rows= ULONG_MAX; // Do as much as possible } /* @@ -2049,13 +2078,6 @@ bool delayed_insert::handle_inserts(void) table->file->extra(HA_EXTRA_WRITE_CACHE); pthread_mutex_lock(&mutex); - /* Reset auto-increment cacheing */ - if (thd.clear_next_insert_id) - { - thd.next_insert_id= 0; - thd.clear_next_insert_id= 0; - } - while ((row=rows.get())) { stacked_inserts--; @@ -2064,9 +2086,12 @@ bool delayed_insert::handle_inserts(void) thd.start_time=row->start_time; thd.query_start_used=row->query_start_used; - thd.last_insert_id=row->last_insert_id; - thd.last_insert_id_used=row->last_insert_id_used; - thd.insert_id_used=row->insert_id_used; + /* for the binlog, forget auto_increment ids generated by previous rows */ +// thd.auto_inc_intervals_in_cur_stmt_for_binlog.empty(); + thd.first_successful_insert_id_in_prev_stmt= + row->first_successful_insert_id_in_prev_stmt; + thd.stmt_depends_on_first_successful_insert_id_in_prev_stmt= + row->stmt_depends_on_first_successful_insert_id_in_prev_stmt; table->timestamp_field_type= row->timestamp_field_type; info.ignore= row->ignore; @@ -2077,6 +2102,13 @@ bool delayed_insert::handle_inserts(void) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); using_ignore=1; } + if (info.handle_duplicates == DUP_REPLACE && + (!table->triggers || + !table->triggers->has_delete_triggers())) + { + table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); + using_opt_replace= 1; + } thd.clear_error(); // reset error for binlog if (write_record(&thd, table, &info)) { @@ -2084,11 +2116,33 @@ bool delayed_insert::handle_inserts(void) thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status); row->log_query = 0; } + if (using_ignore) { using_ignore=0; table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); } + if (using_opt_replace) + { + using_opt_replace= 0; + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); + } + + if (row->log_query && row->query.str != NULL && mysql_bin_log.is_open()) + { + /* + If the query has several rows to insert, only the first row will come + here. In row-based binlogging, this means that the first row will be + written to binlog as one Table_map event and one Rows event (due to an + event flush done in binlog_query()), then all other rows of this query + will be binlogged together as one single Table_map event and one + single Rows event. 
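The HA_EXTRA_WRITE_CAN_REPLACE guard used in this hunk, and repeated in mysql_insert, select_insert::prepare and LOAD DATA elsewhere in the patch, boils down to one condition: the engine may overwrite a duplicate row in place only for REPLACE, and only when no DELETE trigger would have to fire for the row being displaced. Restated as a small standalone predicate:

#include <cassert>

// Condensed restatement of the guard; local, simplified declarations.
enum DuplicateHandling { DUP_ERROR, DUP_UPDATE, DUP_REPLACE };

bool engine_may_overwrite(DuplicateHandling dup, bool has_delete_triggers) {
  return dup == DUP_REPLACE && !has_delete_triggers;
}

int main() {
  assert(engine_may_overwrite(DUP_REPLACE, false));    // plain REPLACE
  assert(!engine_may_overwrite(DUP_REPLACE, true));     // must delete + insert
  assert(!engine_may_overwrite(DUP_ERROR, false));      // ordinary INSERT
  return 0;
}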
+ */ + thd.binlog_query(THD::ROW_QUERY_TYPE, + row->query.str, row->query.length, + FALSE, FALSE); + } + if (table->s->blob_fields) free_delayed_insert_blobs(table); thread_safe_sub(delayed_rows_in_use,1,&LOCK_delayed_status); @@ -2135,13 +2189,25 @@ bool delayed_insert::handle_inserts(void) pthread_cond_broadcast(&cond_client); // If waiting clients } } - thd.proc_info=0; pthread_mutex_unlock(&mutex); - /* After releasing the mutex, to prevent deadlocks. */ - if (mysql_bin_log.is_open()) - thd.binlog_query(THD::ROW_QUERY_TYPE, query, query_length, FALSE, FALSE); +#ifdef HAVE_ROW_BASED_REPLICATION + /* + We need to flush the pending event when using row-based + replication since the flushing normally done in binlog_query() is + not done last in the statement: for delayed inserts, the insert + statement is logged *before* all rows are inserted. + + We can flush the pending event without checking the thd->lock + since the delayed insert *thread* is not inside a stored function + or trigger. + + TODO: Move the logging to last in the sequence of rows. + */ + if (thd.current_stmt_binlog_row_based) + thd.binlog_flush_pending_rows_event(TRUE); +#endif /* HAVE_ROW_BASED_REPLICATION */ if ((error=table->file->extra(HA_EXTRA_NO_CACHE))) { // This shouldn't happen @@ -2229,7 +2295,7 @@ select_insert::select_insert(TABLE_LIST *table_list_par, TABLE *table_par, enum_duplicates duplic, bool ignore_check_option_errors) :table_list(table_list_par), table(table_par), fields(fields_par), - last_insert_id(0), + autoinc_value_of_last_inserted_row(0), insert_into_view(table_list_par && table_list_par->view != 0) { bzero((char*) &info,sizeof(info)); @@ -2334,6 +2400,9 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) thd->cuted_fields=0; if (info.ignore || info.handle_duplicates != DUP_ERROR) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + if (info.handle_duplicates == DUP_REPLACE && + (!table->triggers || !table->triggers->has_delete_triggers())) + table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); thd->no_trans_update= 0; thd->abort_on_warning= (!info.ignore && (thd->variables.sql_mode & @@ -2343,6 +2412,10 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) check_that_all_fields_are_given_values(thd, table, table_list)) || table_list->prepare_where(thd, 0, TRUE) || table_list->prepare_check_option(thd)); + + if (!res) + table->mark_columns_needed_for_insert(); + DBUG_RETURN(res); } @@ -2438,15 +2511,20 @@ bool select_insert::send_data(List<Item> &values) if (table->next_number_field) { /* + If no value has been autogenerated so far, we need to remember the + value we just saw, we may need to send it to client in the end. + */ + if (thd->first_successful_insert_id_in_cur_stmt == 0) // optimization + autoinc_value_of_last_inserted_row= + table->next_number_field->val_int(); + /* Clear auto-increment field for the next record, if triggers are used we will clear it twice, but this should be cheap. 
*/ table->next_number_field->reset(); - if (!last_insert_id && thd->insert_id_used) - last_insert_id= thd->insert_id(); } } - table->file->release_auto_increment(); + table->file->ha_release_auto_increment(); DBUG_RETURN(error); } @@ -2508,8 +2586,6 @@ void select_insert::send_error(uint errcode,const char *err) { if (!table->file->has_transactions()) { - if (last_insert_id) - thd->insert_id(last_insert_id); // For binary log if (mysql_bin_log.is_open()) { thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query, thd->query_length, @@ -2529,10 +2605,12 @@ void select_insert::send_error(uint errcode,const char *err) bool select_insert::send_eof() { int error,error2; + ulonglong id; DBUG_ENTER("select_insert::send_eof"); error= (!thd->prelocked_mode) ? table->file->ha_end_bulk_insert():0; table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); if (info.copied || info.deleted || info.updated) { @@ -2554,8 +2632,6 @@ bool select_insert::send_eof() thd->options|= OPTION_STATUS_NO_TRANS_UPDATE; } - if (last_insert_id) - thd->insert_id(last_insert_id); // For binary log /* Write to binlog before commiting transaction. No statement will be written by the binlog_query() below in RBR mode. All the @@ -2585,7 +2661,13 @@ bool select_insert::send_eof() sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (ulong) (info.deleted+info.updated), (ulong) thd->cuted_fields); thd->row_count_func= info.copied+info.deleted+info.updated; - ::send_ok(thd, (ulong) thd->row_count_func, last_insert_id, buff); + + id= (thd->first_successful_insert_id_in_cur_stmt > 0) ? + thd->first_successful_insert_id_in_cur_stmt : + (thd->arg_of_last_insert_id_function ? + thd->first_successful_insert_id_in_prev_stmt : + (info.copied ? autoinc_value_of_last_inserted_row : 0)); + ::send_ok(thd, (ulong) thd->row_count_func, id, buff); DBUG_RETURN(0); } @@ -2751,21 +2833,6 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, } -class MY_HOOKS : public TABLEOP_HOOKS -{ -public: - MY_HOOKS(select_create *x) : ptr(x) { } - virtual void do_prelock(TABLE **tables, uint count) - { - if (ptr->get_thd()->current_stmt_binlog_row_based) - ptr->binlog_show_create_table(tables, count); - } - -private: - select_create *ptr; -}; - - int select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) { @@ -2778,8 +2845,9 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) MY_HOOKS(select_create *x) : ptr(x) { } virtual void do_prelock(TABLE **tables, uint count) { - if (ptr->get_thd()->current_stmt_binlog_row_based) - ptr->binlog_show_create_table(tables, count); + if (ptr->get_thd()->current_stmt_binlog_row_based && + !(ptr->get_create_info()->options & HA_LEX_CREATE_TMP_TABLE)) + ptr->binlog_show_create_table(tables, count); } private: @@ -2817,6 +2885,9 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) thd->cuted_fields=0; if (info.ignore || info.handle_duplicates != DUP_ERROR) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + if (info.handle_duplicates == DUP_REPLACE && + (!table->triggers || !table->triggers->has_delete_triggers())) + table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); if (!thd->prelocked_mode) table->file->ha_start_bulk_insert((ha_rows) 0); thd->no_trans_update= 0; @@ -2824,8 +2895,10 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))); - DBUG_RETURN(check_that_all_fields_are_given_values(thd, table, - table_list)); + if 
(check_that_all_fields_are_given_values(thd, table, table_list)) + DBUG_RETURN(1); + table->mark_columns_needed_for_insert(); + DBUG_RETURN(0); } @@ -2899,6 +2972,7 @@ bool select_create::send_eof() else { table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); VOID(pthread_mutex_lock(&LOCK_open)); mysql_unlock_tables(thd, thd->extra_lock); if (!table->s->tmp_table) @@ -2924,6 +2998,7 @@ void select_create::abort() if (table) { table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); handlerton *table_type=table->s->db_type; if (!table->s->tmp_table) { diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index f6031a1f2fd..ecc0bb6aad7 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -141,6 +141,7 @@ void lex_start(THD *thd, const uchar *buf, uint length) lex->select_lex.link_next= lex->select_lex.slave= lex->select_lex.next= 0; lex->select_lex.link_prev= (st_select_lex_node**)&(lex->all_selects_list); lex->select_lex.options= 0; + lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED; lex->select_lex.init_order(); lex->select_lex.group_list.empty(); lex->describe= 0; @@ -183,7 +184,6 @@ void lex_start(THD *thd, const uchar *buf, uint length) lex->nest_level=0 ; lex->allow_sum_func= 0; lex->in_sum_func= NULL; - lex->binlog_row_based_if_mixed= 0; DBUG_VOID_RETURN; } @@ -1071,6 +1071,7 @@ int MYSQLlex(void *arg, void *yythd) void st_select_lex_node::init_query() { options= 0; + sql_cache= SQL_CACHE_UNSPECIFIED; linkage= UNSPECIFIED_TYPE; no_error= no_table_names_allowed= 0; uncacheable= 0; @@ -1147,6 +1148,7 @@ void st_select_lex::init_select() table_join_options= 0; in_sum_expr= with_wild= 0; options= 0; + sql_cache= SQL_CACHE_UNSPECIFIED; braces= 0; when_list.empty(); expr_list.empty(); @@ -1625,6 +1627,9 @@ void Query_tables_list::reset_query_tables_list(bool init) sroutines_list.empty(); sroutines_list_own_last= sroutines_list.next; sroutines_list_own_elements= 0; +#ifdef HAVE_ROW_BASED_REPLICATION + binlog_row_based_if_mixed= FALSE; +#endif } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index c889c2c5f94..3242e20f857 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -338,6 +338,14 @@ protected: public: ulonglong options; + + /* + In sql_cache we store SQL_CACHE flag as specified by user to be + able to restore SELECT statement from internal structures. + */ + enum e_sql_cache { SQL_CACHE_UNSPECIFIED, SQL_NO_CACHE, SQL_CACHE }; + e_sql_cache sql_cache; + /* result of this query can't be cached, bit field, can be : UNCACHEABLE_DEPENDENT @@ -793,6 +801,16 @@ public: byte **sroutines_list_own_last; uint sroutines_list_own_elements; +#ifdef HAVE_ROW_BASED_REPLICATION + /* + Tells if the parsing stage detected that some items require row-based + binlogging to give a reliable binlog/replication, or if we will use + stored functions or triggers which themselves need require row-based + binlogging. + */ + bool binlog_row_based_if_mixed; +#endif + /* These constructor and destructor serve for creation/destruction of Query_tables_list instances which are used as backup storage. @@ -975,11 +993,7 @@ typedef struct st_lex : public Query_tables_list uint8 create_view_check; bool drop_if_exists, drop_temporary, local_file, one_shot_set; bool in_comment, ignore_space, verbose, no_write_to_binlog; - /* - binlog_row_based_if_mixed tells if the parsing stage detected that some - items require row-based binlogging to give a reliable binlog/replication. 
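Earlier in this hunk, st_select_lex_node gains a tri-state sql_cache member so the server can re-print a SELECT with exactly the cache hint the user wrote, instead of guessing from the query-cache options. A compact model of that round-trip (the local enum and function names are invented):

#include <cassert>
#include <string>

// Remember whether the user wrote SQL_CACHE, SQL_NO_CACHE, or neither, so
// re-printing the SELECT emits exactly what was written.
enum SqlCacheHint { CACHE_UNSPECIFIED, CACHE_OFF, CACHE_ON };

std::string print_cache_hint(SqlCacheHint hint) {
  switch (hint) {
    case CACHE_OFF: return "sql_no_cache ";
    case CACHE_ON:  return "sql_cache ";
    case CACHE_UNSPECIFIED: break;   // user wrote neither keyword
  }
  return "";
}

int main() {
  assert(print_cache_hint(CACHE_UNSPECIFIED).empty());
  assert(print_cache_hint(CACHE_OFF) == "sql_no_cache ");
  assert(print_cache_hint(CACHE_ON) == "sql_cache ");
  return 0;
}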
- */ - bool tx_chain, tx_release, binlog_row_based_if_mixed; + bool tx_chain, tx_release; /* Special JOIN::prepare mode: changing of query is prohibited. When creating a view, we need to just check its syntax omitting diff --git a/sql/sql_load.cc b/sql/sql_load.cc index f8debbedc62..25cb7ff4c1e 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -187,9 +187,6 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, table= table_list->table; transactional_table= table->file->has_transactions(); - if (table->found_next_number_field) - table->mark_auto_increment_column(); - if (!fields_vars.elements) { Field **field; @@ -232,6 +229,8 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, DBUG_RETURN(TRUE); } + table->mark_columns_needed_for_insert(); + uint tot_length=0; bool use_blobs= 0, use_vars= 0; List_iterator_fast<Item> it(fields_vars); @@ -362,6 +361,10 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, if (ignore || handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + if (handle_duplicates == DUP_REPLACE && + (!table->triggers || + !table->triggers->has_delete_triggers())) + table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); if (!thd->prelocked_mode) table->file->ha_start_bulk_insert((ha_rows) 0); table->copy_blobs=1; @@ -386,6 +389,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, error= 1; } table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); table->next_number_field=0; } ha_enable_transaction(thd, TRUE); @@ -497,13 +501,12 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, error=ha_autocommit_or_rollback(thd,error); err: + table->file->ha_release_auto_increment(); if (thd->lock) { mysql_unlock_tables(thd, thd->lock); thd->lock=0; } - if (table != NULL) - table->file->release_auto_increment(); thd->abort_on_warning= 0; DBUG_RETURN(error); } @@ -639,14 +642,6 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, thd->no_trans_update= no_trans_update; /* - If auto_increment values are used, save the first one - for LAST_INSERT_ID() and for the binary/update log. - We can't use insert_id() as we don't want to touch the - last_insert_id_used flag. - */ - if (!id && thd->insert_id_used) - id= thd->last_insert_id; - /* We don't need to reset auto-increment field since we are restoring its default value at the beginning of each loop iteration. */ @@ -662,8 +657,6 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, thd->row_count++; continue_loop:; } - if (id && !read_info.error) - thd->insert_id(id); // For binary/update log DBUG_RETURN(test(read_info.error)); } @@ -807,14 +800,6 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, if (write_record(thd, table, &info)) DBUG_RETURN(1); /* - If auto_increment values are used, save the first one - for LAST_INSERT_ID() and for the binary/update log. - We can't use insert_id() as we don't want to touch the - last_insert_id_used flag. - */ - if (!id && thd->insert_id_used) - id= thd->last_insert_id; - /* We don't need to reset auto-increment field since we are restoring its default value at the beginning of each loop iteration. 
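Several hunks in this patch replace ad-hoc cleanup with a single ha_release_auto_increment() call at the end of the statement, as the LOAD DATA error path above now does. A toy model of the reserve/consume/release idea behind it (the names and the give-back rule here are simplifications, not the handler API):

#include <cassert>

// Toy reserve/consume/release model; treat it as illustrative only.
struct AutoIncCounter {
  unsigned long long next_free = 1;     // the table's counter
  unsigned long long reserved_end = 0;  // end of the block handed to this stmt

  unsigned long long reserve(unsigned long long n) {
    unsigned long long first = next_free;
    next_free += n;                     // hand out a block of n values
    reserved_end = next_free;
    return first;
  }
  // Called once at the end of the statement: unused tail goes back, but only
  // if nobody reserved more values after us.
  void release_unused(unsigned long long first_unused) {
    if (reserved_end == next_free && first_unused < reserved_end)
      next_free = first_unused;
    reserved_end = 0;
  }
};

int main() {
  AutoIncCounter t;
  unsigned long long first = t.reserve(10);  // LOAD DATA asks for 10 values
  assert(first == 1);
  t.release_unused(first + 3);               // only 3 rows were actually loaded
  assert(t.next_free == 4);                  // 4..10 are available again
  return 0;
}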
*/ @@ -833,8 +818,6 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, thd->row_count++; continue_loop:; } - if (id && !read_info.error) - thd->insert_id(id); // For binary/update log DBUG_RETURN(test(read_info.error)); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 308f7a0ac60..16136373002 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2563,11 +2563,6 @@ mysql_execute_command(THD *thd) statistic_increment(thd->status_var.com_stat[lex->sql_command], &LOCK_status); -#ifdef HAVE_ROW_BASED_REPLICATION - if (lex->binlog_row_based_if_mixed) - thd->set_current_stmt_binlog_row_based_if_mixed(); -#endif /*HAVE_ROW_BASED_REPLICATION*/ - switch (lex->sql_command) { case SQLCOM_SHOW_EVENTS: if ((res= check_access(thd, EVENT_ACL, thd->lex->select_lex.db, 0, 0, 0, @@ -3142,8 +3137,7 @@ end_with_restore_list: lex->key_list, select_lex->order_list.elements, (ORDER *) select_lex->order_list.first, - lex->duplicates, lex->ignore, &lex->alter_info, - 1); + lex->ignore, &lex->alter_info, 1); break; } #endif /*DONT_ALLOW_SHOW_COMMANDS*/ @@ -3394,8 +3388,9 @@ end_with_restore_list: res= mysql_insert(thd, all_tables, lex->field_list, lex->many_values, lex->update_list, lex->value_list, lex->duplicates, lex->ignore); + /* do not show last insert ID if VIEW does not have auto_inc */ if (first_table->view && !first_table->contain_auto_increment) - thd->last_insert_id= 0; // do not show last insert ID if VIEW have not it + thd->first_successful_insert_id_in_cur_stmt= 0; break; } case SQLCOM_REPLACE_SELECT: @@ -3455,9 +3450,9 @@ end_with_restore_list: /* revert changes for SP */ select_lex->table_list.first= (byte*) first_table; } - + /* do not show last insert ID if VIEW does not have auto_inc */ if (first_table->view && !first_table->contain_auto_increment) - thd->last_insert_id= 0; // do not show last insert ID if VIEW have not it + thd->first_successful_insert_id_in_cur_stmt= 0; break; } case SQLCOM_TRUNCATE: @@ -5201,9 +5196,6 @@ end: */ if (thd->one_shot_set && lex->sql_command != SQLCOM_SET_OPTION) reset_one_shot_variables(thd); -#ifdef HAVE_ROW_BASED_REPLICATION - thd->reset_current_stmt_binlog_row_based(); -#endif /*HAVE_ROW_BASED_REPLICATION*/ /* The return value for ROW_COUNT() is "implementation dependent" if the @@ -5835,6 +5827,7 @@ mysql_init_query(THD *thd, uchar *buf, uint length) DESCRIPTION This needs to be called before execution of every statement (prepared or conventional). + It is not called by substatements of routines. TODO Make it a method of THD and align its name with the rest of @@ -5845,9 +5838,12 @@ mysql_init_query(THD *thd, uchar *buf, uint length) void mysql_reset_thd_for_next_command(THD *thd) { DBUG_ENTER("mysql_reset_thd_for_next_command"); + DBUG_ASSERT(!thd->spcont); /* not for substatements of routines */ thd->free_list= 0; thd->select_number= 1; - thd->last_insert_id_used= thd->query_start_used= thd->insert_id_used=0; + thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty(); + thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= + thd->query_start_used= 0; thd->is_fatal_error= thd->time_zone_used= 0; thd->server_status&= ~ (SERVER_MORE_RESULTS_EXISTS | SERVER_QUERY_NO_INDEX_USED | @@ -5874,6 +5870,12 @@ void mysql_reset_thd_for_next_command(THD *thd) thd->rand_used= 0; thd->sent_row_count= thd->examined_row_count= 0; } + /* + Because we come here only for start of top-statements, binlog format is + constant inside a complex statement (using stored functions) etc. 
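The comment above, together with the THD changes earlier in the patch, gives the rules for when the per-statement row/statement logging choice may change: reset only at the start of a top-level statement, never inside a stored function or trigger, and not while temporary tables created under row-based logging still exist. A condensed restatement with stand-in names:

#include <cassert>

// Stand-in names for the per-statement logging decision described above.
enum BinlogFormat { FORMAT_STATEMENT, FORMAT_MIXED, FORMAT_ROW };

struct BinlogChoice {
  BinlogFormat global_format = FORMAT_MIXED;
  bool row_based_for_stmt = false;

  void reset_for_next_statement(bool in_sub_stmt, bool has_temp_tables) {
    if (!in_sub_stmt && !has_temp_tables)
      row_based_for_stmt = (global_format == FORMAT_ROW);
  }
  void switch_to_row_if_mixed(bool in_sub_stmt) {
    if (global_format == FORMAT_MIXED && !in_sub_stmt)
      row_based_for_stmt = true;       // e.g. the statement uses UUID()
  }
};

int main() {
  BinlogChoice b;
  b.switch_to_row_if_mixed(false);          // top statement needs row format
  assert(b.row_based_for_stmt);
  b.reset_for_next_statement(false, true);  // a temporary table still exists
  assert(b.row_based_for_stmt);             // keep logging rows
  b.reset_for_next_statement(false, false); // the temporary table was dropped
  assert(!b.row_based_for_stmt);            // back to statement-based
  return 0;
}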
+ */ + thd->reset_current_stmt_binlog_row_based(); + DBUG_VOID_RETURN; } @@ -7311,7 +7313,7 @@ bool mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys) DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name, &create_info, table_list, fields, keys, 0, (ORDER*)0, - DUP_ERROR, 0, &alter_info, 1)); + 0, &alter_info, 1)); } @@ -7329,7 +7331,7 @@ bool mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info) DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name, &create_info, table_list, fields, keys, 0, (ORDER*)0, - DUP_ERROR, 0, alter_info, 1)); + 0, alter_info, 1)); } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 5b70d1649e7..0dc19c5417d 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -7895,7 +7895,7 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) Field *field=((Item_field*) args[0])->field; if (field->flags & AUTO_INCREMENT_FLAG && !field->table->maybe_null && (thd->options & OPTION_AUTO_IS_NULL) && - thd->insert_id()) + (thd->first_successful_insert_id_in_prev_stmt > 0)) { #ifdef HAVE_QUERY_CACHE query_cache_abort(&thd->net); @@ -7903,7 +7903,7 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) COND *new_cond; if ((new_cond= new Item_func_eq(args[0], new Item_int("last_insert_id()", - thd->insert_id(), + thd->read_first_successful_insert_id_in_prev_stmt(), 21)))) { cond=new_cond; @@ -7914,7 +7914,11 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) */ cond->fix_fields(thd, &cond); } - thd->insert_id(0); // Clear for next request + /* + IS NULL should be mapped to LAST_INSERT_ID only for first row, so + clear for next row + */ + thd->first_successful_insert_id_in_prev_stmt= 0; } /* fix to replace 'NULL' dates with '0' (shreeve@uci.edu) */ else if (((field->type() == FIELD_TYPE_DATE) || @@ -14580,10 +14584,19 @@ void st_select_lex::print(THD *thd, String *str) str->append(STRING_WITH_LEN("sql_buffer_result ")); if (options & OPTION_FOUND_ROWS) str->append(STRING_WITH_LEN("sql_calc_found_rows ")); - if (!thd->lex->safe_to_cache_query) - str->append(STRING_WITH_LEN("sql_no_cache ")); - if (options & OPTION_TO_QUERY_CACHE) - str->append(STRING_WITH_LEN("sql_cache ")); + switch (sql_cache) + { + case SQL_NO_CACHE: + str->append(STRING_WITH_LEN("sql_no_cache ")); + break; + case SQL_CACHE: + str->append(STRING_WITH_LEN("sql_cache ")); + break; + case SQL_CACHE_UNSPECIFIED: + break; + default: + DBUG_ASSERT(0); + } //Item List bool first= 1; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 8f8c84c2db5..6018524456b 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -71,6 +71,10 @@ static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **), static void store_key_options(THD *thd, String *packet, TABLE *table, KEY *key_info); +static void +append_algorithm(TABLE_LIST *table, String *buff); + + /*************************************************************************** ** List all table types supported ***************************************************************************/ @@ -1423,6 +1427,28 @@ static void store_key_options(THD *thd, String *packet, TABLE *table, void view_store_options(THD *thd, TABLE_LIST *table, String *buff) { + append_algorithm(table, buff); + append_definer(thd, buff, &table->definer.user, &table->definer.host); + if (table->view_suid) + buff->append(STRING_WITH_LEN("SQL SECURITY DEFINER ")); + else + buff->append(STRING_WITH_LEN("SQL SECURITY INVOKER ")); +} + + +/* + Append DEFINER clause to the given 
buffer. + + SYNOPSIS + append_definer() + thd [in] thread handle + buffer [inout] buffer to hold DEFINER clause + definer_user [in] user name part of definer + definer_host [in] host name part of definer +*/ + +static void append_algorithm(TABLE_LIST *table, String *buff) +{ buff->append(STRING_WITH_LEN("ALGORITHM=")); switch ((int8)table->algorithm) { case VIEW_ALGORITHM_UNDEFINED: @@ -1437,11 +1463,6 @@ view_store_options(THD *thd, TABLE_LIST *table, String *buff) default: DBUG_ASSERT(0); // never should happen } - append_definer(thd, buff, &table->definer.user, &table->definer.host); - if (table->view_suid) - buff->append(STRING_WITH_LEN("SQL SECURITY DEFINER ")); - else - buff->append(STRING_WITH_LEN("SQL SECURITY INVOKER ")); } /* @@ -3517,7 +3538,16 @@ static int get_schema_views_record(THD *thd, struct st_table_list *tables, table->field[1]->store(tables->view_db.str, tables->view_db.length, cs); table->field[2]->store(tables->view_name.str, tables->view_name.length, cs); if (grant & SHOW_VIEW_ACL) - table->field[3]->store(tables->query.str, tables->query.length, cs); + { + char buff[2048]; + String qwe_str(buff, sizeof(buff), cs); + qwe_str.length(0); + qwe_str.append(STRING_WITH_LEN("/* ")); + append_algorithm(tables, &qwe_str); + qwe_str.append(STRING_WITH_LEN("*/ ")); + qwe_str.append(tables->query.str, tables->query.length); + table->field[3]->store(qwe_str.ptr(), qwe_str.length(), cs); + } if (tables->with_check != VIEW_CHECK_NONE) { diff --git a/sql/sql_table.cc b/sql/sql_table.cc index d9a43e4580f..ccddefab421 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -35,9 +35,7 @@ const char *primary_key_name="PRIMARY"; static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end); static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end); static int copy_data_between_tables(TABLE *from,TABLE *to, - List<create_field> &create, - enum enum_duplicates handle_duplicates, - bool ignore, + List<create_field> &create, bool ignore, uint order_num, ORDER *order, ha_rows *copied,ha_rows *deleted); static bool prepare_blob_field(THD *thd, create_field *sql_field); @@ -4955,8 +4953,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, HA_CREATE_INFO *lex_create_info, TABLE_LIST *table_list, List<create_field> &fields, List<Key> &keys, - uint order_num, ORDER *order, - enum enum_duplicates handle_duplicates, bool ignore, + uint order_num, ORDER *order, bool ignore, ALTER_INFO *alter_info, bool do_send_ok) { TABLE *table,*new_table=0; @@ -4967,7 +4964,6 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, char path[FN_REFLEN]; char reg_path[FN_REFLEN+1]; ha_rows copied,deleted; - ulonglong next_insert_id; uint db_create_options, used_fields; handlerton *old_db_type, *new_db_type; HA_CREATE_INFO *create_info; @@ -5787,18 +5783,15 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields thd->cuted_fields=0L; thd->proc_info="copy to tmp table"; - next_insert_id=thd->next_insert_id; // Remember for logging copied=deleted=0; if (new_table && !(new_table->file->ha_table_flags() & HA_NO_COPY_ON_ALTER)) { /* We don't want update TIMESTAMP fields during ALTER TABLE. 
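With append_algorithm() split out above, INFORMATION_SCHEMA.VIEWS can prefix the stored SELECT with the view algorithm as a comment, so the definition keeps its ALGORITHM= choice when copied back. A simplified model of the string being built (the enum and helper names are invented, only the shape of the output matters):

#include <cassert>
#include <string>

// Simplified model of the VIEW_DEFINITION column being built above.
enum ViewAlgorithm { ALG_UNDEFINED, ALG_TMPTABLE, ALG_MERGE };

std::string algorithm_clause(ViewAlgorithm a) {
  switch (a) {
    case ALG_UNDEFINED: return "ALGORITHM=UNDEFINED ";
    case ALG_TMPTABLE:  return "ALGORITHM=TEMPTABLE ";
    case ALG_MERGE:     return "ALGORITHM=MERGE ";
  }
  return "";
}

std::string view_definition_column(ViewAlgorithm a, const std::string &query) {
  return "/* " + algorithm_clause(a) + "*/ " + query;
}

int main() {
  assert(view_definition_column(ALG_MERGE, "select 1") ==
         "/* ALGORITHM=MERGE */ select 1");
  return 0;
}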
*/ new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; new_table->next_number_field=new_table->found_next_number_field; - error=copy_data_between_tables(table,new_table,create_list, - handle_duplicates, ignore, + error=copy_data_between_tables(table, new_table, create_list, ignore, order_num, order, &copied, &deleted); } - thd->last_insert_id=next_insert_id; // Needed for correct log thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* If we did not need to copy, we might still need to add/drop indexes. */ @@ -6209,7 +6202,6 @@ end_temporary: static int copy_data_between_tables(TABLE *from,TABLE *to, List<create_field> &create, - enum enum_duplicates handle_duplicates, bool ignore, uint order_num, ORDER *order, ha_rows *copied, @@ -6228,6 +6220,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, ha_rows examined_rows; bool auto_increment_field_copied= 0; ulong save_sql_mode; + ulonglong prev_insert_id; DBUG_ENTER("copy_data_between_tables"); /* @@ -6308,8 +6301,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, /* Tell handler that we have values for all columns in the to table */ to->use_all_columns(); init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1); - if (ignore || - handle_duplicates == DUP_REPLACE) + if (ignore) to->file->extra(HA_EXTRA_IGNORE_DUP_KEY); thd->row_count= 0; restore_record(to, s->default_values); // Create empty record @@ -6334,9 +6326,10 @@ copy_data_between_tables(TABLE *from,TABLE *to, { copy_ptr->do_copy(copy_ptr); } + prev_insert_id= to->file->next_insert_id; if ((error=to->file->ha_write_row((byte*) to->record[0]))) { - if (!ignore || handle_duplicates != DUP_ERROR || + if (!ignore || to->file->is_fatal_error(error, HA_CHECK_DUP)) { if (!to->file->is_fatal_error(error, HA_CHECK_DUP)) @@ -6357,7 +6350,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, to->file->print_error(error,MYF(0)); break; } - to->file->restore_auto_increment(); + to->file->restore_auto_increment(prev_insert_id); delete_count++; } else @@ -6391,6 +6384,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, free_io_cache(from); *copied= found_count; *deleted=delete_count; + to->file->ha_release_auto_increment(); if (to->file->ha_external_lock(thd,F_UNLCK)) error=1; DBUG_RETURN(error > 0 ? -1 : 0); @@ -6428,7 +6422,7 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info, table_list, lex->create_list, lex->key_list, 0, (ORDER *) 0, - DUP_ERROR, 0, &lex->alter_info, do_send_ok)); + 0, &lex->alter_info, do_send_ok)); } diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 5abdfa27d58..1837372c6c4 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -183,6 +183,15 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) !(tables= add_table_for_trigger(thd, thd->lex->spname))) DBUG_RETURN(TRUE); + /* + We don't allow creating triggers on tables in the 'mysql' schema + */ + if (create && !my_strcasecmp(system_charset_info, "mysql", tables->db)) + { + my_error(ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA, MYF(0)); + DBUG_RETURN(TRUE); + } + /* We should have only one table in table list. 
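copy_data_between_tables() above, like write_record() earlier in the patch, now remembers the handler's next_insert_id before attempting a row and restores it when the row is ultimately not inserted, so an ignored duplicate does not burn a reserved value. A standalone imitation of that pattern with made-up types:

#include <cassert>

// Handler here is a made-up type, not the server's handler class.
struct Handler {
  unsigned long long next_insert_id = 1;

  unsigned long long generate() { return next_insert_id++; }
  void restore_auto_increment(unsigned long long prev) {
    if (prev > 0)
      next_insert_id = prev;  // the value consumed by the failed row comes back
  }
};

bool try_write_row(Handler &h, bool duplicate_to_be_ignored) {
  unsigned long long prev = h.next_insert_id;  // remembered before the attempt
  h.generate();                                // a value is consumed up front
  if (duplicate_to_be_ignored) {               // e.g. IGNORE swallows the error
    h.restore_auto_increment(prev);
    return false;
  }
  return true;
}

int main() {
  Handler h;
  bool ok1 = try_write_row(h, false);   // row got id 1
  bool ok2 = try_write_row(h, true);    // duplicate ignored, id 2 given back
  bool ok3 = try_write_row(h, false);   // next row reuses id 2
  assert(ok1 && !ok2 && ok3);
  assert(h.next_insert_id == 3);
  return 0;
}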
*/ DBUG_ASSERT(tables->next_global == 0); @@ -372,7 +381,9 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, /* We don't allow creation of several triggers of the same type yet */ if (bodies[lex->trg_chistics.event][lex->trg_chistics.action_time]) { - my_message(ER_TRG_ALREADY_EXISTS, ER(ER_TRG_ALREADY_EXISTS), MYF(0)); + my_error(ER_NOT_SUPPORTED_YET, MYF(0), + "multiple triggers with the same action time" + " and event for one table"); return 1; } @@ -1010,8 +1021,15 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, } /* - Let us bind Item_trigger_field objects representing access to fields - in old/new versions of row in trigger to Field objects in table being + Gather all Item_trigger_field objects representing access to fields + in old/new versions of row in trigger into lists containing all such + objects for the triggers with same action and timing. + */ + triggers->trigger_fields[lex.trg_chistics.event] + [lex.trg_chistics.action_time]= + (Item_trigger_field *)(lex.trg_table_fields.first); + /* + Also let us bind these objects to Field objects in table being opened. We ignore errors here, because if even something is wrong we still @@ -1525,6 +1543,44 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event, /* + Mark fields of subject table which we read/set in its triggers as such. + + SYNOPSIS + mark_fields_used() + thd Current thread context + event Type of event triggers for which we are going to inspect + + DESCRIPTION + This method marks fields of subject table which are read/set in its + triggers as such (by properly updating TABLE::read_set/write_set) + and thus informs handler that values for these fields should be + retrieved/stored during execution of statement. +*/ + +void Table_triggers_list::mark_fields_used(trg_event_type event) +{ + int action_time; + Item_trigger_field *trg_field; + + for (action_time= 0; action_time < (int)TRG_ACTION_MAX; action_time++) + { + for (trg_field= trigger_fields[event][action_time]; trg_field; + trg_field= trg_field->next_trg_field) + { + /* We cannot mark fields which does not present in table. */ + if (trg_field->field_idx != (uint)-1) + { + bitmap_set_bit(table->read_set, trg_field->field_idx); + if (trg_field->get_settable_routine_parameter()) + bitmap_set_bit(table->write_set, trg_field->field_idx); + } + } + } + table->file->column_bitmaps_signal(); +} + + +/* Trigger BUG#14090 compatibility hook SYNOPSIS diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h index bddfd8c1f0c..13a919c09ca 100644 --- a/sql/sql_trigger.h +++ b/sql/sql_trigger.h @@ -26,6 +26,11 @@ class Table_triggers_list: public Sql_alloc /* Triggers as SPs grouped by event, action_time */ sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX]; /* + Heads of the lists linking items for all fields used in triggers + grouped by event and action_time. + */ + Item_trigger_field *trigger_fields[TRG_EVENT_MAX][TRG_ACTION_MAX]; + /* Copy of TABLE::Field array with field pointers set to TABLE::record[1] buffer instead of TABLE::record[0] (used for OLD values in on UPDATE trigger and DELETE trigger when it is called for REPLACE). 
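The trigger changes above replace the old "mark every column whenever any trigger exists" fallback with per-field marking: each (event, action time) slot now keeps a linked list of Item_trigger_field objects, and mark_fields_used() sets only those columns in the table's read/write bitmaps. A minimal standalone sketch of that bookkeeping, using simplified stand-in types rather than the server's Item_trigger_field/MY_BITMAP, could look like:

    /* Simplified model of per-trigger field marking: one linked list of field
       references per (event, action time) and two column bitmaps per table.
       Stand-in types only; the real server uses Item_trigger_field, MY_BITMAP
       and calls handler::column_bitmaps_signal() afterwards. */
    #include <stdio.h>
    #include <stdint.h>

    enum { TRG_EVENT_MAX = 3, TRG_ACTION_MAX = 2 };

    struct trg_field_ref {
      unsigned field_idx;           /* column number, (unsigned)-1 if unresolved */
      int settable;                 /* 1 if the trigger assigns NEW.col */
      struct trg_field_ref *next;
    };

    struct table_model {
      uint32_t read_set;            /* one bit per column, column 0 = bit 0 */
      uint32_t write_set;
      struct trg_field_ref *trigger_fields[TRG_EVENT_MAX][TRG_ACTION_MAX];
    };

    /* Mirrors the idea of Table_triggers_list::mark_fields_used(): mark only
       the columns the triggers for 'event' actually touch. */
    static void mark_fields_used(struct table_model *t, int event)
    {
      for (int action = 0; action < TRG_ACTION_MAX; action++)
        for (struct trg_field_ref *f = t->trigger_fields[event][action]; f; f = f->next)
          if (f->field_idx != (unsigned) -1)
          {
            t->read_set |= 1u << f->field_idx;
            if (f->settable)
              t->write_set |= 1u << f->field_idx;
          }
    }

    int main(void)
    {
      struct trg_field_ref new_b = { 1, 1, NULL };    /* trigger sets NEW.b  */
      struct trg_field_ref old_c = { 2, 0, &new_b };  /* trigger reads OLD.c */
      struct table_model t = { 0, 0, {{0}} };
      t.trigger_fields[0][0] = &old_c;                /* BEFORE INSERT list  */

      mark_fields_used(&t, 0);
      printf("read_set=0x%x write_set=0x%x\n", t.read_set, t.write_set);
      return 0;
    }

Only columns b and c end up in read_set (and only b in write_set), which is the point of the change: an ALTER/UPDATE/DELETE no longer has to fetch every column just because a trigger is defined.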
@@ -82,6 +87,7 @@ public: record1_field(0), table(table_arg) { bzero((char *)bodies, sizeof(bodies)); + bzero((char *)trigger_fields, sizeof(trigger_fields)); bzero((char *)&subject_table_grants, sizeof(subject_table_grants)); } ~Table_triggers_list(); @@ -119,6 +125,8 @@ public: void set_table(TABLE *new_table); + void mark_fields_used(trg_event_type event); + friend class Item_trigger_field; friend int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex, TABLE_LIST *table); @@ -132,10 +140,6 @@ private: const char *db_name, LEX_STRING *old_table_name, LEX_STRING *new_table_name); - friend void st_table::mark_columns_needed_for_insert(void); - friend void st_table::mark_columns_needed_for_update(void); - friend void st_table::mark_columns_needed_for_delete(void); - }; extern const LEX_STRING trg_action_time_type_names[]; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 2164da01c4c..881d4ba1357 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -135,7 +135,8 @@ int mysql_update(THD *thd, SQL_SELECT *select; READ_RECORD info; SELECT_LEX *select_lex= &thd->lex->select_lex; - bool need_reopen; + bool need_reopen; + ulonglong id; DBUG_ENTER("mysql_update"); for ( ; ; ) @@ -676,6 +677,10 @@ int mysql_update(THD *thd, thd->lock=0; } + /* If LAST_INSERT_ID(X) was used, report X */ + id= thd->arg_of_last_insert_id_function ? + thd->first_successful_insert_id_in_prev_stmt : 0; + if (error < 0) { char buff[STRING_BUFFER_USUAL_SIZE]; @@ -683,8 +688,7 @@ int mysql_update(THD *thd, (ulong) thd->cuted_fields); thd->row_count_func= (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated; - send_ok(thd, (ulong) thd->row_count_func, - thd->insert_id_used ? thd->insert_id() : 0L,buff); + send_ok(thd, (ulong) thd->row_count_func, id, buff); DBUG_PRINT("info",("%d records updated",updated)); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */ @@ -1634,6 +1638,7 @@ err2: bool multi_update::send_eof() { char buff[STRING_BUFFER_USUAL_SIZE]; + ulonglong id; thd->proc_info="updating reference tables"; /* Does updates for the last n - 1 tables, returns 0 if ok */ @@ -1686,12 +1691,12 @@ bool multi_update::send_eof() return TRUE; } - + id= thd->arg_of_last_insert_id_function ? + thd->first_successful_insert_id_in_prev_stmt : 0; sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated, (ulong) thd->cuted_fields); thd->row_count_func= (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated; - ::send_ok(thd, (ulong) thd->row_count_func, - thd->insert_id_used ? thd->insert_id() : 0L,buff); + ::send_ok(thd, (ulong) thd->row_count_func, id, buff); return FALSE; } diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 94ece79da81..d24b247a22f 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -996,6 +996,15 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table) table->next_global= view_tables; } +#ifdef HAVE_ROW_BASED_REPLICATION + /* + If the view's body needs row-based binlogging (e.g. the VIEW is created + from SELECT UUID()), the top statement also needs it. 
+ */ + if (lex->binlog_row_based_if_mixed) + old_lex->binlog_row_based_if_mixed= TRUE; +#endif + /* If we are opening this view as part of implicit LOCK TABLES, then this view serves as simple placeholder and we should not continue diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 68aeefc1278..8ce5bbc87aa 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -5707,10 +5707,21 @@ select_option: YYABORT; Select->options|= OPTION_FOUND_ROWS; } - | SQL_NO_CACHE_SYM { Lex->safe_to_cache_query=0; } + | SQL_NO_CACHE_SYM + { + Lex->safe_to_cache_query=0; + Lex->select_lex.options&= ~OPTION_TO_QUERY_CACHE; + Lex->select_lex.sql_cache= SELECT_LEX::SQL_NO_CACHE; + } | SQL_CACHE_SYM { - Lex->select_lex.options|= OPTION_TO_QUERY_CACHE; + /* Honor this flag only if SQL_NO_CACHE wasn't specified. */ + if (Lex->select_lex.sql_cache != SELECT_LEX::SQL_NO_CACHE) + { + Lex->safe_to_cache_query=1; + Lex->select_lex.options|= OPTION_TO_QUERY_CACHE; + Lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE; + } } | ALL { Select->options|= SELECT_ALL; } ; @@ -6403,7 +6414,7 @@ simple_expr: if (udf->type == UDFTYPE_AGGREGATE) Select->in_sum_expr--; - Lex->binlog_row_based_if_mixed= 1; + Lex->binlog_row_based_if_mixed= TRUE; switch (udf->returns) { case STRING_RESULT: @@ -8073,6 +8084,7 @@ truncate: LEX* lex= Lex; lex->sql_command= SQLCOM_TRUNCATE; lex->select_lex.options= 0; + lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED; lex->select_lex.init_order(); } ; diff --git a/sql/structs.h b/sql/structs.h index 38bb441fc03..83ae6cac032 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -250,3 +250,99 @@ typedef struct user_conn { #define STATUS_UPDATED 16 /* Record is updated by formula */ #define STATUS_NULL_ROW 32 /* table->null_row is set */ #define STATUS_DELETED 64 + +/* + Such interval is "discrete": it is the set of + { auto_inc_interval_min + k * increment, + 0 <= k <= (auto_inc_interval_values-1) } + Where "increment" is maintained separately by the user of this class (and is + currently only thd->variables.auto_increment_increment). + It mustn't derive from Sql_alloc, because SET INSERT_ID needs to + allocate memory which must stay allocated for use by the next statement. +*/ +class Discrete_interval { +private: + ulonglong interval_min; + ulonglong interval_values; + ulonglong interval_max; // excluded bound. Redundant. +public: + Discrete_interval *next; // used when linked into Discrete_intervals_list + void replace(ulonglong start, ulonglong val, ulonglong incr) + { + interval_min= start; + interval_values= val; + interval_max= (val == ULONGLONG_MAX) ? val : start + val * incr; + } + Discrete_interval(ulonglong start, ulonglong val, ulonglong incr) : + next(NULL) { replace(start, val, incr); }; + Discrete_interval() : next(NULL) { replace(0, 0, 0); }; + ulonglong minimum() const { return interval_min; }; + ulonglong values() const { return interval_values; }; + ulonglong maximum() const { return interval_max; }; + /* + If appending [3,5] to [1,2], we merge both in [1,5] (they should have the + same increment for that, user of the class has to ensure that). That is + just a space optimization. Returns 0 if merge succeeded. 
+ */ + bool merge_if_contiguous(ulonglong start, ulonglong val, ulonglong incr) + { + if (interval_max == start) + { + if (val == ULONGLONG_MAX) + { + interval_values= interval_max= val; + } + else + { + interval_values+= val; + interval_max= start + val * incr; + } + return 0; + } + return 1; + }; +}; + +/* List of Discrete_interval objects */ +class Discrete_intervals_list { +private: + Discrete_interval *head; + Discrete_interval *tail; + /* + When many intervals are provided at the beginning of the execution of a + statement (in a replication slave or SET INSERT_ID), "current" points to + the interval being consumed by the thread now (so "current" goes from + "head" to "tail" then to NULL). + */ + Discrete_interval *current; + uint elements; // number of elements +public: + Discrete_intervals_list() : head(NULL), current(NULL), elements(0) {}; + void empty_no_free() + { + head= current= NULL; + elements= 0; + } + void empty() + { + for (Discrete_interval *i= head; i;) + { + Discrete_interval *next= i->next; + delete i; + i= next; + } + empty_no_free(); + } + const Discrete_interval* get_next() + { + Discrete_interval *tmp= current; + if (current != NULL) + current= current->next; + return tmp; + } + ~Discrete_intervals_list() { empty(); }; + bool append(ulonglong start, ulonglong val, ulonglong incr); + ulonglong minimum() const { return (head ? head->minimum() : 0); }; + ulonglong maximum() const { return (head ? tail->maximum() : 0); }; + uint nb_elements() const { return elements; } +}; diff --git a/sql/table.cc b/sql/table.cc index 8bee8bf1598..163f2a4f0b8 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -3925,16 +3925,7 @@ void st_table::mark_auto_increment_column() void st_table::mark_columns_needed_for_delete() { if (triggers) - { - if (triggers->bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] || - triggers->bodies[TRG_EVENT_DELETE][TRG_ACTION_AFTER]) - { - /* TODO: optimize to only add columns used by trigger */ - use_all_columns(); - return; - } - } - + triggers->mark_fields_used(TRG_EVENT_DELETE); if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE) { Field **reg_field; @@ -3985,15 +3976,7 @@ void st_table::mark_columns_needed_for_update() { DBUG_ENTER("mark_columns_needed_for_update"); if (triggers) - { - if (triggers->bodies[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE] || - triggers->bodies[TRG_EVENT_UPDATE][TRG_ACTION_AFTER]) - { - /* TODO: optimize to only add columns used by trigger */ - use_all_columns(); - DBUG_VOID_RETURN; - } - } + triggers->mark_fields_used(TRG_EVENT_UPDATE); if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE) { /* Mark all used key columns for read */ @@ -4036,13 +4019,14 @@ void st_table::mark_columns_needed_for_insert() { if (triggers) { - if (triggers->bodies[TRG_EVENT_INSERT][TRG_ACTION_BEFORE] || - triggers->bodies[TRG_EVENT_INSERT][TRG_ACTION_AFTER]) - { - /* TODO: optimize to only add columns used by trigger */ - use_all_columns(); - return; - } + /* + We don't need to mark columns which are used by ON DELETE and + ON UPDATE triggers, which may be invoked in case of REPLACE or + INSERT ... ON DUPLICATE KEY UPDATE, since before doing actual + row replacement or update write_record() will mark all table + fields as used. 
+ */ + triggers->mark_fields_used(TRG_EVENT_INSERT); } if (found_next_number_field) mark_auto_increment_column(); diff --git a/storage/heap/hp_test1.c b/storage/heap/hp_test1.c index dd696528eb8..703b39b1e2d 100644 --- a/storage/heap/hp_test1.c +++ b/storage/heap/hp_test1.c @@ -44,6 +44,7 @@ int main(int argc, char **argv) get_options(argc,argv); bzero(&hp_create_info, sizeof(hp_create_info)); + hp_create_info.max_table_size= 1024L*1024L; keyinfo[0].keysegs=1; keyinfo[0].seg=keyseg; @@ -58,7 +59,7 @@ int main(int argc, char **argv) bzero((gptr) flags,sizeof(flags)); printf("- Creating heap-file\n"); - if (heap_create(filename,1,keyinfo,30,(ulong) flag*100000l,10l, + if (heap_create(filename,1,keyinfo,30,(ulong) flag*100000L,10L, &hp_create_info) || !(file= heap_open(filename, 2))) goto err; diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index 228dab57650..7c44fadfa62 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -2075,6 +2075,8 @@ private: * * XXX only table ops check BlockState */ + struct DictLockType; + friend struct DictLockType; struct DictLockType { DictLockReq::LockType lockType; @@ -2082,6 +2084,9 @@ private: const char* text; }; + struct DictLockRecord; + friend struct DictLockRecord; + struct DictLockRecord { DictLockReq req; const DictLockType* lt; diff --git a/strings/strtod.c b/strings/strtod.c index 61f2c107abe..e0910205d2f 100644 --- a/strings/strtod.c +++ b/strings/strtod.c @@ -26,7 +26,8 @@ */ -#include "my_base.h" /* Includes errno.h */ +#include "my_base.h" /* Defines EOVERFLOW on Windows */ +#include "my_global.h" /* Includes errno.h */ #include "m_ctype.h" #define MAX_DBL_EXP 308 diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index 1c22fb2586b..a961fc145a9 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -28,6 +28,23 @@ %define see_base For a description of MySQL see the base MySQL RPM or http://www.mysql.com +# On SuSE 9 no separate "debuginfo" package is built. To enable basic +# debugging on that platform, we don't strip binaries on SuSE 9. We +# disable the strip of binaries by redefining the RPM macro +# "__os_install_post" leaving out the script calls that normally does +# this. We do this in all cases, as on platforms where "debuginfo" is +# created, a script "find-debuginfo.sh" will be called that will do +# the strip anyway, part of separating the executable and debug +# information into separate files put into separate packages. +# +# Some references (shows more advanced conditional usage): +# http://www.redhat.com/archives/rpm-list/2001-November/msg00257.html +# http://www.redhat.com/archives/rpm-list/2003-February/msg00275.html +# http://www.redhat.com/archives/rhl-devel-list/2004-January/msg01546.html +# http://lists.opensuse.org/archive/opensuse-commit/2006-May/1171.html + +%define __os_install_post /usr/lib/rpm/brp-compress + Name: MySQL Summary: MySQL: a very fast and reliable SQL database server Group: Applications/Databases diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index e3159cfa5e5..d0b7d22ca24 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -14900,11 +14900,13 @@ static void test_bug17667() printf("success. All queries found intact in the log.\n"); - } else { + } + else + { fprintf(stderr, "Could not find the log file, var/log/master.log, so " - "test_bug17667 is \ninconclusive. 
Run test from the " - "mysql-test/mysql-test-run* program \nto set up the correct " - "environment for this test.\n\n"); + "test_bug17667 is \ninconclusive. Run test from the " + "mysql-test/mysql-test-run* program \nto set up the correct " + "environment for this test.\n\n"); } if (log_file != NULL) @@ -14914,7 +14916,8 @@ static void test_bug17667() /* - Bug#14169: type of group_concat() result changed to blob if tmp_table was used + Bug#14169: type of group_concat() result changed to blob if tmp_table was + used */ static void test_bug14169() { @@ -14947,7 +14950,243 @@ static void test_bug14169() rc= mysql_query(mysql, "drop table t1"); myquery(rc); -}/* +} + +/* + Test that mysql_insert_id() behaves as documented in our manual +*/ + +static void test_mysql_insert_id() +{ + my_ulonglong res; + int rc; + + myheader("test_mysql_insert_id"); + + rc= mysql_query(mysql, "drop table if exists t1"); + myquery(rc); + /* table without auto_increment column */ + rc= mysql_query(mysql, "create table t1 (f1 int, f2 varchar(255), key(f1))"); + myquery(rc); + rc= mysql_query(mysql, "insert into t1 values (1,'a')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "insert into t1 values (null,'b')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "insert into t1 select 5,'c'"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "insert into t1 select null,'d'"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "insert into t1 values (null,last_insert_id(300))"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 300); + rc= mysql_query(mysql, "insert into t1 select null,last_insert_id(400)"); + myquery(rc); + res= mysql_insert_id(mysql); + /* + Behaviour change: old code used to return 0; but 400 is consistent + with INSERT VALUES, and the manual's section of mysql_insert_id() does not + say INSERT SELECT should be different. + */ + DIE_UNLESS(res == 400); + + /* table with auto_increment column */ + rc= mysql_query(mysql, "create table t2 (f1 int not null primary key auto_increment, f2 varchar(255))"); + myquery(rc); + rc= mysql_query(mysql, "insert into t2 values (1,'a')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 1); + /* this should not influence next INSERT if it doesn't have auto_inc */ + rc= mysql_query(mysql, "insert into t1 values (10,'e')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + + rc= mysql_query(mysql, "insert into t2 values (null,'b')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 2); + rc= mysql_query(mysql, "insert into t2 select 5,'c'"); + myquery(rc); + res= mysql_insert_id(mysql); + /* + Manual says that for multirow insert this should have been 5, but does not + say for INSERT SELECT. This is a behaviour change: old code used to return + 0. We try to be consistent with INSERT VALUES. 
+ */ + DIE_UNLESS(res == 5); + rc= mysql_query(mysql, "insert into t2 select null,'d'"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 6); + /* with more than one row */ + rc= mysql_query(mysql, "insert into t2 values (10,'a'),(11,'b')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 11); + rc= mysql_query(mysql, "insert into t2 select 12,'a' union select 13,'b'"); + myquery(rc); + res= mysql_insert_id(mysql); + /* + Manual says that for multirow insert this should have been 13, but does + not say for INSERT SELECT. This is a behaviour change: old code used to + return 0. We try to be consistent with INSERT VALUES. + */ + DIE_UNLESS(res == 13); + rc= mysql_query(mysql, "insert into t2 values (null,'a'),(null,'b')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 14); + rc= mysql_query(mysql, "insert into t2 select null,'a' union select null,'b'"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 16); + rc= mysql_query(mysql, "insert into t2 select 12,'a' union select 13,'b'"); + myquery_r(rc); + rc= mysql_query(mysql, "insert ignore into t2 select 12,'a' union select 13,'b'"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "insert into t2 values (12,'a'),(13,'b')"); + myquery_r(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "insert ignore into t2 values (12,'a'),(13,'b')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + /* mixing autogenerated and explicit values */ + rc= mysql_query(mysql, "insert into t2 values (null,'e'),(12,'a'),(13,'b')"); + myquery_r(rc); + rc= mysql_query(mysql, "insert into t2 values (null,'e'),(12,'a'),(13,'b'),(25,'g')"); + myquery_r(rc); + rc= mysql_query(mysql, "insert into t2 values (null,last_insert_id(300))"); + myquery(rc); + res= mysql_insert_id(mysql); + /* + according to the manual, this might be 20 or 300, but it looks like + auto_increment column takes priority over last_insert_id(). + */ + DIE_UNLESS(res == 20); + /* If first autogenerated number fails and 2nd works: */ + rc= mysql_query(mysql, "drop table t2"); + myquery(rc); + rc= mysql_query(mysql, "create table t2 (f1 int not null primary key " + "auto_increment, f2 varchar(255), unique (f2))"); + myquery(rc); + rc= mysql_query(mysql, "insert into t2 values (null,'e')"); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 1); + rc= mysql_query(mysql, "insert ignore into t2 values (null,'e'),(null,'a'),(null,'e')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 2); + /* If autogenerated fails and explicit works: */ + rc= mysql_query(mysql, "insert ignore into t2 values (null,'e'),(12,'c'),(null,'d')"); + myquery(rc); + res= mysql_insert_id(mysql); + /* + Behaviour change: old code returned 3 (first autogenerated, even if it + fails); we now return first successful autogenerated. 
+ */ + DIE_UNLESS(res == 13); + /* UPDATE may update mysql_insert_id() if it uses LAST_INSERT_ID(#) */ + rc= mysql_query(mysql, "update t2 set f1=14 where f1=12"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "update t2 set f1=NULL where f1=14"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "update t2 set f2=last_insert_id(372) where f1=0"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 372); + /* check that LAST_INSERT_ID() does not update mysql_insert_id(): */ + rc= mysql_query(mysql, "insert into t2 values (null,'g')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 15); + rc= mysql_query(mysql, "update t2 set f2=(@li:=last_insert_id()) where f1=15"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + /* + Behaviour change: now if ON DUPLICATE KEY UPDATE updates a row, + mysql_insert_id() returns the id of the row, instead of not being + affected. + */ + rc= mysql_query(mysql, "insert into t2 values (null,@li) on duplicate key " + "update f2=concat('we updated ',f2)"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 15); + + rc= mysql_query(mysql, "drop table t1,t2"); + myquery(rc); +} + +/* + Bug#20152: mysql_stmt_execute() writes to MYSQL_TYPE_DATE buffer +*/ + +static void test_bug20152() +{ + MYSQL_BIND bind[1]; + MYSQL_STMT *stmt; + MYSQL_TIME tm; + int rc; + const char *query= "INSERT INTO t1 (f1) VALUES (?)"; + + myheader("test_bug20152"); + + memset(bind, 0, sizeof(bind)); + bind[0].buffer_type= MYSQL_TYPE_DATE; + bind[0].buffer= (void*)&tm; + + tm.year = 2006; + tm.month = 6; + tm.day = 18; + tm.hour = 14; + tm.minute = 9; + tm.second = 42; + + rc= mysql_query(mysql, "DROP TABLE IF EXISTS t1"); + myquery(rc); + rc= mysql_query(mysql, "CREATE TABLE t1 (f1 DATE)"); + myquery(rc); + + stmt= mysql_stmt_init(mysql); + rc= mysql_stmt_prepare(stmt, query, strlen(query)); + check_execute(stmt, rc); + rc= mysql_stmt_bind_param(stmt, bind); + check_execute(stmt, rc); + rc= mysql_stmt_execute(stmt); + check_execute(stmt, rc); + rc= mysql_stmt_close(stmt); + check_execute(stmt, rc); + rc= mysql_query(mysql, "DROP TABLE t1"); + myquery(rc); + + if (tm.hour == 14 && tm.minute == 9 && tm.second == 42) { + if (!opt_silent) + printf("OK!"); + } else { + printf("[14:09:42] != [%02d:%02d:%02d]\n", tm.hour, tm.minute, tm.second); + DIE_UNLESS(0==1); + } +} +/* Read and parse arguments and MySQL options from my.cnf */ @@ -15212,8 +15451,10 @@ static struct my_tests_st my_tests[]= { { "test_bug16143", test_bug16143 }, { "test_bug16144", test_bug16144 }, { "test_bug15613", test_bug15613 }, + { "test_bug20152", test_bug20152 }, { "test_bug14169", test_bug14169 }, { "test_bug17667", test_bug17667 }, + { "test_mysql_insert_id", test_mysql_insert_id }, { 0, 0 } }; diff --git a/unittest/mysys/my_atomic-t.c b/unittest/mysys/my_atomic-t.c index 71408ce957f..4e2e496c3b1 100644 --- a/unittest/mysys/my_atomic-t.c +++ b/unittest/mysys/my_atomic-t.c @@ -166,9 +166,9 @@ int main() pthread_cond_init(&cond, 0); my_atomic_rwlock_init(&rwl); - test_atomic("my_atomic_add32", test_atomic_add_handler, 100,1000000); - test_atomic("my_atomic_swap32", test_atomic_swap_handler, 100,1000000); - test_atomic("my_atomic_cas32", test_atomic_cas_handler, 100,1000000); + test_atomic("my_atomic_add32", test_atomic_add_handler, 100,10000); + test_atomic("my_atomic_swap32", test_atomic_swap_handler, 100,10000); + test_atomic("my_atomic_cas32", 
test_atomic_cas_handler, 100,10000); pthread_mutex_destroy(&mutex); pthread_cond_destroy(&cond);
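The rules that test_mysql_insert_id() pins down above can also be observed from an ordinary client program. The following is a rough client-side sketch using the documented C API calls (mysql_query(), mysql_insert_id()); host, credentials and schema name are placeholders, and error handling is reduced to printing the failure:

    /* Client-side illustration of the mysql_insert_id() behaviour exercised by
       test_mysql_insert_id(). Connection parameters are placeholders.
       Build with: gcc insert_id_demo.c $(mysql_config --cflags --libs) */
    #include <stdio.h>
    #include <mysql.h>

    static void run(MYSQL *c, const char *q)
    {
      if (mysql_query(c, q))
      {
        fprintf(stderr, "'%s' failed: %s\n", q, mysql_error(c));
        return;
      }
      printf("%-60s insert_id=%llu\n", q,
             (unsigned long long) mysql_insert_id(c));
    }

    int main(void)
    {
      MYSQL *c = mysql_init(NULL);
      if (!mysql_real_connect(c, "localhost", "user", "pass", "test", 0, NULL, 0))
        return 1;

      run(c, "DROP TABLE IF EXISTS t2");
      run(c, "CREATE TABLE t2 (f1 INT PRIMARY KEY AUTO_INCREMENT, f2 VARCHAR(10))");
      run(c, "INSERT INTO t2 VALUES (NULL,'a'),(NULL,'b')");     /* id of first generated row */
      run(c, "INSERT INTO t2 SELECT NULL,'c'");                  /* now consistent with INSERT VALUES */
      run(c, "UPDATE t2 SET f2=LAST_INSERT_ID(300) WHERE f1=1"); /* LAST_INSERT_ID(x) is reported */

      mysql_close(c);
      return 0;
    }

The printed insert_id values should follow the expectations asserted in the test: the first generated id for a multi-row insert, INSERT ... SELECT treated like INSERT ... VALUES, and LAST_INSERT_ID(x) surfacing through an UPDATE.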