author | stewart@mysql.com <> | 2006-06-16 21:01:02 +1000
committer | stewart@mysql.com <> | 2006-06-16 21:01:02 +1000
commit | b5165c371e7d55a126b1fb867a973d3150007428 (patch)
tree | 936c70e175ad0402e4b9c78f99d50f92ad724d1b
parent | 3e1a5169c727a1d849a4c337f23c203b2ee5df3c (diff)
parent | 061bdf919f64418b1723cfedead015e9420d191a (diff)
download | mariadb-git-b5165c371e7d55a126b1fb867a973d3150007428.tar.gz
Merge mysql.com:/home/stewart/Documents/MySQL/5.0/merge-queue
into mysql.com:/home/stewart/Documents/MySQL/5.0/main
53 files changed, 974 insertions, 209 deletions
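Two of the merged changes are worth calling out before the full diff. The extra/yassl/src/socket_wrapper.cpp hunk replaces a single ::send() call with a loop, because ::send() on a stream socket may write fewer bytes than requested. Below is a minimal standalone sketch of that pattern, not yaSSL code: the name send_all and the raw int file descriptor are illustrative assumptions.

```cpp
// Sketch of the partial-send loop introduced in socket_wrapper.cpp below.
// send_all() and the plain int socket are illustrative, not yaSSL's API.
#include <sys/socket.h>
#include <sys/types.h>

// Returns sz when every byte was written, 0 on error -- mirroring the
// patched Socket::send(), which keeps calling ::send() until the whole
// buffer has gone out instead of returning after one short write.
static unsigned int send_all(int fd, const unsigned char* buf,
                             unsigned int sz, int flags)
{
    const unsigned char* pos = buf;
    const unsigned char* end = buf + sz;

    while (pos != end)
    {
        ssize_t sent = ::send(fd, pos, (size_t)(end - pos), flags);
        if (sent == -1)
            return 0;          // caller translates this into an SSL error
        pos += sent;
    }
    return sz;
}
```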
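The MyISAM hunks below rename update_auto_increment() to retrieve_auto_increment(): rather than bumping the shared counter as a side effect, the function now only returns the value found in a row, and each caller (mi_write, mi_update, mi_check) applies set_if_bigger itself. A rough sketch of that shape follows; it is not MyISAM source, and SharedState/on_row_written are hypothetical stand-ins.

```cpp
// Simplified sketch of the refactor in mi_key.c and its callers.
#include <algorithm>
#include <cstdint>

struct SharedState { uint64_t auto_increment; };

// After the patch: a pure "retrieve" that only reports the row's value.
static uint64_t retrieve_auto_increment(uint64_t row_value)
{
    return row_value;
}

// Each caller now folds that into the shared counter explicitly,
// the moral equivalent of set_if_bigger(info->s->state.auto_increment, v).
static void on_row_written(SharedState& s, uint64_t row_value)
{
    s.auto_increment = std::max(s.auto_increment,
                                retrieve_auto_increment(row_value));
}
```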
diff --git a/VC++Files/client/mysql.dsp b/VC++Files/client/mysql.dsp index ac74515b588..510107c8308 100644 --- a/VC++Files/client/mysql.dsp +++ b/VC++Files/client/mysql.dsp @@ -144,6 +144,10 @@ SOURCE=.\readline.cpp # End Source File # Begin Source File +SOURCE=..\mysys\my_conio.c +# End Source File +# Begin Source File + SOURCE=.\sql_string.cpp # End Source File # End Target diff --git a/VC++Files/client/mysql_ia64.dsp b/VC++Files/client/mysql_ia64.dsp index 3fe2e2a2328..8de283d1e0b 100644 --- a/VC++Files/client/mysql_ia64.dsp +++ b/VC++Files/client/mysql_ia64.dsp @@ -130,6 +130,10 @@ SOURCE=.\readline.cpp # End Source File # Begin Source File +SOURCE=..\mysys\my_conio.c +# End Source File +# Begin Source File + SOURCE=.\sql_string.cpp # End Source File # End Target diff --git a/extra/yassl/src/handshake.cpp b/extra/yassl/src/handshake.cpp index 12b62f26e14..66ec64f4af8 100644 --- a/extra/yassl/src/handshake.cpp +++ b/extra/yassl/src/handshake.cpp @@ -880,7 +880,7 @@ int sendData(SSL& ssl, const void* buffer, int sz) ssl.SetError(no_error); ssl.verfiyHandShakeComplete(); - if (ssl.GetError()) return 0; + if (ssl.GetError()) return -1; int sent = 0; for (;;) { @@ -891,7 +891,7 @@ int sendData(SSL& ssl, const void* buffer, int sz) buildMessage(ssl, out, data); ssl.Send(out.get_buffer(), out.get_size()); - if (ssl.GetError()) return 0; + if (ssl.GetError()) return -1; sent += len; if (sent == sz) break; } @@ -918,14 +918,14 @@ int receiveData(SSL& ssl, Data& data) ssl.SetError(no_error); ssl.verfiyHandShakeComplete(); - if (ssl.GetError()) return 0; + if (ssl.GetError()) return -1; if (!ssl.bufferedData()) processReply(ssl); ssl.fillData(data); ssl.useLog().ShowData(data.get_length()); - if (ssl.GetError()) return 0; + if (ssl.GetError()) return -1; if (data.get_length() == 0 && ssl.getSocket().WouldBlock()) { ssl.SetError(YasslError(SSL_ERROR_WANT_READ)); diff --git a/extra/yassl/src/socket_wrapper.cpp b/extra/yassl/src/socket_wrapper.cpp index 803f4b01249..06b403c999d 100644 --- a/extra/yassl/src/socket_wrapper.cpp +++ b/extra/yassl/src/socket_wrapper.cpp @@ -113,13 +113,22 @@ uint Socket::get_ready() const uint Socket::send(const byte* buf, unsigned int sz, int flags) const { + const byte* pos = buf; + const byte* end = pos + sz; + assert(socket_ != INVALID_SOCKET); - int sent = ::send(socket_, reinterpret_cast<const char *>(buf), sz, flags); + + while (pos != end) { + int sent = ::send(socket_, reinterpret_cast<const char *>(pos), + static_cast<int>(end - pos), flags); if (sent == -1) return 0; - return sent; + pos += sent; + } + + return sz; } diff --git a/myisam/mi_check.c b/myisam/mi_check.c index 7d87ecd8595..e0f04965650 100644 --- a/myisam/mi_check.c +++ b/myisam/mi_check.c @@ -453,25 +453,24 @@ int chk_key(MI_CHECK *param, register MI_INFO *info) if ((uint) share->base.auto_key -1 == key) { /* Check that auto_increment key is bigger than max key value */ - ulonglong save_auto_value=info->s->state.auto_increment; - info->s->state.auto_increment=0; + ulonglong auto_increment; info->lastinx=key; _mi_read_key_record(info, 0L, info->rec_buff); - update_auto_increment(info, info->rec_buff); - if (info->s->state.auto_increment > save_auto_value) + auto_increment= retrieve_auto_increment(info, info->rec_buff); + if (auto_increment > info->s->state.auto_increment) { - mi_check_print_warning(param, - "Auto-increment value: %s is smaller than max used value: %s", - llstr(save_auto_value,buff2), - llstr(info->s->state.auto_increment, buff)); + mi_check_print_warning(param, "Auto-increment 
value: %s is smaller " + "than max used value: %s", + llstr(info->s->state.auto_increment,buff2), + llstr(auto_increment, buff)); } if (param->testflag & T_AUTO_INC) { - set_if_bigger(info->s->state.auto_increment, - param->auto_increment_value); + set_if_bigger(info->s->state.auto_increment, + auto_increment); + set_if_bigger(info->s->state.auto_increment, + param->auto_increment_value); } - else - info->s->state.auto_increment=save_auto_value; /* Check that there isn't a row with auto_increment = 0 in the table */ mi_extra(info,HA_EXTRA_KEYREAD,0); @@ -481,8 +480,8 @@ int chk_key(MI_CHECK *param, register MI_INFO *info) { /* Don't count this as a real warning, as myisamchk can't correct it */ uint save=param->warning_printed; - mi_check_print_warning(param, - "Found row where the auto_increment column has the value 0"); + mi_check_print_warning(param, "Found row where the auto_increment " + "column has the value 0"); param->warning_printed=save; } mi_extra(info,HA_EXTRA_NO_KEYREAD,0); @@ -4099,11 +4098,10 @@ void update_auto_increment_key(MI_CHECK *param, MI_INFO *info, } else { - ulonglong auto_increment= (repair_only ? info->s->state.auto_increment : - param->auto_increment_value); - info->s->state.auto_increment=0; - update_auto_increment(info, record); + ulonglong auto_increment= retrieve_auto_increment(info, record); set_if_bigger(info->s->state.auto_increment,auto_increment); + if (!repair_only) + set_if_bigger(info->s->state.auto_increment, param->auto_increment_value); } mi_extra(info,HA_EXTRA_NO_KEYREAD,0); my_free((char*) record, MYF(0)); diff --git a/myisam/mi_key.c b/myisam/mi_key.c index 717a5dbd56e..17ea56f0210 100644 --- a/myisam/mi_key.c +++ b/myisam/mi_key.c @@ -507,22 +507,21 @@ int _mi_read_key_record(MI_INFO *info, my_off_t filepos, byte *buf) return(-1); /* Wrong data to read */ } - + /* - Update auto_increment info + Retrieve auto_increment info SYNOPSIS - update_auto_increment() + retrieve_auto_increment() info MyISAM handler record Row to update IMPLEMENTATION - Only replace the auto_increment value if it is higher than the previous - one. For signed columns we don't update the auto increment value if it's + For signed columns we don't retrieve the auto increment value if it's less than zero. */ -void update_auto_increment(MI_INFO *info,const byte *record) +ulonglong retrieve_auto_increment(MI_INFO *info,const byte *record) { ulonglong value= 0; /* Store unsigned values here */ longlong s_value= 0; /* Store signed values here */ @@ -587,6 +586,5 @@ void update_auto_increment(MI_INFO *info,const byte *record) and if s_value == 0 then value will contain either s_value or the correct value. */ - set_if_bigger(info->s->state.auto_increment, - (s_value > 0) ? (ulonglong) s_value : value); + return (s_value > 0) ? 
(ulonglong) s_value : value; } diff --git a/myisam/mi_update.c b/myisam/mi_update.c index 937c9983b45..f8b5cf55406 100644 --- a/myisam/mi_update.c +++ b/myisam/mi_update.c @@ -164,7 +164,8 @@ int mi_update(register MI_INFO *info, const byte *oldrec, byte *newrec) key_changed|= HA_STATE_CHANGED; /* Must update index file */ } if (auto_key_changed) - update_auto_increment(info,newrec); + set_if_bigger(info->s->state.auto_increment, + retrieve_auto_increment(info, newrec)); if (share->calc_checksum) info->state->checksum+=(info->checksum - old_checksum); diff --git a/myisam/mi_write.c b/myisam/mi_write.c index 5e79b2937cc..9ab8753f6d7 100644 --- a/myisam/mi_write.c +++ b/myisam/mi_write.c @@ -149,7 +149,8 @@ int mi_write(MI_INFO *info, byte *record) info->state->checksum+=info->checksum; } if (share->base.auto_key) - update_auto_increment(info,record); + set_if_bigger(info->s->state.auto_increment, + retrieve_auto_increment(info, record)); info->update= (HA_STATE_CHANGED | HA_STATE_AKTIV | HA_STATE_WRITTEN | HA_STATE_ROW_CHANGED); info->state->records++; diff --git a/myisam/myisamdef.h b/myisam/myisamdef.h index 6ccb52aff22..d589173f0e7 100644 --- a/myisam/myisamdef.h +++ b/myisam/myisamdef.h @@ -582,7 +582,7 @@ extern uint _mi_pack_key(MI_INFO *info,uint keynr,uchar *key,uchar *old, extern int _mi_read_key_record(MI_INFO *info,my_off_t filepos,byte *buf); extern int _mi_read_cache(IO_CACHE *info,byte *buff,my_off_t pos, uint length,int re_read_if_possibly); -extern void update_auto_increment(MI_INFO *info,const byte *record); +extern ulonglong retrieve_auto_increment(MI_INFO *info,const byte *record); extern byte *mi_alloc_rec_buff(MI_INFO *,ulong, byte**); #define mi_get_rec_buff_ptr(info,buf) \ diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 4a9628c0721..92407380ac2 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -671,6 +671,12 @@ sub command_line_setup () { { push(@opt_extra_mysqld_opt, $arg); } + elsif ( $arg =~ /^--$/ ) + { + # It is an effect of setting 'pass_through' in option processing + # that the lone '--' separating options from arguments survives, + # simply ignore it. 
+ } elsif ( $arg =~ /^-/ ) { usage("Invalid option \"$arg\""); @@ -3428,6 +3434,13 @@ sub valgrind_arguments { ############################################################################## sub usage ($) { + my $message= shift; + + if ( $message ) + { + print STDERR "$message \n"; + } + print STDERR <<HERE; mysql-test-run [ OPTIONS ] [ TESTCASE ] diff --git a/mysql-test/r/ctype_sjis.result b/mysql-test/r/ctype_sjis.result index d1976a516d2..dab5991b505 100644 --- a/mysql-test/r/ctype_sjis.result +++ b/mysql-test/r/ctype_sjis.result @@ -172,6 +172,6 @@ c2h ab_def drop table t1; SET NAMES sjis; -SELECT HEX('²“‘@\Œ\') FROM DUAL; -HEX('²“‘@_Œ\') +SELECT HEX('²“‘@Œ\') FROM DUAL; +HEX('²“‘@Œ\') 8DB2939181408C5C diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index 0e029802555..bc1bd3b6757 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -7,20 +7,20 @@ period_add("9602",-12) period_diff(199505,"9404") 199502 13 select now()-now(),weekday(curdate())-weekday(now()),unix_timestamp()-unix_timestamp(now()); now()-now() weekday(curdate())-weekday(now()) unix_timestamp()-unix_timestamp(now()) -0 0 0 +0.000000 0 0 select from_unixtime(unix_timestamp("1994-03-02 10:11:12")),from_unixtime(unix_timestamp("1994-03-02 10:11:12"),"%Y-%m-%d %h:%i:%s"),from_unixtime(unix_timestamp("1994-03-02 10:11:12"))+0; from_unixtime(unix_timestamp("1994-03-02 10:11:12")) from_unixtime(unix_timestamp("1994-03-02 10:11:12"),"%Y-%m-%d %h:%i:%s") from_unixtime(unix_timestamp("1994-03-02 10:11:12"))+0 -1994-03-02 10:11:12 1994-03-02 10:11:12 19940302101112 +1994-03-02 10:11:12 1994-03-02 10:11:12 19940302101112.000000 select sec_to_time(9001),sec_to_time(9001)+0,time_to_sec("15:12:22"), sec_to_time(time_to_sec("0:30:47")/6.21); sec_to_time(9001) sec_to_time(9001)+0 time_to_sec("15:12:22") sec_to_time(time_to_sec("0:30:47")/6.21) -02:30:01 23001 54742 00:04:57 +02:30:01 23001.000000 54742 00:04:57 select sec_to_time(time_to_sec('-838:59:59')); sec_to_time(time_to_sec('-838:59:59')) -838:59:59 select now()-curdate()*1000000-curtime(); now()-curdate()*1000000-curtime() -0 +0.000000 select strcmp(current_timestamp(),concat(current_date()," ",current_time())); strcmp(current_timestamp(),concat(current_date()," ",current_time())) 0 @@ -751,6 +751,10 @@ select monthname(str_to_date(null, '%m')), monthname(str_to_date(null, '%m')), monthname(str_to_date(1, '%m')), monthname(str_to_date(0, '%m')); monthname(str_to_date(null, '%m')) monthname(str_to_date(null, '%m')) monthname(str_to_date(1, '%m')) monthname(str_to_date(0, '%m')) NULL NULL January NULL +select now() - now() + 0, curtime() - curtime() + 0, +sec_to_time(1) + 0, from_unixtime(1) + 0; +now() - now() + 0 curtime() - curtime() + 0 sec_to_time(1) + 0 from_unixtime(1) + 0 +0.000000 0.000000 1.000000 19700101030001.000000 explain extended select timestampdiff(SQL_TSI_WEEK, '2001-02-01', '2001-05-01') as a1, timestampdiff(SQL_TSI_FRAC_SECOND, '2001-02-01 12:59:59.120000', '2001-05-01 12:58:58.119999') as a2; id select_type table type possible_keys key key_len ref rows Extra diff --git a/mysql-test/r/lock_multi.result b/mysql-test/r/lock_multi.result index 89428ed6a9b..2cb122fb988 100644 --- a/mysql-test/r/lock_multi.result +++ b/mysql-test/r/lock_multi.result @@ -43,6 +43,14 @@ Field Type Null Key Default Extra a int(11) YES NULL unlock tables; drop table t1; +CREATE DATABASE mysqltest_1; +FLUSH TABLES WITH READ LOCK; + DROP DATABASE mysqltest_1; +DROP DATABASE mysqltest_1; +ERROR HY000: Can't execute the query because you 
have a conflicting read lock +UNLOCK TABLES; +DROP DATABASE mysqltest_1; +ERROR HY000: Can't drop database 'mysqltest_1'; database doesn't exist use mysql; LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE; FLUSH TABLES; diff --git a/mysql-test/r/merge.result b/mysql-test/r/merge.result index 9a34d6fba58..568f83b7d6d 100644 --- a/mysql-test/r/merge.result +++ b/mysql-test/r/merge.result @@ -776,3 +776,9 @@ insert into t1 values ("Monty"),("WAX"),("Walrus"); alter table t1 engine=MERGE; ERROR HY000: Table storage engine for 't1' doesn't have this option drop table t1; +create table t1 (b bit(1)); +create table t2 (b bit(1)); +create table tm (b bit(1)) engine = merge union = (t1,t2); +select * from tm; +b +drop table tm, t1, t2; diff --git a/mysql-test/r/ndb_condition_pushdown.result b/mysql-test/r/ndb_condition_pushdown.result index 27a510b252f..4e5597a4851 100644 --- a/mysql-test/r/ndb_condition_pushdown.result +++ b/mysql-test/r/ndb_condition_pushdown.result @@ -1842,5 +1842,29 @@ a b select * from t1 where b like 'abc' or b like 'abc'; a b 3 abc +drop table t1; +create table t1 ( fname varchar(255), lname varchar(255) ) +engine=ndbcluster; +insert into t1 values ("Young","Foo"); +set engine_condition_pushdown = 0; +SELECT fname, lname FROM t1 WHERE (fname like 'Y%') or (lname like 'F%'); +fname lname +Young Foo +set engine_condition_pushdown = 1; +SELECT fname, lname FROM t1 WHERE (fname like 'Y%') or (lname like 'F%'); +fname lname +Young Foo +insert into t1 values ("aaa", "aaa"); +insert into t1 values ("bbb", "bbb"); +insert into t1 values ("ccc", "ccc"); +insert into t1 values ("ddd", "ddd"); +set engine_condition_pushdown = 0; +SELECT fname, lname FROM t1 WHERE (fname like 'Y%') or (lname like 'F%'); +fname lname +Young Foo +set engine_condition_pushdown = 1; +SELECT fname, lname FROM t1 WHERE (fname like 'Y%') or (lname like 'F%'); +fname lname +Young Foo set engine_condition_pushdown = @old_ecpd; DROP TABLE t1,t2,t3,t4,t5; diff --git a/mysql-test/r/ndb_loaddatalocal.result b/mysql-test/r/ndb_loaddatalocal.result new file mode 100644 index 00000000000..1d15c608f03 --- /dev/null +++ b/mysql-test/r/ndb_loaddatalocal.result @@ -0,0 +1,46 @@ +DROP TABLE IF EXISTS t1; +create table t1(a int) engine=myisam; +select * into outfile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1; +drop table t1; +create table t1(a int) engine=ndb; +load data local infile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1; +select count(*) from t1; +count(*) +10000 +drop table t1; +create table t1(a int) engine=myisam; +insert into t1 values (1), (2), (2), (3); +select * into outfile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1; +drop table t1; +create table t1(a int primary key) engine=ndb; +load data local infile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1; +select * from t1 order by a; +a +1 +2 +3 +drop table t1; +create table t1(a int) engine=myisam; +insert into t1 values (1), (1), (2), (3); +select * into outfile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1; +drop table t1; +create table t1(a int primary key) engine=ndb; +load data local infile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1; +select * from t1 order by a; +a +1 +2 +3 +drop table t1; +create table t1(a int) engine=myisam; +insert into t1 values (1), (2), (3), (3); +select * into outfile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from 
t1; +drop table t1; +create table t1(a int primary key) engine=ndb; +load data local infile 'MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1; +select * from t1 order by a; +a +1 +2 +3 +drop table t1; diff --git a/mysql-test/r/ndb_lock.result b/mysql-test/r/ndb_lock.result index b8c2c58aac4..3b433023843 100644 --- a/mysql-test/r/ndb_lock.result +++ b/mysql-test/r/ndb_lock.result @@ -63,3 +63,83 @@ pk u o 5 5 5 insert into t1 values (1,1,1); drop table t1; +create table t1 (x integer not null primary key, y varchar(32), z integer, key(z)) engine = ndb; +insert into t1 values (1,'one',1), (2,'two',2),(3,"three",3); +begin; +select * from t1 where x = 1 for update; +x y z +1 one 1 +begin; +select * from t1 where x = 2 for update; +x y z +2 two 2 +select * from t1 where x = 1 for update; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +rollback; +commit; +begin; +select * from t1 where y = 'one' or y = 'three' order by x for update; +x y z +1 one 1 +3 three 3 +begin; +select * from t1 where x = 1 for update; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +rollback; +commit; +begin; +select * from t1 where z > 1 and z < 3 for update; +x y z +2 two 2 +begin; +select * from t1 where x = 1 for update; +x y z +1 one 1 +select * from t1 where x = 2 for update; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +rollback; +commit; +begin; +select * from t1 where x = 1 lock in share mode; +x y z +1 one 1 +begin; +select * from t1 where x = 1 lock in share mode; +x y z +1 one 1 +select * from t1 where x = 2 for update; +x y z +2 two 2 +select * from t1 where x = 1 for update; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +rollback; +commit; +begin; +select * from t1 where y = 'one' or y = 'three' order by x lock in share mode; +x y z +1 one 1 +3 three 3 +begin; +select * from t1 where y = 'one' lock in share mode; +x y z +1 one 1 +select * from t1 where x = 1 for update; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +rollback; +commit; +begin; +select * from t1 where z > 1 and z < 3 lock in share mode; +x y z +2 two 2 +begin; +select * from t1 where z = 1 lock in share mode; +x y z +1 one 1 +select * from t1 where x = 1 for update; +x y z +1 one 1 +select * from t1 where x = 2 for update; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +rollback; +commit; +drop table t1; diff --git a/mysql-test/r/ndb_truncate.result b/mysql-test/r/ndb_truncate.result index 38f3a78029c..811e5e3afeb 100644 --- a/mysql-test/r/ndb_truncate.result +++ b/mysql-test/r/ndb_truncate.result @@ -1,14 +1,23 @@ -DROP TABLE IF EXISTS t2; -CREATE TABLE t2 ( -a bigint unsigned NOT NULL PRIMARY KEY, +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 ( +a bigint unsigned NOT NULL AUTO_INCREMENT PRIMARY KEY, b int unsigned not null, c int unsigned ) engine=ndbcluster; -select count(*) from t2; +select count(*) from t1; count(*) 5000 -truncate table t2; -select count(*) from t2; +select * from t1 order by a limit 2; +a b c +1 509 2500 +2 510 7 +truncate table t1; +select count(*) from t1; count(*) 0 -drop table t2; +insert into t1 values(NULL,1,1),(NULL,2,2); +select * from t1 order by a; +a b c +1 1 1 +2 2 2 +drop table t1; diff --git a/mysql-test/r/sysdate_is_now.result b/mysql-test/r/sysdate_is_now.result index 82861436ff6..1ebbb8c1588 100644 --- a/mysql-test/r/sysdate_is_now.result +++ b/mysql-test/r/sysdate_is_now.result @@ -1,4 +1,4 @@ set timestamp=1; SELECT sleep(1),NOW()-SYSDATE() 
as zero; sleep(1) zero -0 0 +0 0.000000 diff --git a/mysql-test/r/type_newdecimal.result b/mysql-test/r/type_newdecimal.result index 968c6d3ec6f..4caec152a1f 100644 --- a/mysql-test/r/type_newdecimal.result +++ b/mysql-test/r/type_newdecimal.result @@ -1397,3 +1397,14 @@ c1 9999999999999999999999999999999999999999999999999999999999999999 9999999999999999999999999999999999999999999999999999999999999999 drop table t1; +create table t1 (i int, j int); +insert into t1 values (1,1), (1,2), (2,3), (2,4); +select i, count(distinct j) from t1 group by i; +i count(distinct j) +1 2 +2 2 +select i+0.0 as i2, count(distinct j) from t1 group by i2; +i2 count(distinct j) +1.0 2 +2.0 2 +drop table t1; diff --git a/mysql-test/r/view_grant.result b/mysql-test/r/view_grant.result index f6559e6f838..a8619201810 100644 --- a/mysql-test/r/view_grant.result +++ b/mysql-test/r/view_grant.result @@ -618,3 +618,32 @@ ERROR HY000: There is no 'no-such-user'@'localhost' registered DROP VIEW v; DROP TABLE t1; USE test; +CREATE USER mysqltest_db1@localhost identified by 'PWD'; +GRANT ALL ON mysqltest_db1.* TO mysqltest_db1@localhost WITH GRANT OPTION; +CREATE SCHEMA mysqltest_db1 ; +USE mysqltest_db1 ; +CREATE TABLE t1 (f1 INTEGER); +CREATE VIEW view1 AS +SELECT * FROM t1; +SHOW CREATE VIEW view1; +View Create View +view1 CREATE ALGORITHM=UNDEFINED DEFINER=`mysqltest_db1`@`localhost` SQL SECURITY DEFINER VIEW `view1` AS select `t1`.`f1` AS `f1` from `t1` +CREATE VIEW view2 AS +SELECT * FROM view1; +# Here comes a suspicious warning +SHOW CREATE VIEW view2; +View Create View +view2 CREATE ALGORITHM=UNDEFINED DEFINER=`mysqltest_db1`@`localhost` SQL SECURITY DEFINER VIEW `view2` AS select `view1`.`f1` AS `f1` from `view1` +# But the view view2 is usable +SELECT * FROM view2; +f1 +CREATE VIEW view3 AS +SELECT * FROM view2; +SELECT * from view3; +f1 +DROP VIEW mysqltest_db1.view3; +DROP VIEW mysqltest_db1.view2; +DROP VIEW mysqltest_db1.view1; +DROP TABLE mysqltest_db1.t1; +DROP SCHEMA mysqltest_db1; +DROP USER mysqltest_db1@localhost; diff --git a/mysql-test/t/ctype_sjis.test b/mysql-test/t/ctype_sjis.test index 1d807b5e9a8..01e0b334554 100644 --- a/mysql-test/t/ctype_sjis.test +++ b/mysql-test/t/ctype_sjis.test @@ -78,6 +78,6 @@ SET collation_connection='sjis_bin'; --character_set sjis SET NAMES sjis; -SELECT HEX('²“‘@\Œ\') FROM DUAL; +SELECT HEX('²“‘@Œ\') FROM DUAL; # End of 4.1 tests diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 1c7f387e354..dc2e74c9365 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -367,6 +367,13 @@ select last_day('2005-01-00'); select monthname(str_to_date(null, '%m')), monthname(str_to_date(null, '%m')), monthname(str_to_date(1, '%m')), monthname(str_to_date(0, '%m')); +# +# Bug #16546 +# + +select now() - now() + 0, curtime() - curtime() + 0, + sec_to_time(1) + 0, from_unixtime(1) + 0; + # End of 4.1 tests explain extended select timestampdiff(SQL_TSI_WEEK, '2001-02-01', '2001-05-01') as a1, diff --git a/mysql-test/t/lock_multi.test b/mysql-test/t/lock_multi.test index 3c829848bf3..ee03088b8c3 100644 --- a/mysql-test/t/lock_multi.test +++ b/mysql-test/t/lock_multi.test @@ -109,6 +109,39 @@ unlock tables; drop table t1; # +# Bug#19815 - CREATE/RENAME/DROP DATABASE can deadlock on a global read lock +# +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); +# +connection con1; +CREATE DATABASE mysqltest_1; +FLUSH TABLES WITH READ LOCK; +# +# With bug in place: acquire LOCK_mysql_create_table and +# wait in 
wait_if_global_read_lock(). +connection con2; +send DROP DATABASE mysqltest_1; +--sleep 1 +# +# With bug in place: try to acquire LOCK_mysql_create_table... +# When fixed: Reject dropping db because of the read lock. +connection con1; +--error ER_CANT_UPDATE_WITH_READLOCK +DROP DATABASE mysqltest_1; +UNLOCK TABLES; +# +connection con2; +reap; +# +connection default; +disconnect con1; +disconnect con2; +# This must have been dropped by connection 2 already, +# which waited until the global read lock was released. +--error ER_DB_DROP_EXISTS +DROP DATABASE mysqltest_1; + # Bug#16986 - Deadlock condition with MyISAM tables # connection locker; diff --git a/mysql-test/t/merge.test b/mysql-test/t/merge.test index 7ea14a811ed..400279a826b 100644 --- a/mysql-test/t/merge.test +++ b/mysql-test/t/merge.test @@ -390,4 +390,13 @@ insert into t1 values ("Monty"),("WAX"),("Walrus"); alter table t1 engine=MERGE; drop table t1; +# +# BUG#19648 - Merge table does not work with bit types +# +create table t1 (b bit(1)); +create table t2 (b bit(1)); +create table tm (b bit(1)) engine = merge union = (t1,t2); +select * from tm; +drop table tm, t1, t2; + # End of 5.0 tests diff --git a/mysql-test/t/ndb_condition_pushdown.test b/mysql-test/t/ndb_condition_pushdown.test index 398ca3c502c..cc138b32b7e 100644 --- a/mysql-test/t/ndb_condition_pushdown.test +++ b/mysql-test/t/ndb_condition_pushdown.test @@ -1686,5 +1686,27 @@ select * from t1 where b like 'ab' or b like 'ab'; select * from t1 where b like 'abc'; select * from t1 where b like 'abc' or b like 'abc'; +# bug#20406 (maybe same as bug#17421 -1, not seen on 32-bit x86) +drop table t1; +create table t1 ( fname varchar(255), lname varchar(255) ) +engine=ndbcluster; +insert into t1 values ("Young","Foo"); + +set engine_condition_pushdown = 0; +SELECT fname, lname FROM t1 WHERE (fname like 'Y%') or (lname like 'F%'); +set engine_condition_pushdown = 1; +SELECT fname, lname FROM t1 WHERE (fname like 'Y%') or (lname like 'F%'); + +# make sure optimizer does not do some crazy shortcut +insert into t1 values ("aaa", "aaa"); +insert into t1 values ("bbb", "bbb"); +insert into t1 values ("ccc", "ccc"); +insert into t1 values ("ddd", "ddd"); + +set engine_condition_pushdown = 0; +SELECT fname, lname FROM t1 WHERE (fname like 'Y%') or (lname like 'F%'); +set engine_condition_pushdown = 1; +SELECT fname, lname FROM t1 WHERE (fname like 'Y%') or (lname like 'F%'); + set engine_condition_pushdown = @old_ecpd; DROP TABLE t1,t2,t3,t4,t5; diff --git a/mysql-test/t/ndb_loaddatalocal.test b/mysql-test/t/ndb_loaddatalocal.test new file mode 100644 index 00000000000..47054ecfbf5 --- /dev/null +++ b/mysql-test/t/ndb_loaddatalocal.test @@ -0,0 +1,70 @@ +-- source include/have_ndb.inc +-- source include/not_embedded.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +create table t1(a int) engine=myisam; +let $1=10000; +disable_query_log; +set SQL_LOG_BIN=0; +while ($1) +{ + insert into t1 values(1); + dec $1; +} +set SQL_LOG_BIN=1; +enable_query_log; +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +eval select * into outfile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1; +#This will generate a 20KB file, now test LOAD DATA LOCAL +drop table t1; + +create table t1(a int) engine=ndb; +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +eval load data local infile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1; +select count(*) from t1; +system rm 
$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile ; +drop table t1; + +create table t1(a int) engine=myisam; +insert into t1 values (1), (2), (2), (3); +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +eval select * into outfile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1; +drop table t1; + +create table t1(a int primary key) engine=ndb; +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +eval load data local infile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1; +system rm $MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile; +select * from t1 order by a; +drop table t1; + +create table t1(a int) engine=myisam; +insert into t1 values (1), (1), (2), (3); +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +eval select * into outfile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1; +drop table t1; + +create table t1(a int primary key) engine=ndb; +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +eval load data local infile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1; +system rm $MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile; +select * from t1 order by a; +drop table t1; + +create table t1(a int) engine=myisam; +insert into t1 values (1), (2), (3), (3); +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +eval select * into outfile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' from t1; +drop table t1; + +create table t1(a int primary key) engine=ndb; +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +eval load data local infile '$MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile' into table t1; +system rm $MYSQLTEST_VARDIR/master-data/ndb_loaddatalocal.select_outfile; +select * from t1 order by a; +drop table t1; + +# End of 4.1 tests diff --git a/mysql-test/t/ndb_lock.test b/mysql-test/t/ndb_lock.test index 6945f91ee39..54214ee72ec 100644 --- a/mysql-test/t/ndb_lock.test +++ b/mysql-test/t/ndb_lock.test @@ -69,4 +69,117 @@ insert into t1 values (1,1,1); drop table t1; +# Lock for update + +create table t1 (x integer not null primary key, y varchar(32), z integer, key(z)) engine = ndb; + +insert into t1 values (1,'one',1), (2,'two',2),(3,"three",3); + +# PK access +connection con1; +begin; +select * from t1 where x = 1 for update; + +connection con2; +begin; +select * from t1 where x = 2 for update; +--error 1205 +select * from t1 where x = 1 for update; +rollback; + +connection con1; +commit; + +# table scan +connection con1; +begin; +select * from t1 where y = 'one' or y = 'three' order by x for update; + +connection con2; +begin; +# Have to check with pk access here since scans take locks on +# all rows and then release them in chunks +# Bug #20390 SELECT FOR UPDATE does not release locks of untouched rows in full table scans +#select * from t1 where x = 2 for update; +--error 1205 +select * from t1 where x = 1 for update; +rollback; + +connection con1; +commit; + +# index scan +connection con1; +begin; +select * from t1 where z > 1 and z < 3 for update; + +connection con2; +begin; +# Have to check with pk access here since scans take locks on +# all rows and then release them in chunks +select * from t1 where x = 1 for update; +--error 1205 +select * from t1 where x = 2 for update; +rollback; + +connection con1; +commit; + +# share locking + +# PK access +connection con1; +begin; +select * from t1 where x = 1 lock in share mode; + +connection con2; +begin; +select * from t1 where x = 1 
lock in share mode; +select * from t1 where x = 2 for update; +--error 1205 +select * from t1 where x = 1 for update; +rollback; + +connection con1; +commit; + +# table scan +connection con1; +begin; +select * from t1 where y = 'one' or y = 'three' order by x lock in share mode; + +connection con2; +begin; +select * from t1 where y = 'one' lock in share mode; +# Have to check with pk access here since scans take locks on +# all rows and then release them in chunks +# Bug #20390 SELECT FOR UPDATE does not release locks of untouched rows in full table scans +#select * from t1 where x = 2 for update; +--error 1205 +select * from t1 where x = 1 for update; +rollback; + +connection con1; +commit; + +# index scan +connection con1; +begin; +select * from t1 where z > 1 and z < 3 lock in share mode; + +connection con2; +begin; +select * from t1 where z = 1 lock in share mode; +# Have to check with pk access here since scans take locks on +# all rows and then release them in chunks +select * from t1 where x = 1 for update; +--error 1205 +select * from t1 where x = 2 for update; +rollback; + +connection con1; +commit; + +drop table t1; + # End of 4.1 tests diff --git a/mysql-test/t/ndb_truncate.test b/mysql-test/t/ndb_truncate.test index 73af70d0d0f..a1ef4be0d48 100644 --- a/mysql-test/t/ndb_truncate.test +++ b/mysql-test/t/ndb_truncate.test @@ -2,12 +2,11 @@ -- source include/not_embedded.inc --disable_warnings -DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t1, t2; --enable_warnings - -CREATE TABLE t2 ( - a bigint unsigned NOT NULL PRIMARY KEY, +CREATE TABLE t1 ( + a bigint unsigned NOT NULL AUTO_INCREMENT PRIMARY KEY, b int unsigned not null, c int unsigned ) engine=ndbcluster; @@ -20,17 +19,23 @@ let $1=500; disable_query_log; while ($1) { - eval insert into t2 values($1*10, $1+9, 5*$1), ($1*10+1, $1+10, 7),($1*10+2, $1+10, 7*$1), ($1*10+3, $1+10, 10+$1), ($1*10+4, $1+10, 70*$1), ($1*10+5, $1+10, 7), ($1*10+6, $1+10, 9), ($1*10+7, $1+299, 899), ($1*10+8, $1+10, 12), ($1*10+9, $1+10, 14*$1); + eval insert into t1 values(NULL, $1+9, 5*$1), (NULL, $1+10, 7),(NULL, $1+10, 7*$1), (NULL, $1+10, 10+$1), (NULL, $1+10, 70*$1), (NULL, $1+10, 7), (NULL, $1+10, 9), (NULL, $1+299, 899), (NULL, $1+10, 12), (NULL, $1+10, 14*$1); dec $1; } enable_query_log; -select count(*) from t2; +select count(*) from t1; + +select * from t1 order by a limit 2; + +truncate table t1; + +select count(*) from t1; -truncate table t2; +insert into t1 values(NULL,1,1),(NULL,2,2); -select count(*) from t2; +select * from t1 order by a; -drop table t2; +drop table t1; # End of 4.1 tests diff --git a/mysql-test/t/type_newdecimal.test b/mysql-test/t/type_newdecimal.test index 74782a5bddb..35aff8b3c5a 100644 --- a/mysql-test/t/type_newdecimal.test +++ b/mysql-test/t/type_newdecimal.test @@ -1095,3 +1095,12 @@ insert into t1 values( insert into t1 values(1e100); select * from t1; drop table t1; + +# +# Bug#19667 group by a decimal expression yields wrong result +# +create table t1 (i int, j int); +insert into t1 values (1,1), (1,2), (2,3), (2,4); +select i, count(distinct j) from t1 group by i; +select i+0.0 as i2, count(distinct j) from t1 group by i2; +drop table t1; diff --git a/mysql-test/t/view_grant.test b/mysql-test/t/view_grant.test index 4663a667d25..f160de2d798 100644 --- a/mysql-test/t/view_grant.test +++ b/mysql-test/t/view_grant.test @@ -807,3 +807,42 @@ SELECT * FROM v; DROP VIEW v; DROP TABLE t1; USE test; + +# +# Bug#20363: Create view on just created view is now denied +# +eval CREATE USER mysqltest_db1@localhost 
identified by 'PWD'; +eval GRANT ALL ON mysqltest_db1.* TO mysqltest_db1@localhost WITH GRANT OPTION; + +# The session with the non root user is needed. +--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK +connect (session1,localhost,mysqltest_db1,PWD,test); + +CREATE SCHEMA mysqltest_db1 ; +USE mysqltest_db1 ; + +CREATE TABLE t1 (f1 INTEGER); + +CREATE VIEW view1 AS +SELECT * FROM t1; +SHOW CREATE VIEW view1; + +CREATE VIEW view2 AS +SELECT * FROM view1; +--echo # Here comes a suspicious warning +SHOW CREATE VIEW view2; +--echo # But the view view2 is usable +SELECT * FROM view2; + +CREATE VIEW view3 AS +SELECT * FROM view2; + +SELECT * from view3; + +connection default; +DROP VIEW mysqltest_db1.view3; +DROP VIEW mysqltest_db1.view2; +DROP VIEW mysqltest_db1.view1; +DROP TABLE mysqltest_db1.t1; +DROP SCHEMA mysqltest_db1; +DROP USER mysqltest_db1@localhost; diff --git a/mysql-test/valgrind.supp b/mysql-test/valgrind.supp index 3c2ac1a7ea5..65c5a82db36 100644 --- a/mysql-test/valgrind.supp +++ b/mysql-test/valgrind.supp @@ -292,3 +292,18 @@ futex(utime) fun:__lll_mutex_unlock_wake } + +# +# BUG#19940: NDB sends uninitialized parts of field buffers across the wire. +# This is "works as designed"; the uninitialized part is not used at the +# other end (but Valgrind cannot see this). +# +{ + bug19940 + Memcheck:Param + socketcall.sendto(msg) + fun:send + fun:_ZN15TCP_Transporter6doSendEv + fun:_ZN19TransporterRegistry11performSendEv + fun:_ZN19TransporterRegistry14forceSendCheckEi +} diff --git a/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/ndb/include/ndbapi/NdbIndexScanOperation.hpp index e9f92d84d1c..638ecb17779 100644 --- a/ndb/include/ndbapi/NdbIndexScanOperation.hpp +++ b/ndb/include/ndbapi/NdbIndexScanOperation.hpp @@ -61,11 +61,14 @@ public: Uint32 parallel, bool order_by, bool order_desc = false, - bool read_range_no = false) { + bool read_range_no = false, + bool keyinfo = false) { Uint32 scan_flags = (SF_OrderBy & -(Int32)order_by) | (SF_Descending & -(Int32)order_desc) | - (SF_ReadRangeNo & -(Int32)read_range_no); + (SF_ReadRangeNo & -(Int32)read_range_no) | + (SF_KeyInfo & -(Int32)keyinfo); + return readTuples(lock_mode, scan_flags, parallel); } #endif diff --git a/ndb/include/ndbapi/NdbScanOperation.hpp b/ndb/include/ndbapi/NdbScanOperation.hpp index 77b255dc2f4..4a8425852b9 100644 --- a/ndb/include/ndbapi/NdbScanOperation.hpp +++ b/ndb/include/ndbapi/NdbScanOperation.hpp @@ -44,7 +44,8 @@ public: SF_TupScan = (1 << 16), // scan TUP - only LM_CommittedRead SF_OrderBy = (1 << 24), // index scan in order SF_Descending = (2 << 24), // index scan in descending order - SF_ReadRangeNo = (4 << 24) // enable @ref get_range_no + SF_ReadRangeNo = (4 << 24), // enable @ref get_range_no + SF_KeyInfo = 1 // request KeyInfo to be sent back }; /** @@ -61,15 +62,14 @@ public: #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED /** * readTuples - * * @param lock_mode Lock mode * @param batch No of rows to fetch from each fragment at a time * @param parallel No of fragments to scan in parallell - * @note specifying 0 for batch and parallall means max performance + * @note specifying 0 for batch and parallell means max performance */ #ifdef ndb_readtuples_impossible_overload int readTuples(LockMode lock_mode = LM_Read, - Uint32 batch = 0, Uint32 parallel = 0); + Uint32 batch = 0, Uint32 parallel = 0, bool keyinfo = false); #endif inline int readTuples(int parallell){ @@ -142,6 +142,20 @@ public: void close(bool forceSend = false, bool releaseOp = false); /** + * Lock current tuple + * + * 
@return an NdbOperation or NULL. + */ + NdbOperation* lockCurrentTuple(); + /** + * Lock current tuple + * + * @param lockTrans Transaction that should perform the lock + * + * @return an NdbOperation or NULL. + */ + NdbOperation* lockCurrentTuple(NdbTransaction* lockTrans); + /** * Update current tuple * * @return an NdbOperation or NULL. @@ -251,6 +265,19 @@ protected: inline NdbOperation* +NdbScanOperation::lockCurrentTuple(){ + return lockCurrentTuple(m_transConnection); +} + +inline +NdbOperation* +NdbScanOperation::lockCurrentTuple(NdbTransaction* takeOverTrans){ + return takeOverScanOp(NdbOperation::ReadRequest, + takeOverTrans); +} + +inline +NdbOperation* NdbScanOperation::updateCurrentTuple(){ return updateCurrentTuple(m_transConnection); } diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 908e2a81345..d2f9150ade0 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -234,6 +234,7 @@ void Cmvmi::execEVENT_REP(Signal* signal) void Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){ EventSubscribeReq * subReq = (EventSubscribeReq *)&signal->theData[0]; + Uint32 senderRef = signal->getSendersBlockRef(); SubscriberPtr ptr; jamEntry(); DBUG_ENTER("Cmvmi::execEVENT_SUBSCRIBE_REQ"); @@ -251,7 +252,7 @@ Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){ * Create a new one */ if(subscribers.seize(ptr) == false){ - sendSignal(subReq->blockRef, GSN_EVENT_SUBSCRIBE_REF, signal, 1, JBB); + sendSignal(senderRef, GSN_EVENT_SUBSCRIBE_REF, signal, 1, JBB); return; } ptr.p->logLevel.clear(); @@ -278,7 +279,7 @@ Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){ } signal->theData[0] = ptr.i; - sendSignal(ptr.p->blockRef, GSN_EVENT_SUBSCRIBE_CONF, signal, 1, JBB); + sendSignal(senderRef, GSN_EVENT_SUBSCRIBE_CONF, signal, 1, JBB); DBUG_VOID_RETURN; } diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 41359fe65bd..2505515c086 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -119,41 +119,50 @@ MgmtSrvr::logLevelThreadRun() /** * Handle started nodes */ - EventSubscribeReq req; - req = m_event_listner[0].m_logLevel; - req.blockRef = _ownReference; - - SetLogLevelOrd ord; - m_started_nodes.lock(); - while(m_started_nodes.size() > 0){ - Uint32 node = m_started_nodes[0]; - m_started_nodes.erase(0, false); - m_started_nodes.unlock(); + if (m_started_nodes.size() > 0) + { + // calculate max log level + EventSubscribeReq req; + { + LogLevel tmp; + m_event_listner.lock(); + for(int i = m_event_listner.m_clients.size() - 1; i >= 0; i--) + tmp.set_max(m_event_listner[i].m_logLevel); + m_event_listner.unlock(); + req = tmp; + } + req.blockRef = _ownReference; + while (m_started_nodes.size() > 0) + { + Uint32 node = m_started_nodes[0]; + m_started_nodes.erase(0, false); + m_started_nodes.unlock(); - setEventReportingLevelImpl(node, req); - - ord = m_nodeLogLevel[node]; - setNodeLogLevelImpl(node, ord); - - m_started_nodes.lock(); - } + setEventReportingLevelImpl(node, req); + + SetLogLevelOrd ord; + ord = m_nodeLogLevel[node]; + setNodeLogLevelImpl(node, ord); + + m_started_nodes.lock(); + } + } m_started_nodes.unlock(); m_log_level_requests.lock(); - while(m_log_level_requests.size() > 0){ - req = m_log_level_requests[0]; + while (m_log_level_requests.size() > 0) + { + EventSubscribeReq req = m_log_level_requests[0]; m_log_level_requests.erase(0, false); m_log_level_requests.unlock(); - - LogLevel tmp; - tmp = req; - + if(req.blockRef == 0){ req.blockRef = _ownReference; 
setEventReportingLevelImpl(0, req); } else { - ord = req; + SetLogLevelOrd ord; + ord = req; setNodeLogLevelImpl(req.blockRef, ord); } m_log_level_requests.lock(); @@ -1538,7 +1547,8 @@ int MgmtSrvr::setEventReportingLevelImpl(int nodeId, const EventSubscribeReq& ll) { - INIT_SIGNAL_SENDER(ss,nodeId); + SignalSender ss(theFacade); + ss.lock(); SimpleSignal ssig; EventSubscribeReq * dst = @@ -1547,41 +1557,54 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId, EventSubscribeReq::SignalLength); *dst = ll; - send(ss,ssig,nodeId,NODE_TYPE_DB); + NodeBitmask nodes; + nodes.clear(); + Uint32 max = (nodeId == 0) ? (nodeId = 1, MAX_NDB_NODES) : nodeId; + for(; nodeId <= max; nodeId++) + { + if (nodeTypes[nodeId] != NODE_TYPE_DB) + continue; + if (okToSendTo(nodeId, true)) + continue; + if (ss.sendSignal(nodeId, &ssig) == SEND_OK) + { + nodes.set(nodeId); + } + } -#if 0 - while (1) + int error = 0; + while (!nodes.isclear()) { SimpleSignal *signal = ss.waitFor(); int gsn = signal->readSignalNumber(); - switch (gsn) { + nodeId = refToNode(signal->header.theSendersBlockRef); + switch (gsn) { case GSN_EVENT_SUBSCRIBE_CONF:{ + nodes.clear(nodeId); break; } case GSN_EVENT_SUBSCRIBE_REF:{ - return SEND_OR_RECEIVE_FAILED; + nodes.clear(nodeId); + error = 1; + break; } case GSN_NF_COMPLETEREP:{ const NFCompleteRep * const rep = CAST_CONSTPTR(NFCompleteRep, signal->getDataPtr()); - if (rep->failedNodeId == nodeId) - return SEND_OR_RECEIVE_FAILED; + nodes.clear(rep->failedNodeId); break; } case GSN_NODE_FAILREP:{ - const NodeFailRep * const rep = - CAST_CONSTPTR(NodeFailRep, signal->getDataPtr()); - if (NodeBitmask::get(rep->theNodes,nodeId)) - return SEND_OR_RECEIVE_FAILED; + // ignore, NF_COMPLETEREP will arrive later break; } default: report_unknown_signal(signal); return SEND_OR_RECEIVE_FAILED; } - } -#endif + if (error) + return SEND_OR_RECEIVE_FAILED; return 0; } @@ -1601,19 +1624,6 @@ MgmtSrvr::setNodeLogLevelImpl(int nodeId, const SetLogLevelOrd & ll) return ss.sendSignal(nodeId, &ssig) == SEND_OK ? 0 : SEND_OR_RECEIVE_FAILED; } -int -MgmtSrvr::send(SignalSender &ss, SimpleSignal &ssig, Uint32 node, Uint32 node_type){ - Uint32 max = (node == 0) ? 
MAX_NODES : node + 1; - - for(; node < max; node++){ - while(nodeTypes[node] != (int)node_type && node < max) node++; - if(nodeTypes[node] != (int)node_type) - break; - ss.sendSignal(node, &ssig); - } - return 0; -} - //**************************************************************************** //**************************************************************************** diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index 2b84eac6d2c..7811cf0e5d1 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -494,8 +494,6 @@ public: private: //************************************************************************** - int send(SignalSender &ss, SimpleSignal &ssig, Uint32 node, Uint32 node_type); - int sendStopMgmd(NodeId nodeId, bool abort, bool stop, diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index 16810ea25bc..0524aba4c32 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -840,9 +840,8 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &, m_mgmsrv.m_event_listner.unlock(); { - LogLevel ll; - ll.setLogLevel(category,level); - m_mgmsrv.m_event_listner.update_max_log_level(ll); + LogLevel tmp; + m_mgmsrv.m_event_listner.update_max_log_level(tmp); } m_output->println(reply); @@ -1370,21 +1369,23 @@ Ndb_mgmd_event_service::log(int eventType, const Uint32* theData, NodeId nodeId) void Ndb_mgmd_event_service::update_max_log_level(const LogLevel &log_level) { - LogLevel tmp= m_logLevel; - tmp.set_max(log_level); + LogLevel tmp = log_level; + m_clients.lock(); + for(int i = m_clients.size() - 1; i >= 0; i--) + tmp.set_max(m_clients[i].m_logLevel); + m_clients.unlock(); update_log_level(tmp); } void Ndb_mgmd_event_service::update_log_level(const LogLevel &tmp) { - if(!(tmp == m_logLevel)){ - m_logLevel = tmp; - EventSubscribeReq req; - req = tmp; - req.blockRef = 0; - m_mgmsrv->m_log_level_requests.push_back(req); - } + m_logLevel = tmp; + EventSubscribeReq req; + req = tmp; + // send update to all nodes + req.blockRef = 0; + m_mgmsrv->m_log_level_requests.push_back(req); } void diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index a3e2e69ce14..bd50440b3c0 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -2267,7 +2267,7 @@ NdbDictionaryImpl::dropIndex(const char * indexName, m_error.code = 4243; return -1; } - int ret = dropIndex(*idx); //, tableName); + int ret = dropIndex(*idx); // If index stored in cache is incompatible with the one in the kernel // we must clear the cache and try again if (ret == INCOMPATIBLE_VERSION) { diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index b0b7adf25f3..bc9894497f8 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -395,7 +395,6 @@ public: int createIndex(NdbIndexImpl &ix); int dropIndex(const char * indexName, const char * tableName); - // int dropIndex(NdbIndexImpl &, const char * tableName); int dropIndex(NdbIndexImpl &); NdbTableImpl * getIndexTable(NdbIndexImpl * index, NdbTableImpl * table); diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp index 869704c7bb3..7d0712e117d 100644 --- a/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/ndb/src/ndbapi/NdbScanOperation.cpp @@ -160,7 +160,7 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm, return -1; } - m_keyInfo = lockExcl ? 
1 : 0; + m_keyInfo = ((scan_flags & SF_KeyInfo) || lockExcl) ? 1 : 0; bool rangeScan = false; if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex) @@ -924,18 +924,28 @@ NdbScanOperation::takeOverScanOp(OperationType opType, NdbTransaction* pTrans) if (newOp == NULL){ return NULL; } + if (!m_keyInfo) + { + // Cannot take over lock if no keyinfo was requested + setErrorCodeAbort(4604); + return NULL; + } pTrans->theSimpleState = 0; const Uint32 len = (tRecAttr->attrSize() * tRecAttr->arraySize() + 3)/4-1; newOp->theTupKeyLen = len; newOp->theOperationType = opType; - if (opType == DeleteRequest) { - newOp->theStatus = GetValue; - } else { - newOp->theStatus = SetValue; + switch (opType) { + case (ReadRequest): + newOp->theLockMode = theLockMode; + // Fall through + case (DeleteRequest): + newOp->theStatus = GetValue; + break; + default: + newOp->theStatus = SetValue; } - const Uint32 * src = (Uint32*)tRecAttr->aRef(); const Uint32 tScanInfo = src[len] & 0x3FFFF; const Uint32 tTakeOverFragment = src[len] >> 20; diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index 5ca8ad7be60..a2b96b32630 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -290,7 +290,7 @@ ErrorBundle ErrorCodes[] = { { 4601, AE, "Transaction is not started"}, { 4602, AE, "You must call getNdbOperation before executeScan" }, { 4603, AE, "There can only be ONE operation in a scan transaction" }, - { 4604, AE, "takeOverScanOp, opType must be UpdateRequest or DeleteRequest" }, + { 4604, AE, "takeOverScanOp, to take over a scanned row one must explicitly request keyinfo in readTuples call" }, { 4605, AE, "You may only call openScanRead or openScanExclusive once for each operation"}, { 4607, AE, "There may only be one operation in a scan transaction"}, { 4608, AE, "You can not takeOverScan unless you have used openScanExclusive"}, diff --git a/sql/ha_myisammrg.h b/sql/ha_myisammrg.h index c762b7c286e..a73f368c51d 100644 --- a/sql/ha_myisammrg.h +++ b/sql/ha_myisammrg.h @@ -37,7 +37,8 @@ class ha_myisammrg: public handler { return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_READ_RND_SAME | HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_FILE_BASED | - HA_CAN_INSERT_DELAYED | HA_ANY_INDEX_MAY_BE_UNIQUE); + HA_CAN_INSERT_DELAYED | HA_ANY_INDEX_MAY_BE_UNIQUE | + HA_CAN_BIT_FIELD); } ulong index_flags(uint inx, uint part, bool all_parts) const { diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 19b321ff3ca..98dd9d5a122 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -70,7 +70,7 @@ handlerton ndbcluster_hton = { NULL, /* create_cursor_read_view */ NULL, /* set_cursor_read_view */ NULL, /* close_cursor_read_view */ - HTON_NO_FLAGS + HTON_CAN_RECREATE }; #define NDB_AUTO_INCREMENT_RETRIES 10 @@ -1174,12 +1174,23 @@ void ha_ndbcluster::release_metadata() int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type) { + DBUG_ENTER("ha_ndbcluster::get_ndb_lock_type"); if (type >= TL_WRITE_ALLOW_WRITE) - return NdbOperation::LM_Exclusive; - else if (uses_blob_value(m_retrieve_all_fields)) - return NdbOperation::LM_Read; + { + DBUG_PRINT("info", ("Using exclusive lock")); + DBUG_RETURN(NdbOperation::LM_Exclusive); + } + else if (type == TL_READ_WITH_SHARED_LOCKS || + uses_blob_value(m_retrieve_all_fields)) + { + DBUG_PRINT("info", ("Using read lock")); + DBUG_RETURN(NdbOperation::LM_Read); + } else - return NdbOperation::LM_CommittedRead; + { + DBUG_PRINT("info", ("Using committed read")); + DBUG_RETURN(NdbOperation::LM_CommittedRead); + } } static const ulong 
index_type_flags[]= @@ -1679,7 +1690,30 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) int check; NdbTransaction *trans= m_active_trans; - bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE; + if (m_lock_tuple) + { + /* + Lock level m_lock.type either TL_WRITE_ALLOW_WRITE + (SELECT FOR UPDATE) or TL_READ_WITH_SHARED_LOCKS (SELECT + LOCK WITH SHARE MODE) and row was not explictly unlocked + with unlock_row() call + */ + NdbConnection *trans= m_active_trans; + NdbOperation *op; + // Lock row + DBUG_PRINT("info", ("Keeping lock on scanned row")); + + if (!(op= m_active_cursor->lockCurrentTuple())) + { + m_lock_tuple= false; + ERR_RETURN(trans->getNdbError()); + } + m_ops_pending++; + } + m_lock_tuple= false; + + bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE && + m_lock.type != TL_READ_WITH_SHARED_LOCKS; do { DBUG_PRINT("info", ("Call nextResult, contact_ndb: %d", contact_ndb)); /* @@ -1695,6 +1729,13 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) if ((check= cursor->nextResult(contact_ndb, m_force_send)) == 0) { + /* + Explicitly lock tuple if "select for update" or + "select lock in share mode" + */ + m_lock_tuple= (m_lock.type == TL_WRITE_ALLOW_WRITE + || + m_lock.type == TL_READ_WITH_SHARED_LOCKS); DBUG_RETURN(0); } else if (check == 1 || check == 2) @@ -1983,10 +2024,11 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, restart= FALSE; NdbOperation::LockMode lm= (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); + bool need_pk = (lm == NdbOperation::LM_Read); if (!(op= trans->getNdbIndexScanOperation((NDBINDEX *) m_index[active_index].index, (const NDBTAB *) m_table)) || - op->readTuples(lm, 0, parallelism, sorted, descending)) + op->readTuples(lm, 0, parallelism, sorted, descending, false, need_pk)) ERR_RETURN(trans->getNdbError()); m_active_cursor= op; } else { @@ -2036,8 +2078,11 @@ int ha_ndbcluster::full_table_scan(byte *buf) NdbOperation::LockMode lm= (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); + bool need_pk = (lm == NdbOperation::LM_Read); if (!(op=trans->getNdbScanOperation((const NDBTAB *) m_table)) || - op->readTuples(lm, 0, parallelism)) + op->readTuples(lm, + (need_pk)?NdbScanOperation::SF_KeyInfo:0, + parallelism)) ERR_RETURN(trans->getNdbError()); m_active_cursor= op; if (generate_scan_filter(m_cond_stack, op)) @@ -2088,6 +2133,11 @@ int ha_ndbcluster::write_row(byte *record) */ if (!m_use_write && m_ignore_dup_key) { + /* + compare if expression with that in start_bulk_insert() + start_bulk_insert will set parameters to ensure that each + write_row is committed individually + */ int peek_res= peek_indexed_rows(record); if (!peek_res) @@ -2327,6 +2377,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) DBUG_PRINT("info", ("Calling updateTuple on cursor")); if (!(op= cursor->updateCurrentTuple())) ERR_RETURN(trans->getNdbError()); + m_lock_tuple= false; m_ops_pending++; if (uses_blob_value(FALSE)) m_blobs_pending= TRUE; @@ -2406,6 +2457,7 @@ int ha_ndbcluster::delete_row(const byte *record) DBUG_PRINT("info", ("Calling deleteTuple on cursor")); if (cursor->deleteCurrentTuple() != 0) ERR_RETURN(trans->getNdbError()); + m_lock_tuple= false; m_ops_pending++; no_uncommitted_rows_update(-1); @@ -2529,9 +2581,9 @@ void ha_ndbcluster::unpack_record(byte* buf) const NdbRecAttr* rec= m_value[hidden_no].rec; DBUG_ASSERT(rec); DBUG_PRINT("hidden", ("%d: %s \"%llu\"", hidden_no, - hidden_col->getName(), rec->u_64_value())); + hidden_col->getName(), rec->u_64_value())); } - 
//print_results(); + print_results(); #endif DBUG_VOID_RETURN; } @@ -2605,6 +2657,12 @@ int ha_ndbcluster::index_init(uint index) { DBUG_ENTER("ha_ndbcluster::index_init"); DBUG_PRINT("enter", ("index: %u", index)); + /* + Locks are are explicitly released in scan + unless m_lock.type == TL_READ_HIGH_PRIORITY + and no sub-sequent call to unlock_row() + */ + m_lock_tuple= false; DBUG_RETURN(handler::index_init(index)); } @@ -3217,6 +3275,19 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows) DBUG_PRINT("enter", ("rows: %d", (int)rows)); m_rows_inserted= (ha_rows) 0; + if (!m_use_write && m_ignore_dup_key) + { + /* + compare if expression with that in write_row + we have a situation where peek_indexed_rows() will be called + so we cannot batch + */ + DBUG_PRINT("info", ("Batching turned off as duplicate key is " + "ignored by using peek_row")); + m_rows_to_insert= 1; + m_bulk_insert_rows= 1; + DBUG_VOID_RETURN; + } if (rows == (ha_rows) 0) { /* We don't know how many will be inserted, guess */ @@ -3614,6 +3685,22 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) } /* + Unlock the last row read in an open scan. + Rows are unlocked by default in ndb, but + for SELECT FOR UPDATE and SELECT LOCK WIT SHARE MODE + locks are kept if unlock_row() is not called. +*/ + +void ha_ndbcluster::unlock_row() +{ + DBUG_ENTER("unlock_row"); + + DBUG_PRINT("info", ("Unlocking row")); + m_lock_tuple= false; + DBUG_VOID_RETURN; +} + +/* Start a transaction for running a statement if one is not already running in a transaction. This will be the case in a BEGIN; COMMIT; block @@ -4085,6 +4172,12 @@ int ha_ndbcluster::create(const char *name, set_dbname(name2); set_tabname(name2); + if (current_thd->lex->sql_command == SQLCOM_TRUNCATE) + { + DBUG_PRINT("info", ("Dropping and re-creating table for TRUNCATE")); + if ((my_errno= delete_table(name))) + DBUG_RETURN(my_errno); + } if (create_from_engine) { /* @@ -4392,15 +4485,29 @@ int ha_ndbcluster::delete_table(const char *name) int ha_ndbcluster::drop_table() { + THD *thd= current_thd; Ndb *ndb= get_ndb(); NdbDictionary::Dictionary *dict= ndb->getDictionary(); DBUG_ENTER("drop_table"); DBUG_PRINT("enter", ("Deleting %s", m_tabname)); - + release_metadata(); - if (dict->dropTable(m_tabname)) + while (dict->dropTable(m_tabname)) + { + const NdbError err= dict->getNdbError(); + switch (err.status) + { + case NdbError::TemporaryError: + if (!thd->killed) + continue; // retry indefinitly + break; + default: + break; + } ERR_RETURN(dict->getNdbError()); + } + DBUG_RETURN(0); } @@ -4809,14 +4916,24 @@ int ndbcluster_drop_database(const char *path) List_iterator_fast<char> it(drop_list); while ((tabname=it++)) { - if (dict->dropTable(tabname)) + while (dict->dropTable(tabname)) { const NdbError err= dict->getNdbError(); - if (err.code != 709) + switch (err.status) + { + case NdbError::TemporaryError: + if (!thd->killed) + continue; // retry indefinitly + break; + default: + break; + } + if (err.code != 709) // 709: No such table existed { ERR_PRINT(err); ret= ndb_to_mysql_error(&err); } + break; } } DBUG_RETURN(ret); @@ -5891,6 +6008,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, byte *end_of_buffer= (byte*)buffer->buffer_end; NdbOperation::LockMode lm= (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); + bool need_pk = (lm == NdbOperation::LM_Read); const NDBTAB *tab= (const NDBTAB *) m_table; const NDBINDEX *unique_idx= (NDBINDEX *) m_index[active_index].unique_index; const NDBINDEX *idx= (NDBINDEX *) 
m_index[active_index].index; @@ -5957,7 +6075,8 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, end_of_buffer -= reclength; } else if ((scanOp= m_active_trans->getNdbIndexScanOperation(idx, tab)) - &&!scanOp->readTuples(lm, 0, parallelism, sorted, FALSE, TRUE) + &&!scanOp->readTuples(lm, 0, parallelism, sorted, + FALSE, TRUE, need_pk) &&!generate_scan_filter(m_cond_stack, scanOp) &&!define_read_attrs(end_of_buffer-reclength, scanOp)) { diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index d75d7acefd9..e417996a5d9 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -503,6 +503,7 @@ class ha_ndbcluster: public handler int extra(enum ha_extra_function operation); int extra_opt(enum ha_extra_function operation, ulong cache_size); int external_lock(THD *thd, int lock_type); + void unlock_row(); int start_stmt(THD *thd, thr_lock_type lock_type); const char * table_type() const; const char ** bas_ext() const; @@ -684,6 +685,7 @@ private: char m_tabname[FN_HEADLEN]; ulong m_table_flags; THR_LOCK_DATA m_lock; + bool m_lock_tuple; NDB_SHARE *m_share; NDB_INDEX_DATA m_index[MAX_KEY]; // NdbRecAttr has no reference to blob diff --git a/sql/item_buff.cc b/sql/item_buff.cc index 9db2f465080..1661f04a4ae 100644 --- a/sql/item_buff.cc +++ b/sql/item_buff.cc @@ -132,7 +132,7 @@ bool Cached_item_decimal::cmp() { my_decimal tmp; my_decimal *ptmp= item->val_decimal(&tmp); - if (null_value != item->null_value || my_decimal_cmp(&value, ptmp) == 0) + if (null_value != item->null_value || my_decimal_cmp(&value, ptmp)) { null_value= item->null_value; my_decimal2decimal(ptmp, &value); diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index ca7028dae1e..4195db4a6ee 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1391,7 +1391,7 @@ void Item_func_curtime::fix_length_and_dec() { TIME ltime; - decimals=0; + decimals= DATETIME_DEC; collation.set(&my_charset_bin); store_now_in_TIME(<ime); value= TIME_to_ulonglong_time(<ime); @@ -1438,7 +1438,7 @@ String *Item_func_now::val_str(String *str) void Item_func_now::fix_length_and_dec() { - decimals=0; + decimals= DATETIME_DEC; collation.set(&my_charset_bin); store_now_in_TIME(<ime); @@ -1785,7 +1785,7 @@ void Item_func_from_unixtime::fix_length_and_dec() { thd= current_thd; collation.set(&my_charset_bin); - decimals=0; + decimals= DATETIME_DEC; max_length=MAX_DATETIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; maybe_null= 1; thd->time_zone_used= 1; diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index b2f0dab0837..9a14b961130 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -614,6 +614,7 @@ public: { collation.set(&my_charset_bin); maybe_null=1; + decimals= DATETIME_DEC; max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } enum_field_types field_type() const { return MYSQL_TYPE_TIME; } diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 4caa0076c60..a52972753a7 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -424,16 +424,27 @@ bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, my_error(ER_DB_CREATE_EXISTS, MYF(0), db); DBUG_RETURN(-1); } - - VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); - /* do not create database if another thread is holding read lock */ + /* + Do not create database if another thread is holding read lock. + Wait for global read lock before acquiring LOCK_mysql_create_db. + After wait_if_global_read_lock() we have protection against another + global read lock. 
If we acquired LOCK_mysql_create_db first, + another thread could step in and get the global read lock before we + reach wait_if_global_read_lock(). If that thread then tried the same + kind of operation (administering a database), it would block waiting on LOCK_mysql_create_db... + Furthermore, wait_if_global_read_lock() checks if the current thread + has the global read lock and refuses the operation with + ER_CANT_UPDATE_WITH_READLOCK if applicable. + */ if (wait_if_global_read_lock(thd, 0, 1)) { error= -1; goto exit2; } + VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); + /* Check directory */ strxmov(path, mysql_data_home, "/", db, NullS); path_len= unpack_dirname(path,path); // Convert if not unix @@ -537,9 +548,9 @@ bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, } exit: + VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); start_waiting_global_read_lock(thd); exit2: - VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); DBUG_RETURN(error); } @@ -553,12 +564,23 @@ bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) int error= 0; DBUG_ENTER("mysql_alter_db"); - VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); - - /* do not alter database if another thread is holding read lock */ + /* + Do not alter database if another thread is holding read lock. + Wait for global read lock before acquiring LOCK_mysql_create_db. + After wait_if_global_read_lock() we have protection against another + global read lock. If we acquired LOCK_mysql_create_db first, + another thread could step in and get the global read lock before we + reach wait_if_global_read_lock(). If that thread then tried the same + kind of operation (administering a database), it would block waiting on LOCK_mysql_create_db... + Furthermore, wait_if_global_read_lock() checks if the current thread + has the global read lock and refuses the operation with + ER_CANT_UPDATE_WITH_READLOCK if applicable. + */ if ((error=wait_if_global_read_lock(thd,0,1))) goto exit2; + VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); + /* Check directory */ strxmov(path, mysql_data_home, "/", db, "/", MY_DB_OPT_FILE, NullS); fn_format(path, path, "", "", MYF(MY_UNPACK_FILENAME)); @@ -596,9 +618,9 @@ bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) send_ok(thd, result); exit: + VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); start_waiting_global_read_lock(thd); exit2: - VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); DBUG_RETURN(error); } @@ -630,15 +652,26 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) TABLE_LIST* dropped_tables= 0; DBUG_ENTER("mysql_rm_db"); - VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); - - /* do not drop database if another thread is holding read lock */ + /* + Do not drop database if another thread is holding read lock. + Wait for global read lock before acquiring LOCK_mysql_create_db. + After wait_if_global_read_lock() we have protection against another + global read lock. If we acquired LOCK_mysql_create_db first, + another thread could step in and get the global read lock before we + reach wait_if_global_read_lock(). If that thread then tried the same + kind of operation (administering a database), it would block waiting on LOCK_mysql_create_db... + Furthermore, wait_if_global_read_lock() checks if the current thread + has the global read lock and refuses the operation with + ER_CANT_UPDATE_WITH_READLOCK if applicable. 
+ */ if (wait_if_global_read_lock(thd, 0, 1)) { error= -1; goto exit2; } + VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); + (void) sprintf(path,"%s/%s",mysql_data_home,db); length= unpack_dirname(path,path); // Convert if not unix strmov(path+length, MY_DB_OPT_FILE); // Append db option file name @@ -747,7 +780,6 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) exit: (void)sp_drop_db_routines(thd, db); /* QQ Ignore errors for now */ - start_waiting_global_read_lock(thd); /* If this database was the client's selected database, we silently change the client's selected database to nothing (to have an empty SELECT DATABASE() @@ -776,9 +808,9 @@ exit: thd->db= 0; thd->db_length= 0; } -exit2: VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); - + start_waiting_global_read_lock(thd); +exit2: DBUG_RETURN(error); } diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index d2699b88aa4..0bbfc64e272 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -304,18 +304,7 @@ static char *get_text(LEX *lex) found_escape=1; if (lex->ptr == lex->end_of_query) return 0; -#ifdef USE_MB - int l; - if (use_mb(cs) && - (l = my_ismbchar(cs, - (const char *)lex->ptr, - (const char *)lex->end_of_query))) { - lex->ptr += l; - continue; - } - else -#endif - yySkip(); + yySkip(); } else if (c == sep) { @@ -344,9 +333,6 @@ static char *get_text(LEX *lex) { uchar *to; - /* Re-use found_escape for tracking state of escapes */ - found_escape= 0; - for (to=start ; str != end ; str++) { #ifdef USE_MB @@ -360,8 +346,7 @@ static char *get_text(LEX *lex) continue; } #endif - if (!found_escape && - !(lex->thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) && + if (!(lex->thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) && *str == '\\' && str+1 != end) { switch(*++str) { @@ -388,20 +373,14 @@ static char *get_text(LEX *lex) *to++= '\\'; // remember prefix for wildcard /* Fall through */ default: - found_escape= 1; - str--; + *to++= *str; break; } } - else if (!found_escape && *str == sep) - { - found_escape= 1; - } + else if (*str == sep) + *to++= *str++; // Two ' or " else - { *to++ = *str; - found_escape= 0; - } } *to=0; lex->yytoklen=(uint) (to-start); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 213a7730824..ba5c2ebf484 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -5004,7 +5004,14 @@ bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables) if (all_tables->security_ctx) thd->security_ctx= all_tables->security_ctx; - if (check_access(thd, privilege, all_tables->db, + const char *db_name; + if ((all_tables->view || all_tables->field_translation) && + !all_tables->schema_table) + db_name= all_tables->view_db.str; + else + db_name= all_tables->db; + + if (check_access(thd, privilege, db_name, &all_tables->grant.privilege, 0, 0, test(all_tables->schema_table))) goto deny; diff --git a/support-files/mysql.server.sh b/support-files/mysql.server.sh index bf17375c0eb..27a1b85a354 100644 --- a/support-files/mysql.server.sh +++ b/support-files/mysql.server.sh @@ -17,6 +17,7 @@ ### BEGIN INIT INFO # Provides: mysql # Required-Start: $local_fs $network $remote_fs +# Should-Start: ypbind nscd ldap ntpd xntpd # Required-Stop: $local_fs $network $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index b0971980168..3876de58b0e 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -12794,25 +12794,26 @@ from t2);"); static void test_bug8378() { #if defined(HAVE_CHARSET_gbk) && 
!defined(EMBEDDED_LIBRARY) - MYSQL *lmysql; + MYSQL *old_mysql=mysql; char out[9]; /* strlen(TEST_BUG8378)*2+1 */ - int len; + char buf[256]; + int len, rc; myheader("test_bug8378"); if (!opt_silent) fprintf(stdout, "\n Establishing a test connection ..."); - if (!(lmysql= mysql_init(NULL))) + if (!(mysql= mysql_init(NULL))) { myerror("mysql_init() failed"); exit(1); } - if (mysql_options(lmysql, MYSQL_SET_CHARSET_NAME, "gbk")) + if (mysql_options(mysql, MYSQL_SET_CHARSET_NAME, "gbk")) { myerror("mysql_options() failed"); exit(1); } - if (!(mysql_real_connect(lmysql, opt_host, opt_user, + if (!(mysql_real_connect(mysql, opt_host, opt_user, opt_password, current_db, opt_port, opt_unix_socket, 0))) { @@ -12822,12 +12823,18 @@ static void test_bug8378() if (!opt_silent) fprintf(stdout, " OK"); - len= mysql_real_escape_string(lmysql, out, TEST_BUG8378_IN, 4); + len= mysql_real_escape_string(mysql, out, TEST_BUG8378_IN, 4); /* No escaping should have actually happened. */ DIE_UNLESS(memcmp(out, TEST_BUG8378_OUT, len) == 0); - mysql_close(lmysql); + sprintf(buf, "SELECT '%s'", out); + rc=mysql_real_query(mysql, buf, strlen(buf)); + myquery(rc); + + mysql_close(mysql); + + mysql=old_mysql; #endif }
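
The ha_ndbcluster drop_table() and ndbcluster_drop_database() hunks above both replace a single dropTable() call with a loop that retries while the dictionary reports a temporary error and the session has not been killed, and reports any other error once. Below is a minimal standalone sketch of that control flow only; DictStub, ErrorStub, the error code 701, and the killed flag are hypothetical stand-ins, not the real NDB dictionary API.

// Sketch of "retry while the error is temporary and the thread is alive".
// All types here are illustrative stand-ins for the NDB dictionary objects.
#include <cstdio>

struct ErrorStub {
  enum Status { TemporaryError, PermanentError };
  Status status;
  int code;
};

struct DictStub {
  int attempts_left;            // pretend the temporary condition clears up
  ErrorStub last_error;
  // Non-zero return mimics a failed drop, like Dictionary::dropTable().
  int dropTable(const char *) {
    if (attempts_left-- > 0) {
      last_error.status = ErrorStub::TemporaryError;
      last_error.code = 701;    // arbitrary illustrative code
      return 1;
    }
    return 0;                   // success
  }
  const ErrorStub &getNdbError() const { return last_error; }
};

static int drop_with_retry(DictStub &dict, const char *name, const bool &killed)
{
  while (dict.dropTable(name)) {
    const ErrorStub err = dict.getNdbError();
    if (err.status == ErrorStub::TemporaryError && !killed)
      continue;                 // keep retrying, as in the hunks above
    std::fprintf(stderr, "drop failed, error %d\n", err.code);
    return err.code;            // permanent error, or the thread was killed
  }
  return 0;
}

int main() {
  DictStub dict = { 3, { ErrorStub::PermanentError, 0 } };
  bool killed = false;
  return drop_with_retry(dict, "t1", killed);
}

The only exit from a temporary error is the session being killed, which matches the "retry indefinitely" comments in the hunks; permanent errors fall through to normal error reporting.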
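
The sql_db.cc hunks reorder mysql_create_db(), mysql_alter_db() and mysql_rm_db() so that wait_if_global_read_lock() runs before LOCK_mysql_create_db is taken, and the exit paths release the mutex before calling start_waiting_global_read_lock(). A minimal sketch of that acquire/release ordering follows; the two global-read-lock helpers are no-op stand-ins for the server's bookkeeping, not its real implementation.

// Sketch of "wait for the global read lock first, then take the mutex".
#include <pthread.h>
#include <cstdio>

static pthread_mutex_t LOCK_create_db = PTHREAD_MUTEX_INITIALIZER;

// Stand-ins: the real calls can refuse the operation (e.g. when the
// current thread itself holds the global read lock).
static int wait_if_global_read_lock_stub() { return 0; }   // 0 = OK to proceed
static void start_waiting_global_read_lock_stub() {}

static int admin_database_op(const char *db)
{
  // Taking the mutex first could deadlock against a thread that grabs the
  // global read lock and then wants this mutex, so wait for the lock first.
  if (wait_if_global_read_lock_stub())
    return -1;                                   // the "exit2" path

  pthread_mutex_lock(&LOCK_create_db);
  std::printf("administering database %s\n", db); // the protected work

  pthread_mutex_unlock(&LOCK_create_db);          // "exit" path: mutex first,
  start_waiting_global_read_lock_stub();          // then release the GRL wait
  return 0;
}

int main() { return admin_database_op("test"); }

Releasing in the reverse order on the exit path keeps the mutex-protected section strictly nested inside the global-read-lock window, mirroring how the hunks move the unlock above start_waiting_global_read_lock().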