| author | joreland@mysql.com <> | 2004-08-24 13:29:11 +0200 |
|---|---|---|
| committer | joreland@mysql.com <> | 2004-08-24 13:29:11 +0200 |
| commit | 6163efe1aca3f7032a45408a9b7bbc63a3bbadae (patch) | |
| tree | 0edda70a93b5a42405a81fe6df1341e40e3bc37f | |
| parent | 1c44cd90877a0790eb1547707051dc260d84c42b (diff) | |
| parent | 908ff0495d33b3a1bc8a9382e84d8ddeabce2337 (diff) | |
| download | mariadb-git-6163efe1aca3f7032a45408a9b7bbc63a3bbadae.tar.gz | |
merge
90 files changed, 686 insertions, 324 deletions
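The largest group of hunks in this merge is mechanical: throughout mysys (hash.c, list.c, mf_iocache.c, mf_keycache.c, my_alloc.c, my_fopen.c, thr_lock.c, ...) pointer values in DBUG_PRINT trace lines gain an explicit `0x` prefix, so that addresses are no longer ambiguous next to the decimal counters printed on the same line. Below is a minimal standalone sketch of the before/after trace output; it uses plain printf in place of the dbug macros, and the buffer pointer is purely hypothetical, standing in for the hash/keycache/root pointers touched in the patch.

```c
#include <stdio.h>

int main(void)
{
  char buffer[16];
  void *ptr = buffer;   /* hypothetical pointer, stands in for hash/keycache/mem_root/etc. */

  /* old style: the address has no radix prefix, so a value like "7ffee3a1"
     is easy to misread as just another decimal count in a long trace line */
  printf("enter: buf: %lx size: %d\n", (unsigned long) ptr, 16);

  /* new style used throughout this merge: an explicit 0x marks it as an address */
  printf("enter: buf: 0x%lx size: %d\n", (unsigned long) ptr, 16);
  return 0;
}
```

The same readability motive is behind the relabelled keycache traces in mf_keycache.c, where "file"/"filepos" become the shorter "fd:"/"pos:" tags.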
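A smaller but behaviour-relevant change is the guard added to include/config-win.h: wingdi.h (pulled in via the Windows headers) defines ERROR as a macro, which silently rewrites any server code that wants to use ERROR as an ordinary identifier. The sketch below illustrates the collision and the `#undef` guard from the patch; the `#define ERROR 0` line is only a stand-in for the wingdi.h definition, and the enum is hypothetical.

```c
/* Stand-in for what wingdi.h does on Windows; the exact value is irrelevant here. */
#define ERROR 0

/* The guard added in this merge: drop the macro if some Windows header defined it. */
#ifdef ERROR
#undef ERROR
#endif

/* With the macro removed, ERROR can be used as a normal identifier again. */
enum query_result { OK, ERROR, TIMEOUT };

int main(void)
{
  return (int) OK;
}
```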
diff --git a/bdb/dist/configure.ac b/bdb/dist/configure.ac index a61b8595322..98cf0f63b39 100644 --- a/bdb/dist/configure.ac +++ b/bdb/dist/configure.ac @@ -555,7 +555,7 @@ fi LIB@&t@OBJS=`echo "$LIB@&t@OBJS" | sed 's,\.[[^.]]* ,$U&,g;s,\.[[^.]]*$,$U&,'` LTLIBOBJS=`echo "$LIB@&t@OBJS" | - sed 's,\.[[^.]]* ,.lo ,g;s,\.[[^.]]*$,.lo,'` + sed "s,\.[[^.]]* ,$o ,g;s,\.[[^.]]*$,$o,"` AC_SUBST(LTLIBOBJS) # Initial output file list. diff --git a/configure.in b/configure.in index f58ba482c59..9a902d17305 100644 --- a/configure.in +++ b/configure.in @@ -4,7 +4,7 @@ dnl Process this file with autoconf to produce a configure script. AC_INIT(sql/mysqld.cc) AC_CANONICAL_SYSTEM # The Docs Makefile.am parses this line! -AM_INIT_AUTOMAKE(mysql, 4.1.4-beta) +AM_INIT_AUTOMAKE(mysql, 4.1.4-gamma) AM_CONFIG_HEADER(config.h) PROTOCOL_VERSION=10 @@ -2821,7 +2821,7 @@ then AC_CONFIG_FILES(bdb/Makefile) echo "CONFIGURING FOR BERKELEY DB" - bdb_conf_flags= + bdb_conf_flags="--disable-shared" if test $with_debug = "yes" then bdb_conf_flags="$bdb_conf_flags --enable-debug --enable-diagnostic" diff --git a/include/config-win.h b/include/config-win.h index 91697c985d1..96a155633eb 100644 --- a/include/config-win.h +++ b/include/config-win.h @@ -150,6 +150,11 @@ typedef uint rf_SetTimer; #define HAVE_NAMED_PIPE /* We can only create pipes on NT */ #endif +/* ERROR is defined in wingdi.h */ +#ifdef ERROR +#undef ERROR +#endif + /* We need to close files to break connections on shutdown */ #ifndef SIGNAL_WITH_VIO_CLOSE #define SIGNAL_WITH_VIO_CLOSE diff --git a/libmysqld/examples/Makefile.am b/libmysqld/examples/Makefile.am index b3db54d305a..2712e0dff48 100644 --- a/libmysqld/examples/Makefile.am +++ b/libmysqld/examples/Makefile.am @@ -16,7 +16,7 @@ DEFS = -DEMBEDDED_LIBRARY INCLUDES = @MT_INCLUDES@ -I$(top_srcdir)/include -I$(srcdir) \ -I$(top_srcdir) -I$(top_srcdir)/client $(openssl_includes) LIBS = @LIBS@ @WRAPLIBS@ @CLIENT_LIBS@ -LDADD = @CLIENT_EXTRA_LDFLAGS@ ../libmysqld.a @innodb_system_libs@ @bdb_libs_with_path@ @LIBDL@ $(CXXLDFLAGS) +LDADD = @CLIENT_EXTRA_LDFLAGS@ ../libmysqld.a @innodb_system_libs@ @LIBDL@ $(CXXLDFLAGS) mysqltest_LINK = $(CXXLINK) diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index f4a53343e45..8092d87b97c 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -72,6 +72,11 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command, THD *thd=(THD *) mysql->thd; NET *net= &mysql->net; + if (thd->data) + { + free_rows(thd->data); + thd->data= 0; + } /* Check that we are calling the client functions in right order */ if (mysql->status != MYSQL_STATUS_READY) { @@ -217,11 +222,6 @@ static int emb_stmt_execute(MYSQL_STMT *stmt) THD *thd= (THD*)stmt->mysql->thd; thd->client_param_count= stmt->param_count; thd->client_params= stmt->params; - if (thd->data) - { - free_rows(thd->data); - thd->data= 0; - } if (emb_advanced_command(stmt->mysql, COM_EXECUTE,0,0, (const char*)&stmt->stmt_id,sizeof(stmt->stmt_id), 1) || diff --git a/myisammrg/myrg_open.c b/myisammrg/myrg_open.c index 2d6b6dcf167..4c6ffb98ad5 100644 --- a/myisammrg/myrg_open.c +++ b/myisammrg/myrg_open.c @@ -32,20 +32,16 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking) { - int save_errno,errpos; - uint files,i,dir_length,length,key_parts; + int save_errno,errpos=0; + uint files=0,i,dir_length,length,key_parts; ulonglong file_offset; char name_buff[FN_REFLEN*2],buff[FN_REFLEN],*end; - MYRG_INFO *m_info; + MYRG_INFO *m_info=0; File fd; IO_CACHE file; - MI_INFO *isam; + MI_INFO *isam=0; 
DBUG_ENTER("myrg_open"); - LINT_INIT(m_info); - m_info=0; - isam=0; - errpos=files=0; bzero((char*) &file,sizeof(file)); if ((fd=my_open(fn_format(name_buff,name,"",MYRG_NAME_EXT,4), O_RDONLY | O_SHARE,MYF(0))) < 0) diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh index a3972ad8f26..bbd3fa3257a 100644 --- a/mysql-test/ndb/ndbcluster.sh +++ b/mysql-test/ndb/ndbcluster.sh @@ -194,6 +194,7 @@ stop_default_ndbcluster() { exec_mgmtclient="$exec_mgmtclient --try-reconnect=1" echo "all stop" | $exec_mgmtclient 2>&1 | cat > /dev/null +echo "3 stop" | $exec_mgmtclient 2>&1 | cat > /dev/null if [ -f "$fs_ndb/$pidfile" ] ; then kill -9 `cat "$fs_ndb/$pidfile"` 2> /dev/null diff --git a/mysql-test/r/func_gconcat.result b/mysql-test/r/func_gconcat.result index 1ddbc18d965..5c98bc5b612 100644 --- a/mysql-test/r/func_gconcat.result +++ b/mysql-test/r/func_gconcat.result @@ -294,6 +294,21 @@ grp select group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a) desc) as grp from t1; grp 2,4,3,5 +select t1.a, group_concat(c order by (select c from t2 where t2.a=t1.a limit 1)) as grp from t1 group by 1; +a grp +1 2 +2 4,3 +3 5 +select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a)) as grp from t1 group by 1; +a grp +1 2 +2 4,3 +3 5 +select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a) desc) as grp from t1 group by 1; +a grp +1 2 +2 4,3 +3 5 select a,c,(select group_concat(c order by a) from t2 where a=t1.a) as grp from t1 order by grp; a c grp 3 5 3,3 diff --git a/mysql-test/r/ndb_blob.result b/mysql-test/r/ndb_blob.result index 91102cd37c8..0e99c939ea7 100644 --- a/mysql-test/r/ndb_blob.result +++ b/mysql-test/r/ndb_blob.result @@ -1,4 +1,5 @@ drop table if exists t1; +drop database if exists test2; set autocommit=0; create table t1 ( a int not null primary key, @@ -255,6 +256,20 @@ a b c d 7 7xb7 777 7xdd7 8 8xb8 888 8xdd8 9 9xb9 999 9xdd9 +create database test2; +use test2; +CREATE TABLE t2 ( +a bigint unsigned NOT NULL PRIMARY KEY, +b int unsigned not null, +c int unsigned +) engine=ndbcluster; +insert into t2 values (1,1,1),(2,2,2); +select * from test.t1,t2 where test.t1.a = t2.a order by test.t1.a; +a b c d a b c +1 1xb1 111 1xdd1 1 1 1 +2 2xb2 222 2xdd2 2 2 2 +drop table t2; +use test; delete from t1 where c >= 100; commit; select count(*) from t1; diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index fe7ffa9b661..ff5c9dfe813 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -1251,11 +1251,6 @@ a b 2 NULL 3 1 drop table t1, t2; -create table t1(City VARCHAR(30),Location geometry); -insert into t1 values("Paris",GeomFromText('POINT(2.33 48.87)')); -select City from t1 where (select intersects(GeomFromText(AsText(Location)),GeomFromText('Polygon((2 50, 2.5 50, 2.5 47, 2 47, 2 50))'))=0); -City -drop table t1; CREATE TABLE `t1` ( `id` mediumint(8) unsigned NOT NULL auto_increment, `pseudo` varchar(35) NOT NULL default '', @@ -1912,3 +1907,64 @@ a 1 2 drop table t1,t2; +CREATE TABLE t1(`IZAVORGANG_ID` VARCHAR(11) CHARACTER SET latin1 COLLATE latin1_bin,`KUERZEL` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,`IZAANALYSEART_ID` VARCHAR(11) CHARACTER SET latin1 COLLATE latin1_bin,`IZAPMKZ_ID` VARCHAR(11) CHARACTER SET latin1 COLLATE latin1_bin); +CREATE INDEX AK01IZAVORGANG ON t1(izaAnalyseart_id,Kuerzel); +INSERT INTO 
t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000001','601','D0000000001','I0000000001'); +INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000002','602','D0000000001','I0000000001'); +INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000003','603','D0000000001','I0000000001'); +INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000004','101','D0000000001','I0000000001'); +SELECT `IZAVORGANG_ID` FROM t1 WHERE `KUERZEL` IN(SELECT MIN(`KUERZEL`)`Feld1` FROM t1 WHERE `KUERZEL` LIKE'601%'And`IZAANALYSEART_ID`='D0000000001'); +IZAVORGANG_ID +D0000000001 +drop table t1; +CREATE TABLE `t1` ( `aid` int(11) NOT NULL default '0', `bid` int(11) NOT NULL default '0', PRIMARY KEY (`aid`,`bid`)); +CREATE TABLE `t2` ( `aid` int(11) NOT NULL default '0', `bid` int(11) NOT NULL default '0', PRIMARY KEY (`aid`,`bid`)); +insert into t1 values (1,1),(1,2),(2,1),(2,2); +insert into t2 values (1,2),(2,2); +select * from t1 where t1.aid not in (select aid from t2 where bid=t1.bid); +aid bid +1 1 +2 1 +alter table t2 drop primary key; +alter table t2 add key KEY1 (aid, bid); +select * from t1 where t1.aid not in (select aid from t2 where bid=t1.bid); +aid bid +1 1 +2 1 +alter table t2 drop key KEY1; +alter table t2 add primary key (bid, aid); +select * from t1 where t1.aid not in (select aid from t2 where bid=t1.bid); +aid bid +1 1 +2 1 +drop table t1,t2; +CREATE TABLE t1 (howmanyvalues bigint, avalue int); +INSERT INTO t1 VALUES (1, 1),(2, 1),(2, 2),(3, 1),(3, 2),(3, 3),(4, 1),(4, 2),(4, 3),(4, 4); +SELECT howmanyvalues, count(*) from t1 group by howmanyvalues; +howmanyvalues count(*) +1 1 +2 2 +3 3 +4 4 +SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues = a.howmanyvalues) as mycount from t1 a group by a.howmanyvalues; +howmanyvalues mycount +1 1 +2 2 +3 3 +4 4 +CREATE INDEX t1_howmanyvalues_idx ON t1 (howmanyvalues); +SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues+1 = a.howmanyvalues+1) as mycount from t1 a group by a.howmanyvalues; +howmanyvalues mycount +1 1 +2 2 +3 3 +4 4 +SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues = a.howmanyvalues) as mycount from t1 a group by a.howmanyvalues; +howmanyvalues mycount +1 1 +2 2 +3 3 +4 4 +SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues = a.avalue) as mycount from t1 a group by a.howmanyvalues; +ERROR 42S22: Unknown column 'a.avalue' in 'where clause' +drop table t1; diff --git a/mysql-test/r/subselect_gis.result b/mysql-test/r/subselect_gis.result new file mode 100644 index 00000000000..34ab7748656 --- /dev/null +++ b/mysql-test/r/subselect_gis.result @@ -0,0 +1,8 @@ +drop table if exists t1; +create table t1(City VARCHAR(30),Location geometry); +insert into t1 values("Paris",GeomFromText('POINT(2.33 48.87)')); +select City from t1 where (select +intersects(GeomFromText(AsText(Location)),GeomFromText('Polygon((2 50, 2.5 +50, 2.5 47, 2 47, 2 50))'))=0); +City +drop table t1; diff --git a/mysql-test/t/flush_block_commit-master.opt b/mysql-test/t/flush_block_commit-master.opt deleted file mode 100644 index a25aa115e06..00000000000 --- a/mysql-test/t/flush_block_commit-master.opt +++ /dev/null @@ -1 +0,0 @@ ---loose-innodb_lock_wait_timeout=5 diff --git a/mysql-test/t/func_gconcat.test b/mysql-test/t/func_gconcat.test index ad19c8414ec..3f671377c4e 100644 --- a/mysql-test/t/func_gconcat.test +++ b/mysql-test/t/func_gconcat.test @@ -169,9 
+169,11 @@ create table t2 (a int, c int); insert into t2 values (1, 5), (2, 4), (3, 3), (3,3); select group_concat(c) from t1; select group_concat(c order by (select c from t2 where t2.a=t1.a limit 1)) as grp from t1; - select group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a)) as grp from t1; select group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a) desc) as grp from t1; +select t1.a, group_concat(c order by (select c from t2 where t2.a=t1.a limit 1)) as grp from t1 group by 1; +select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a)) as grp from t1 group by 1; +select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a) desc) as grp from t1 group by 1; # The following returns random results as we are sorting on blob addresses # select group_concat(c order by (select group_concat(c order by a) from t2 where t2.a=t1.a)) as grp from t1; diff --git a/mysql-test/t/ndb_blob.test b/mysql-test/t/ndb_blob.test index 391d2a2ec23..3671c7fd07a 100644 --- a/mysql-test/t/ndb_blob.test +++ b/mysql-test/t/ndb_blob.test @@ -2,6 +2,7 @@ --disable_warnings drop table if exists t1; +drop database if exists test2; --enable_warnings # @@ -211,6 +212,31 @@ select * from t1 order by a; alter table t1 drop x; select * from t1 order by a; +# multi db + +create database test2; +use test2; + +CREATE TABLE t2 ( + a bigint unsigned NOT NULL PRIMARY KEY, + b int unsigned not null, + c int unsigned +) engine=ndbcluster; + +insert into t2 values (1,1,1),(2,2,2); +select * from test.t1,t2 where test.t1.a = t2.a order by test.t1.a; + +drop table t2; +use test; + +# alter table + +select * from t1 order by a; +alter table t1 add x int; +select * from t1 order by a; +alter table t1 drop x; +select * from t1 order by a; + # range scan delete delete from t1 where c >= 100; commit; diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index 02a570b1db3..7fcd0565ae7 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -759,14 +759,6 @@ select * from t1; drop table t1, t2; # -# correct behavoiur for function from reduced subselect -# -create table t1(City VARCHAR(30),Location geometry); -insert into t1 values("Paris",GeomFromText('POINT(2.33 48.87)')); -select City from t1 where (select intersects(GeomFromText(AsText(Location)),GeomFromText('Polygon((2 50, 2.5 50, 2.5 47, 2 47, 2 50))'))=0); -drop table t1; - -# # reduced subselect in ORDER BY & GROUP BY clauses # @@ -1234,4 +1226,44 @@ select a,b from t1 where match(b) against ('Ball') > 0; select a from t2 where a in (select a from t1 where match(b) against ('Ball') > 0); drop table t1,t2; +# +# BUG#5003 - like in subselect +# +CREATE TABLE t1(`IZAVORGANG_ID` VARCHAR(11) CHARACTER SET latin1 COLLATE latin1_bin,`KUERZEL` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,`IZAANALYSEART_ID` VARCHAR(11) CHARACTER SET latin1 COLLATE latin1_bin,`IZAPMKZ_ID` VARCHAR(11) CHARACTER SET latin1 COLLATE latin1_bin); +CREATE INDEX AK01IZAVORGANG ON t1(izaAnalyseart_id,Kuerzel); +INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000001','601','D0000000001','I0000000001'); +INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000002','602','D0000000001','I0000000001'); +INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000003','603','D0000000001','I0000000001'); 
+INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000004','101','D0000000001','I0000000001'); +SELECT `IZAVORGANG_ID` FROM t1 WHERE `KUERZEL` IN(SELECT MIN(`KUERZEL`)`Feld1` FROM t1 WHERE `KUERZEL` LIKE'601%'And`IZAANALYSEART_ID`='D0000000001'); +drop table t1; +# +# Optimized IN with compound index +# +CREATE TABLE `t1` ( `aid` int(11) NOT NULL default '0', `bid` int(11) NOT NULL default '0', PRIMARY KEY (`aid`,`bid`)); +CREATE TABLE `t2` ( `aid` int(11) NOT NULL default '0', `bid` int(11) NOT NULL default '0', PRIMARY KEY (`aid`,`bid`)); +insert into t1 values (1,1),(1,2),(2,1),(2,2); +insert into t2 values (1,2),(2,2); +select * from t1 where t1.aid not in (select aid from t2 where bid=t1.bid); +alter table t2 drop primary key; +alter table t2 add key KEY1 (aid, bid); +select * from t1 where t1.aid not in (select aid from t2 where bid=t1.bid); +alter table t2 drop key KEY1; +alter table t2 add primary key (bid, aid); +select * from t1 where t1.aid not in (select aid from t2 where bid=t1.bid); +drop table t1,t2; + +# +# resolving fields of grouped outer SELECT +# +CREATE TABLE t1 (howmanyvalues bigint, avalue int); +INSERT INTO t1 VALUES (1, 1),(2, 1),(2, 2),(3, 1),(3, 2),(3, 3),(4, 1),(4, 2),(4, 3),(4, 4); +SELECT howmanyvalues, count(*) from t1 group by howmanyvalues; +SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues = a.howmanyvalues) as mycount from t1 a group by a.howmanyvalues; +CREATE INDEX t1_howmanyvalues_idx ON t1 (howmanyvalues); +SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues+1 = a.howmanyvalues+1) as mycount from t1 a group by a.howmanyvalues; +SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues = a.howmanyvalues) as mycount from t1 a group by a.howmanyvalues; +-- error 1054 +SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues = a.avalue) as mycount from t1 a group by a.howmanyvalues; +drop table t1; diff --git a/mysql-test/t/subselect_gis.test b/mysql-test/t/subselect_gis.test new file mode 100644 index 00000000000..338051029c4 --- /dev/null +++ b/mysql-test/t/subselect_gis.test @@ -0,0 +1,15 @@ +-- source include/have_geometry.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +# +# correct behavoiur for function from reduced subselect +# +create table t1(City VARCHAR(30),Location geometry); +insert into t1 values("Paris",GeomFromText('POINT(2.33 48.87)')); +select City from t1 where (select +intersects(GeomFromText(AsText(Location)),GeomFromText('Polygon((2 50, 2.5 +50, 2.5 47, 2 47, 2 50))'))=0); +drop table t1; diff --git a/mysys/hash.c b/mysys/hash.c index 11cbbd6b898..ce25ae89b63 100644 --- a/mysys/hash.c +++ b/mysys/hash.c @@ -52,7 +52,7 @@ _hash_init(HASH *hash,CHARSET_INFO *charset, void (*free_element)(void*),uint flags CALLER_INFO_PROTO) { DBUG_ENTER("hash_init"); - DBUG_PRINT("enter",("hash: %lx size: %d",hash,size)); + DBUG_PRINT("enter",("hash: 0x%lx size: %d",hash,size)); hash->records=0; if (my_init_dynamic_array_ci(&hash->array,sizeof(HASH_LINK),size,0)) @@ -565,7 +565,7 @@ my_bool hash_check(HASH *hash) if ((rec_link=hash_rec_mask(hash,hash_info,blength,records)) != i) { DBUG_PRINT("error", - ("Record in wrong link at %d: Start %d Record: %lx Record-link %d", idx,i,hash_info->data,rec_link)); + ("Record in wrong link at %d: Start %d Record: 0x%lx Record-link %d", idx,i,hash_info->data,rec_link)); error=1; } else diff --git a/mysys/list.c b/mysys/list.c index 17028e8e183..64fca10dc0b 100644 --- a/mysys/list.c 
+++ b/mysys/list.c @@ -28,7 +28,7 @@ LIST *list_add(LIST *root, LIST *element) { DBUG_ENTER("list_add"); - DBUG_PRINT("enter",("root: %lx element: %lx", root, element)); + DBUG_PRINT("enter",("root: 0x%lx element: %lx", root, element)); if (root) { if (root->prev) /* If add in mid of list */ diff --git a/mysys/mf_iocache.c b/mysys/mf_iocache.c index f16f2b7ab72..f109df912f1 100644 --- a/mysys/mf_iocache.c +++ b/mysys/mf_iocache.c @@ -140,7 +140,7 @@ int init_io_cache(IO_CACHE *info, File file, uint cachesize, uint min_cache; my_off_t end_of_file= ~(my_off_t) 0; DBUG_ENTER("init_io_cache"); - DBUG_PRINT("enter",("cache: %lx type: %d pos: %ld", + DBUG_PRINT("enter",("cache: 0x%lx type: %d pos: %ld", (ulong) info, (int) type, (ulong) seek_offset)); info->file= file; @@ -290,7 +290,7 @@ my_bool reinit_io_cache(IO_CACHE *info, enum cache_type type, pbool clear_cache) { DBUG_ENTER("reinit_io_cache"); - DBUG_PRINT("enter",("cache: %lx type: %d seek_offset: %lu clear_cache: %d", + DBUG_PRINT("enter",("cache: 0x%lx type: %d seek_offset: %lu clear_cache: %d", (ulong) info, type, (ulong) seek_offset, (int) clear_cache)); diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c index de3bfc5d30b..11aadbed6c1 100644 --- a/mysys/mf_keycache.c +++ b/mysys/mf_keycache.c @@ -401,8 +401,8 @@ int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, keycache->waiting_for_hash_link.last_thread= NULL; keycache->waiting_for_block.last_thread= NULL; DBUG_PRINT("exit", - ("disk_blocks: %d block_root: %lx hash_entries: %d\ - hash_root: %lx hash_links: %d hash_link_root %lx", + ("disk_blocks: %d block_root: 0x%lx hash_entries: %d\ + hash_root: 0x%lx hash_links: %d hash_link_root: 0x%lx", keycache->disk_blocks, keycache->block_root, keycache->hash_entries, keycache->hash_root, keycache->hash_links, keycache->hash_link_root)); @@ -596,7 +596,7 @@ void change_key_cache_param(KEY_CACHE *keycache, uint division_limit, void end_key_cache(KEY_CACHE *keycache, my_bool cleanup) { DBUG_ENTER("end_key_cache"); - DBUG_PRINT("enter", ("key_cache: %lx", keycache)); + DBUG_PRINT("enter", ("key_cache: 0x%lx", keycache)); if (!keycache->key_cache_inited) DBUG_VOID_RETURN; @@ -1109,7 +1109,7 @@ static inline void link_hash(HASH_LINK **start, HASH_LINK *hash_link) static void unlink_hash(KEY_CACHE *keycache, HASH_LINK *hash_link) { - KEYCACHE_DBUG_PRINT("unlink_hash", ("file %u, filepos %lu #requests=%u", + KEYCACHE_DBUG_PRINT("unlink_hash", ("fd: %u pos_ %lu #requests=%u", (uint) hash_link->file,(ulong) hash_link->diskpos, hash_link->requests)); KEYCACHE_DBUG_ASSERT(hash_link->requests == 0); if ((*hash_link->prev= hash_link->next)) @@ -1167,7 +1167,7 @@ static HASH_LINK *get_hash_link(KEY_CACHE *keycache, int cnt; #endif - KEYCACHE_DBUG_PRINT("get_hash_link", ("file %u, filepos %lu", + KEYCACHE_DBUG_PRINT("get_hash_link", ("fd: %u pos: %lu", (uint) file,(ulong) filepos)); restart: @@ -1193,7 +1193,7 @@ restart: for (i=0, hash_link= *start ; i < cnt ; i++, hash_link= hash_link->next) { - KEYCACHE_DBUG_PRINT("get_hash_link", ("file %u, filepos %lu", + KEYCACHE_DBUG_PRINT("get_hash_link", ("fd: %u pos: %lu", (uint) hash_link->file,(ulong) hash_link->diskpos)); } } @@ -1285,10 +1285,11 @@ static BLOCK_LINK *find_key_block(KEY_CACHE *keycache, DBUG_ENTER("find_key_block"); KEYCACHE_THREAD_TRACE("find_key_block:begin"); - DBUG_PRINT("enter", ("file %u, filepos %lu, wrmode %lu", - (uint) file, (ulong) filepos, (uint) wrmode)); - KEYCACHE_DBUG_PRINT("find_key_block", ("file %u, filepos %lu, wrmode %lu", - (uint) file, (ulong) 
filepos, (uint) wrmode)); + DBUG_PRINT("enter", ("fd: %u pos %lu wrmode: %lu", + (uint) file, (ulong) filepos, (uint) wrmode)); + KEYCACHE_DBUG_PRINT("find_key_block", ("fd: %u pos: %lu wrmode: %lu", + (uint) file, (ulong) filepos, + (uint) wrmode)); #if !defined(DBUG_OFF) && defined(EXTRA_DEBUG) DBUG_EXECUTE("check_keycache2", test_key_cache(keycache, "start of find_key_block", 0);); @@ -1542,7 +1543,7 @@ restart: KEYCACHE_DBUG_ASSERT(page_status != -1); *page_st=page_status; KEYCACHE_DBUG_PRINT("find_key_block", - ("file %u, filepos %lu, page_status %lu", + ("fd: %u pos %lu page_status %lu", (uint) file,(ulong) filepos,(uint) page_status)); #if !defined(DBUG_OFF) && defined(EXTRA_DEBUG) @@ -1678,7 +1679,7 @@ byte *key_cache_read(KEY_CACHE *keycache, uint offset= 0; byte *start= buff; DBUG_ENTER("key_cache_read"); - DBUG_PRINT("enter", ("file %u, filepos %lu, length %u", + DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u", (uint) file, (ulong) filepos, length)); if (keycache->can_be_used) @@ -1814,7 +1815,7 @@ int key_cache_insert(KEY_CACHE *keycache, byte *buff, uint length) { DBUG_ENTER("key_cache_insert"); - DBUG_PRINT("enter", ("file %u, filepos %lu, length %u", + DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u", (uint) file,(ulong) filepos, length)); if (keycache->can_be_used) @@ -1926,7 +1927,7 @@ int key_cache_write(KEY_CACHE *keycache, int error=0; DBUG_ENTER("key_cache_write"); DBUG_PRINT("enter", - ("file %u filepos %lu length %u block_length %u key_block_length: %u", + ("fd: %u pos: %lu length: %u block_length: %u key_block_length: %u", (uint) file, (ulong) filepos, length, block_length, keycache ? keycache->key_cache_block_size : 0)); @@ -2396,7 +2397,7 @@ int flush_key_blocks(KEY_CACHE *keycache, { int res; DBUG_ENTER("flush_key_blocks"); - DBUG_PRINT("enter", ("keycache: %lx", keycache)); + DBUG_PRINT("enter", ("keycache: 0x%lx", keycache)); if (keycache->disk_blocks <= 0) DBUG_RETURN(0); diff --git a/mysys/mf_keycaches.c b/mysys/mf_keycaches.c index 806f83dc7d8..20465f3d23b 100644 --- a/mysys/mf_keycaches.c +++ b/mysys/mf_keycaches.c @@ -159,7 +159,7 @@ static byte *safe_hash_search(SAFE_HASH *hash, const byte *key, uint length) result= hash->default_value; else result= ((SAFE_HASH_ENTRY*) result)->data; - DBUG_PRINT("exit",("data: %lx", result)); + DBUG_PRINT("exit",("data: 0x%lx", result)); DBUG_RETURN(result); } @@ -190,7 +190,7 @@ static my_bool safe_hash_set(SAFE_HASH *hash, const byte *key, uint length, SAFE_HASH_ENTRY *entry; my_bool error= 0; DBUG_ENTER("safe_hash_set"); - DBUG_PRINT("enter",("key: %.*s data: %lx", length, key, data)); + DBUG_PRINT("enter",("key: %.*s data: 0x%lx", length, key, data)); rw_wrlock(&hash->mutex); entry= (SAFE_HASH_ENTRY*) hash_search(&hash->hash, key, length); diff --git a/mysys/my_alloc.c b/mysys/my_alloc.c index 34a03391bc4..c9784ddc9a0 100644 --- a/mysys/my_alloc.c +++ b/mysys/my_alloc.c @@ -26,7 +26,7 @@ void init_alloc_root(MEM_ROOT *mem_root, uint block_size, uint pre_alloc_size __attribute__((unused))) { DBUG_ENTER("init_alloc_root"); - DBUG_PRINT("enter",("root: %lx", mem_root)); + DBUG_PRINT("enter",("root: 0x%lx", mem_root)); mem_root->free= mem_root->used= mem_root->pre_alloc= 0; mem_root->min_malloc= 32; mem_root->block_size= block_size-MALLOC_OVERHEAD-sizeof(USED_MEM)-8; @@ -121,7 +121,7 @@ gptr alloc_root(MEM_ROOT *mem_root,unsigned int Size) #if defined(HAVE_purify) && defined(EXTRA_DEBUG) reg1 USED_MEM *next; DBUG_ENTER("alloc_root"); - DBUG_PRINT("enter",("root: %lx", mem_root)); + DBUG_PRINT("enter",("root: 0x%lx", 
mem_root)); Size+=ALIGN_SIZE(sizeof(USED_MEM)); if (!(next = (USED_MEM*) my_malloc(Size,MYF(MY_WME)))) @@ -222,7 +222,7 @@ void free_root(MEM_ROOT *root, myf MyFlags) { reg1 USED_MEM *next,*old; DBUG_ENTER("free_root"); - DBUG_PRINT("enter",("root: %lx flags: %u", root, (uint) MyFlags)); + DBUG_PRINT("enter",("root: 0x%lx flags: %u", root, (uint) MyFlags)); if (!root) /* QQ: Should be deleted */ DBUG_VOID_RETURN; /* purecov: inspected */ diff --git a/mysys/my_fopen.c b/mysys/my_fopen.c index 8906a288b11..e918b7b0de2 100644 --- a/mysys/my_fopen.c +++ b/mysys/my_fopen.c @@ -54,7 +54,7 @@ FILE *my_fopen(const char *FileName, int Flags, myf MyFlags) my_stream_opened++; my_file_info[fileno(fd)].type = STREAM_BY_FOPEN; pthread_mutex_unlock(&THR_LOCK_open); - DBUG_PRINT("exit",("stream: %lx",fd)); + DBUG_PRINT("exit",("stream: 0x%lx",fd)); DBUG_RETURN(fd); } pthread_mutex_unlock(&THR_LOCK_open); @@ -78,7 +78,7 @@ int my_fclose(FILE *fd, myf MyFlags) { int err,file; DBUG_ENTER("my_fclose"); - DBUG_PRINT("my",("stream: %lx MyFlags: %d",fd, MyFlags)); + DBUG_PRINT("my",("stream: 0x%lx MyFlags: %d",fd, MyFlags)); pthread_mutex_lock(&THR_LOCK_open); file=fileno(fd); @@ -138,7 +138,7 @@ FILE *my_fdopen(File Filedes, const char *name, int Flags, myf MyFlags) pthread_mutex_unlock(&THR_LOCK_open); } - DBUG_PRINT("exit",("stream: %lx",fd)); + DBUG_PRINT("exit",("stream: 0x%lx",fd)); DBUG_RETURN(fd); } /* my_fdopen */ diff --git a/mysys/my_fstream.c b/mysys/my_fstream.c index 94f3aaf3464..00fe5c7a009 100644 --- a/mysys/my_fstream.c +++ b/mysys/my_fstream.c @@ -39,7 +39,7 @@ uint my_fread(FILE *stream, byte *Buffer, uint Count, myf MyFlags) { uint readbytes; DBUG_ENTER("my_fread"); - DBUG_PRINT("my",("stream: %lx Buffer: %lx Count: %u MyFlags: %d", + DBUG_PRINT("my",("stream: 0x%lx Buffer: 0x%lx Count: %u MyFlags: %d", stream, Buffer, Count, MyFlags)); if ((readbytes = (uint) fread(Buffer,sizeof(char),(size_t) Count,stream)) @@ -80,7 +80,7 @@ uint my_fwrite(FILE *stream, const byte *Buffer, uint Count, myf MyFlags) uint errors; #endif DBUG_ENTER("my_fwrite"); - DBUG_PRINT("my",("stream: %lx Buffer: %lx Count: %u MyFlags: %d", + DBUG_PRINT("my",("stream: 0x%lx Buffer: 0x%lx Count: %u MyFlags: %d", stream, Buffer, Count, MyFlags)); #if !defined(NO_BACKGROUND) && defined(USE_MY_STREAM) @@ -150,7 +150,7 @@ my_off_t my_fseek(FILE *stream, my_off_t pos, int whence, myf MyFlags __attribute__((unused))) { DBUG_ENTER("my_fseek"); - DBUG_PRINT("my",("stream: %lx pos: %lu whence: %d MyFlags: %d", + DBUG_PRINT("my",("stream: 0x%lx pos: %lu whence: %d MyFlags: %d", stream, pos, whence, MyFlags)); DBUG_RETURN(fseek(stream, (off_t) pos, whence) ? MY_FILEPOS_ERROR : (my_off_t) ftell(stream)); @@ -164,7 +164,7 @@ my_off_t my_ftell(FILE *stream, myf MyFlags __attribute__((unused))) { off_t pos; DBUG_ENTER("my_ftell"); - DBUG_PRINT("my",("stream: %lx MyFlags: %d",stream, MyFlags)); + DBUG_PRINT("my",("stream: 0x%lx MyFlags: %d",stream, MyFlags)); pos=ftell(stream); DBUG_PRINT("exit",("ftell: %lu",(ulong) pos)); DBUG_RETURN((my_off_t) pos); diff --git a/mysys/my_getwd.c b/mysys/my_getwd.c index fd47c532cff..d6f647254e8 100644 --- a/mysys/my_getwd.c +++ b/mysys/my_getwd.c @@ -45,7 +45,7 @@ int my_getwd(my_string buf, uint size, myf MyFlags) { my_string pos; DBUG_ENTER("my_getwd"); - DBUG_PRINT("my",("buf: %lx size: %d MyFlags %d", buf,size,MyFlags)); + DBUG_PRINT("my",("buf: 0x%lx size: %d MyFlags %d", buf,size,MyFlags)); #if ! 
defined(MSDOS) if (curr_dir[0]) /* Current pos is saved here */ diff --git a/mysys/my_lib.c b/mysys/my_lib.c index 055e00d2efc..c3b0b57e549 100644 --- a/mysys/my_lib.c +++ b/mysys/my_lib.c @@ -613,7 +613,7 @@ MY_STAT *my_stat(const char *path, MY_STAT *stat_area, myf my_flags) { int m_used; DBUG_ENTER("my_stat"); - DBUG_PRINT("my", ("path: '%s', stat_area: %lx, MyFlags: %d", path, + DBUG_PRINT("my", ("path: '%s', stat_area: 0x%lx, MyFlags: %d", path, (byte *) stat_area, my_flags)); if ((m_used= (stat_area == NULL))) diff --git a/mysys/my_lwrite.c b/mysys/my_lwrite.c index e1a3decd053..3b9afdbd71f 100644 --- a/mysys/my_lwrite.c +++ b/mysys/my_lwrite.c @@ -23,7 +23,7 @@ uint32 my_lwrite(int Filedes, const byte *Buffer, uint32 Count, myf MyFlags) { uint32 writenbytes; DBUG_ENTER("my_lwrite"); - DBUG_PRINT("my",("Fd: %d Buffer: %lx Count: %ld MyFlags: %d", + DBUG_PRINT("my",("Fd: %d Buffer: 0x%lx Count: %ld MyFlags: %d", Filedes, Buffer, Count, MyFlags)); /* Temp hack to get count to int32 while write wants int */ diff --git a/mysys/my_malloc.c b/mysys/my_malloc.c index df9fe1f9bc4..3f601a42dc9 100644 --- a/mysys/my_malloc.c +++ b/mysys/my_malloc.c @@ -44,7 +44,7 @@ gptr my_malloc(unsigned int size, myf my_flags) } else if (my_flags & MY_ZEROFILL) bzero(point,size); - DBUG_PRINT("exit",("ptr: %lx",point)); + DBUG_PRINT("exit",("ptr: 0x%lx",point)); DBUG_RETURN(point); } /* my_malloc */ @@ -55,7 +55,7 @@ gptr my_malloc(unsigned int size, myf my_flags) void my_no_flags_free(gptr ptr) { DBUG_ENTER("my_free"); - DBUG_PRINT("my",("ptr: %lx",ptr)); + DBUG_PRINT("my",("ptr: 0x%lx",ptr)); if (ptr) free(ptr); DBUG_VOID_RETURN; diff --git a/mysys/my_pread.c b/mysys/my_pread.c index 661ef48ab3e..6a55a3cd8de 100644 --- a/mysys/my_pread.c +++ b/mysys/my_pread.c @@ -29,7 +29,7 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset, uint readbytes; int error; DBUG_ENTER("my_pread"); - DBUG_PRINT("my",("Fd: %d Seek: %lu Buffer: %lx Count: %u MyFlags: %d", + DBUG_PRINT("my",("Fd: %d Seek: %lu Buffer: 0x%lx Count: %u MyFlags: %d", Filedes, (ulong) offset, Buffer, Count, MyFlags)); for (;;) @@ -82,7 +82,7 @@ uint my_pwrite(int Filedes, const byte *Buffer, uint Count, my_off_t offset, uint writenbytes,errors; ulong written; DBUG_ENTER("my_pwrite"); - DBUG_PRINT("my",("Fd: %d Seek: %lu Buffer: %lx Count: %d MyFlags: %d", + DBUG_PRINT("my",("Fd: %d Seek: %lu Buffer: 0x%lx Count: %d MyFlags: %d", Filedes, (ulong) offset,Buffer, Count, MyFlags)); errors=0; written=0L; diff --git a/mysys/my_read.c b/mysys/my_read.c index b7621ac99eb..9de070e772d 100644 --- a/mysys/my_read.c +++ b/mysys/my_read.c @@ -38,7 +38,7 @@ uint my_read(File Filedes, byte *Buffer, uint Count, myf MyFlags) { uint readbytes,save_count; DBUG_ENTER("my_read"); - DBUG_PRINT("my",("Fd: %d Buffer: %lx Count: %u MyFlags: %d", + DBUG_PRINT("my",("Fd: %d Buffer: 0x%lx Count: %u MyFlags: %d", Filedes, Buffer, Count, MyFlags)); save_count=Count; diff --git a/mysys/my_realloc.c b/mysys/my_realloc.c index 5190fa75dce..c8edb172890 100644 --- a/mysys/my_realloc.c +++ b/mysys/my_realloc.c @@ -27,7 +27,7 @@ gptr my_realloc(gptr oldpoint, uint size, myf my_flags) { gptr point; DBUG_ENTER("my_realloc"); - DBUG_PRINT("my",("ptr: %lx size: %u my_flags: %d",oldpoint, size, + DBUG_PRINT("my",("ptr: 0x%lx size: %u my_flags: %d",oldpoint, size, my_flags)); if (!oldpoint && (my_flags & MY_ALLOW_ZERO_PTR)) @@ -60,6 +60,6 @@ gptr my_realloc(gptr oldpoint, uint size, myf my_flags) my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_WAITTANG), size); } #endif - 
DBUG_PRINT("exit",("ptr: %lx",point)); + DBUG_PRINT("exit",("ptr: 0x%lx",point)); DBUG_RETURN(point); } /* my_realloc */ diff --git a/mysys/my_write.c b/mysys/my_write.c index 61fd6097e28..37d885f04cd 100644 --- a/mysys/my_write.c +++ b/mysys/my_write.c @@ -26,7 +26,7 @@ uint my_write(int Filedes, const byte *Buffer, uint Count, myf MyFlags) uint writenbytes,errors; ulong written; DBUG_ENTER("my_write"); - DBUG_PRINT("my",("Fd: %d Buffer: %lx Count: %d MyFlags: %d", + DBUG_PRINT("my",("Fd: %d Buffer: 0x%lx Count: %d MyFlags: %d", Filedes, Buffer, Count, MyFlags)); errors=0; written=0L; diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c index 07c40fd91b6..6cdf98c5f5f 100644 --- a/mysys/safemalloc.c +++ b/mysys/safemalloc.c @@ -194,7 +194,7 @@ gptr _mymalloc(uint size, const char *filename, uint lineno, myf MyFlags) if ((MyFlags & MY_ZEROFILL) || !sf_malloc_quick) bfill(data, size, (char) (MyFlags & MY_ZEROFILL ? 0 : ALLOC_VAL)); /* Return a pointer to the real data */ - DBUG_PRINT("exit",("ptr: %lx", data)); + DBUG_PRINT("exit",("ptr: 0x%lx", data)); if (sf_min_adress > data) sf_min_adress= data; if (sf_max_adress < data) @@ -259,7 +259,7 @@ void _myfree(gptr ptr, const char *filename, uint lineno, myf myflags) { struct st_irem *irem; DBUG_ENTER("_myfree"); - DBUG_PRINT("enter",("ptr: %lx", ptr)); + DBUG_PRINT("enter",("ptr: 0x%lx", ptr)); if (!sf_malloc_quick) (void) _sanity (filename, lineno); @@ -446,7 +446,7 @@ static int _checkchunk(register struct st_irem *irem, const char *filename, irem->filename, irem->linenum); fprintf(stderr, " discovered at %s:%d\n", filename, lineno); (void) fflush(stderr); - DBUG_PRINT("safe",("Underrun at %lx, allocated at %s:%d", + DBUG_PRINT("safe",("Underrun at 0x%lx, allocated at %s:%d", data, irem->filename, irem->linenum)); flag=1; } @@ -462,7 +462,7 @@ static int _checkchunk(register struct st_irem *irem, const char *filename, irem->filename, irem->linenum); fprintf(stderr, " discovered at '%s:%d'\n", filename, lineno); (void) fflush(stderr); - DBUG_PRINT("safe",("Overrun at %lx, allocated at %s:%d", + DBUG_PRINT("safe",("Overrun at 0x%lx, allocated at %s:%d", data, irem->filename, irem->linenum)); diff --git a/mysys/thr_alarm.c b/mysys/thr_alarm.c index 84a8e779ae1..caef1caaf3d 100644 --- a/mysys/thr_alarm.c +++ b/mysys/thr_alarm.c @@ -257,9 +257,9 @@ void thr_end_alarm(thr_alarm_t *alarmed) if (!found) { if (*alarmed) - fprintf(stderr,"Warning: Didn't find alarm %lx in queue of %d alarms\n", + fprintf(stderr,"Warning: Didn't find alarm 0x%lx in queue of %d alarms\n", (long) *alarmed, alarm_queue.elements); - DBUG_PRINT("warning",("Didn't find alarm %lx in queue\n", + DBUG_PRINT("warning",("Didn't find alarm 0x%lx in queue\n", (long) *alarmed)); } pthread_mutex_unlock(&LOCK_alarm); diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c index 0e3ccfc0452..d9e46fe1beb 100644 --- a/mysys/thr_lock.c +++ b/mysys/thr_lock.c @@ -435,7 +435,7 @@ int thr_lock(THR_LOCK_DATA *data,enum thr_lock_type lock_type) data->thread=pthread_self(); /* Must be reset ! */ data->thread_id=my_thread_id(); /* Must be reset ! */ VOID(pthread_mutex_lock(&lock->mutex)); - DBUG_PRINT("lock",("data: %lx thread: %ld lock: %lx type: %d", + DBUG_PRINT("lock",("data: 0x%lx thread: %ld lock: 0x%lx type: %d", data,data->thread_id,lock,(int) lock_type)); check_locks(lock,(uint) lock_type <= (uint) TL_READ_NO_INSERT ? 
"enter read_lock" : "enter write_lock",0); @@ -656,7 +656,7 @@ void thr_unlock(THR_LOCK_DATA *data) THR_LOCK *lock=data->lock; enum thr_lock_type lock_type=data->type; DBUG_ENTER("thr_unlock"); - DBUG_PRINT("lock",("data: %lx thread: %ld lock: %lx", + DBUG_PRINT("lock",("data: 0x%lx thread: %ld lock: 0x%lx", data,data->thread_id,lock)); pthread_mutex_lock(&lock->mutex); check_locks(lock,"start of release lock",0); @@ -827,7 +827,7 @@ int thr_multi_lock(THR_LOCK_DATA **data,uint count) { THR_LOCK_DATA **pos,**end; DBUG_ENTER("thr_multi_lock"); - DBUG_PRINT("lock",("data: %lx count: %d",data,count)); + DBUG_PRINT("lock",("data: 0x%lx count: %d",data,count)); if (count > 1) sort_locks(data,count); /* lock everything */ @@ -839,7 +839,7 @@ int thr_multi_lock(THR_LOCK_DATA **data,uint count) DBUG_RETURN(1); } #ifdef MAIN - printf("Thread: %s Got lock: %lx type: %d\n",my_thread_name(), + printf("Thread: %s Got lock: 0x%lx type: %d\n",my_thread_name(), (long) pos[0]->lock, pos[0]->type); fflush(stdout); #endif } @@ -899,12 +899,12 @@ void thr_multi_unlock(THR_LOCK_DATA **data,uint count) { THR_LOCK_DATA **pos,**end; DBUG_ENTER("thr_multi_unlock"); - DBUG_PRINT("lock",("data: %lx count: %d",data,count)); + DBUG_PRINT("lock",("data: 0x%lx count: %d",data,count)); for (pos=data,end=data+count; pos < end ; pos++) { #ifdef MAIN - printf("Thread: %s Rel lock: %lx type: %d\n", + printf("Thread: %s Rel lock: 0x%lx type: %d\n", my_thread_name(), (long) pos[0]->lock, pos[0]->type); fflush(stdout); #endif @@ -912,7 +912,7 @@ void thr_multi_unlock(THR_LOCK_DATA **data,uint count) thr_unlock(*pos); else { - DBUG_PRINT("lock",("Free lock: data: %lx thread: %ld lock: %lx", + DBUG_PRINT("lock",("Free lock: data: 0x%lx thread: %ld lock: 0x%lx", *pos,(*pos)->thread_id,(*pos)->lock)); } } @@ -1098,7 +1098,7 @@ static void thr_print_lock(const char* name,struct st_lock_list *list) prev= &list->data; for (data=list->data; data && count++ < MAX_LOCKS ; data=data->next) { - printf("%lx (%lu:%d); ",(ulong) data,data->thread_id,(int) data->type); + printf("0x%lx (%lu:%d); ",(ulong) data,data->thread_id,(int) data->type); if (data->prev != prev) printf("\nWarning: prev didn't point at previous lock\n"); prev= &data->next; @@ -1120,7 +1120,7 @@ void thr_print_locks(void) { THR_LOCK *lock=(THR_LOCK*) list->data; VOID(pthread_mutex_lock(&lock->mutex)); - printf("lock: %lx:",(ulong) lock); + printf("lock: 0x%lx:",(ulong) lock); if ((lock->write_wait.data || lock->read_wait.data) && (! lock->read.data && ! 
lock->write.data)) printf(" WARNING: "); diff --git a/mysys/thr_mutex.c b/mysys/thr_mutex.c index 8ebe5be22e8..bbcfaa8bba6 100644 --- a/mysys/thr_mutex.c +++ b/mysys/thr_mutex.c @@ -210,7 +210,7 @@ int safe_cond_wait(pthread_cond_t *cond, safe_mutex_t *mp, const char *file, if (mp->count++) { fprintf(stderr, - "safe_mutex: Count was %d in thread %lx when locking mutex at %s, line %d\n", + "safe_mutex: Count was %d in thread 0x%lx when locking mutex at %s, line %d\n", mp->count-1, my_thread_id(), file, line); fflush(stderr); abort(); @@ -248,7 +248,7 @@ int safe_cond_timedwait(pthread_cond_t *cond, safe_mutex_t *mp, if (mp->count++) { fprintf(stderr, - "safe_mutex: Count was %d in thread %lx when locking mutex at %s, line %d (error: %d (%d))\n", + "safe_mutex: Count was %d in thread 0x%lx when locking mutex at %s, line %d (error: %d (%d))\n", mp->count-1, my_thread_id(), file, line, error, error); fflush(stderr); abort(); diff --git a/mysys/tree.c b/mysys/tree.c index 063c8739e58..bec1ec680f1 100644 --- a/mysys/tree.c +++ b/mysys/tree.c @@ -89,7 +89,7 @@ void init_tree(TREE *tree, uint default_alloc_size, uint memory_limit, tree_element_free free_element, void *custom_arg) { DBUG_ENTER("init_tree"); - DBUG_PRINT("enter",("tree: %lx size: %d",tree,size)); + DBUG_PRINT("enter",("tree: 0x%lx size: %d",tree,size)); if (default_alloc_size < DEFAULT_ALLOC_SIZE) default_alloc_size= DEFAULT_ALLOC_SIZE; @@ -137,7 +137,7 @@ void init_tree(TREE *tree, uint default_alloc_size, uint memory_limit, static void free_tree(TREE *tree, myf free_flags) { DBUG_ENTER("free_tree"); - DBUG_PRINT("enter",("tree: %lx",tree)); + DBUG_PRINT("enter",("tree: 0x%lx",tree)); if (tree->root) /* If initialized */ { diff --git a/mysys/typelib.c b/mysys/typelib.c index 9aaf97d143f..90a093b0b32 100644 --- a/mysys/typelib.c +++ b/mysys/typelib.c @@ -49,7 +49,7 @@ int find_type(my_string x, TYPELIB *typelib, uint full_name) reg1 my_string i; reg2 const char *j; DBUG_ENTER("find_type"); - DBUG_PRINT("enter",("x: '%s' lib: %lx",x,typelib)); + DBUG_PRINT("enter",("x: '%s' lib: 0x%lx",x,typelib)); if (!typelib->count) { diff --git a/ndb/include/ndbapi/NdbBlob.hpp b/ndb/include/ndbapi/NdbBlob.hpp index af4c049d4a7..dc47115d16f 100644 --- a/ndb/include/ndbapi/NdbBlob.hpp +++ b/ndb/include/ndbapi/NdbBlob.hpp @@ -234,14 +234,13 @@ private: // define blob table static void getBlobTableName(char* btname, const NdbTableImpl* t, const NdbColumnImpl* c); static void getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnImpl* c); - // table name - char theBlobTableName[BlobTableNameSize]; // ndb api stuff Ndb* theNdb; NdbConnection* theNdbCon; NdbOperation* theNdbOp; NdbTableImpl* theTable; NdbTableImpl* theAccessTable; + NdbTableImpl* theBlobTable; const NdbColumnImpl* theColumn; char theFillChar; // sizes diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index a69ec355ce8..b5c3985c6cb 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -144,6 +144,8 @@ public: FragAllLarge = 4 ///< Eight fragments per node group. 
}; }; + + class Table; // forward declaration /** * @class Column @@ -365,6 +367,8 @@ public: void setIndexOnlyStorage(bool); bool getIndexOnlyStorage() const; + const Table * getBlobTable() const; + /** * @name ODBC Specific methods * @{ diff --git a/ndb/src/common/portlib/Makefile.am b/ndb/src/common/portlib/Makefile.am index e6ecb30fe04..6f3a3fe01a9 100644 --- a/ndb/src/common/portlib/Makefile.am +++ b/ndb/src/common/portlib/Makefile.am @@ -4,7 +4,7 @@ noinst_LTLIBRARIES = libportlib.la libportlib_la_SOURCES = \ NdbCondition.c NdbMutex.c NdbSleep.c NdbTick.c \ - NdbEnv.c NdbThread.c NdbHost.c NdbTCP.c \ + NdbEnv.c NdbThread.c NdbHost.c NdbTCP.cpp \ NdbDaemon.c NdbMem.c include $(top_srcdir)/ndb/config/common.mk.am diff --git a/ndb/src/common/portlib/NdbTCP.c b/ndb/src/common/portlib/NdbTCP.cpp index 287dc6c2ecd..4bf4936aa30 100644 --- a/ndb/src/common/portlib/NdbTCP.c +++ b/ndb/src/common/portlib/NdbTCP.cpp @@ -16,14 +16,15 @@ #include <NdbMutex.h> -#include "NdbTCP.h" +#include <NdbTCP.h> -#ifdef NDB_WIN32 +#if defined NDB_WIN32 || defined SCO static NdbMutex & LOCK_gethostbyname = * NdbMutex_Create(); #else static NdbMutex LOCK_gethostbyname = NDB_MUTEX_INITIALIZER; #endif +extern "C" int Ndb_getInAddr(struct in_addr * dst, const char *address) { struct hostent * hostPtr; diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index a34f89b2119..caa548e5f07 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -1401,6 +1401,7 @@ void Dbdih::ndbStartReqLab(Signal* signal, BlockReference ref) void Dbdih::execREAD_NODESCONF(Signal* signal) { + unsigned i; ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0]; jamEntry(); Uint32 nodeArray[MAX_NDB_NODES]; @@ -1408,9 +1409,10 @@ void Dbdih::execREAD_NODESCONF(Signal* signal) csystemnodes = readNodes->noOfNodes; cmasterNodeId = readNodes->masterNodeId; int index = 0; - for (unsigned i = 1; i < MAX_NDB_NODES; i++){ + NdbNodeBitmask tmp; tmp.assign(2, readNodes->allNodes); + for (i = 1; i < MAX_NDB_NODES; i++){ jam(); - if(NodeBitmask::get(readNodes->allNodes, i)){ + if(tmp.get(i)){ jam(); nodeArray[index] = i; if(NodeBitmask::get(readNodes->inactiveNodes, i) == false){ @@ -1420,6 +1422,32 @@ void Dbdih::execREAD_NODESCONF(Signal* signal) index++; }//if }//for + + if(cstarttype == NodeState::ST_SYSTEM_RESTART || + cstarttype == NodeState::ST_NODE_RESTART){ + + for(i = 1; i<MAX_NDB_NODES; i++){ + const Uint32 stat = Sysfile::getNodeStatus(i, SYSFILE->nodeStatus); + if(stat == Sysfile::NS_NotDefined && !tmp.get(i)){ + jam(); + continue; + } + + if(tmp.get(i) && stat != Sysfile::NS_NotDefined){ + jam(); + continue; + } + char buf[255]; + snprintf(buf, sizeof(buf), + "Illegal configuration change." 
+ " Initial start needs to be performed " + " when changing no of storage nodes (node %d)", i); + progError(__LINE__, + ERR_INVALID_CONFIG, + buf); + } + } + ndbrequire(csystemnodes >= 1 && csystemnodes < MAX_NDB_NODES); if (cstarttype == NodeState::ST_INITIAL_START) { jam(); @@ -3451,10 +3479,37 @@ void Dbdih::selectMasterCandidateAndSend(Signal* signal) }//if }//for ndbrequire(masterCandidateId != 0); + setNodeGroups(); signal->theData[0] = masterCandidateId; signal->theData[1] = gci; sendSignal(cntrlblockref, GSN_DIH_RESTARTCONF, signal, 2, JBB); - setNodeGroups(); + + Uint32 node_groups[MAX_NDB_NODES]; + memset(node_groups, 0, sizeof(node_groups)); + for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { + jam(); + const Uint32 ng = Sysfile::getNodeGroup(nodePtr.i, SYSFILE->nodeGroups); + if(ng != NO_NODE_GROUP_ID){ + ndbrequire(ng < MAX_NDB_NODES); + node_groups[ng]++; + } + } + + for (nodePtr.i = 0; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { + jam(); + Uint32 count = node_groups[nodePtr.i]; + if(count != 0 && count != cnoReplicas){ + char buf[255]; + snprintf(buf, sizeof(buf), + "Illegal configuration change." + " Initial start needs to be performed " + " when changing no of replicas (%d != %d)", + node_groups[nodePtr.i], cnoReplicas); + progError(__LINE__, + ERR_INVALID_CONFIG, + buf); + } + } }//Dbdih::selectMasterCandidate() /* ------------------------------------------------------------------------- */ diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp index 8b9568fd12d..7175952aed0 100644 --- a/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/ndb/src/mgmclient/CommandInterpreter.cpp @@ -649,9 +649,10 @@ CommandInterpreter::executeShow(char* parameters) } int - ndb_nodes = 0, - api_nodes = 0, - mgm_nodes = 0; + master_id= 0, + ndb_nodes= 0, + api_nodes= 0, + mgm_nodes= 0; for(i=0; i < state->no_of_nodes; i++) { switch(state->node_states[i].node_type) { @@ -659,6 +660,8 @@ CommandInterpreter::executeShow(char* parameters) api_nodes++; break; case NDB_MGM_NODE_TYPE_NDB: + if (state->node_states[i].dynamic_id > master_id) + master_id= state->node_states[i].dynamic_id; ndb_nodes++; break; case NDB_MGM_NODE_TYPE_MGM: @@ -681,8 +684,11 @@ CommandInterpreter::executeShow(char* parameters) ndbout << " (Version: " << getMajor(state->node_states[i].version) << "." << getMinor(state->node_states[i].version) << "." - << getBuild(state->node_states[i].version) << ")" << endl; - + << getBuild(state->node_states[i].version) << "," + << " Nodegroup: " << state->node_states[i].node_group; + if (state->node_states[i].dynamic_id == master_id) + ndbout << ", Master"; + ndbout << ")" << endl; } else { ndbout << " (not connected) " << endl; @@ -692,13 +698,13 @@ CommandInterpreter::executeShow(char* parameters) } ndbout << endl; - ndbout << api_nodes - << " API Node(s)" + ndbout << mgm_nodes + << " MGM Node(s)" << endl; for(i=0; i < state->no_of_nodes; i++) { - if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_API) { - ndbout << "API node:\t" << state->node_states[i].node_id; + if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_MGM) { + ndbout << "MGM node:\t" << state->node_states[i].node_id; if(state->node_states[i].version != 0) { ndbout << " (Version: " << getMajor(state->node_states[i].version) << "." 
@@ -707,19 +713,19 @@ CommandInterpreter::executeShow(char* parameters) } else { - ndbout << " (not connected) " << endl; + ndbout << " (no version information available) " << endl; } } } ndbout << endl; - - ndbout << mgm_nodes - << " MGM Node(s)" + + ndbout << api_nodes + << " API Node(s)" << endl; for(i=0; i < state->no_of_nodes; i++) { - if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_MGM) { - ndbout << "MGM node:\t" << state->node_states[i].node_id; + if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_API) { + ndbout << "API node:\t" << state->node_states[i].node_id; if(state->node_states[i].version != 0) { ndbout << " (Version: " << getMajor(state->node_states[i].version) << "." @@ -728,11 +734,12 @@ CommandInterpreter::executeShow(char* parameters) } else { - ndbout << " (no version information available) " << endl; + ndbout << " (not connected) " << endl; } } } ndbout << endl; + // ndbout << helpTextShow; return; } else if (strcmp(parameters, "PROPERTIES") == 0 || diff --git a/ndb/src/ndbapi/NdbBlob.cpp b/ndb/src/ndbapi/NdbBlob.cpp index 431be574bbf..65e1aeedda7 100644 --- a/ndb/src/ndbapi/NdbBlob.cpp +++ b/ndb/src/ndbapi/NdbBlob.cpp @@ -140,7 +140,6 @@ void NdbBlob::init() { theState = Idle; - theBlobTableName[0] = 0; theNdb = NULL; theNdbCon = NULL; theNdbOp = NULL; @@ -865,7 +864,7 @@ NdbBlob::readParts(char* buf, Uint32 part, Uint32 count) DBG("readParts part=" << part << " count=" << count); Uint32 n = 0; while (n < count) { - NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTableName); + NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable); if (tOp == NULL || tOp->readTuple() == -1 || setPartKeyValue(tOp, part + n) == -1 || @@ -887,7 +886,7 @@ NdbBlob::insertParts(const char* buf, Uint32 part, Uint32 count) DBG("insertParts part=" << part << " count=" << count); Uint32 n = 0; while (n < count) { - NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTableName); + NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable); if (tOp == NULL || tOp->insertTuple() == -1 || setPartKeyValue(tOp, part + n) == -1 || @@ -909,7 +908,7 @@ NdbBlob::updateParts(const char* buf, Uint32 part, Uint32 count) DBG("updateParts part=" << part << " count=" << count); Uint32 n = 0; while (n < count) { - NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTableName); + NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable); if (tOp == NULL || tOp->updateTuple() == -1 || setPartKeyValue(tOp, part + n) == -1 || @@ -931,7 +930,7 @@ NdbBlob::deleteParts(Uint32 part, Uint32 count) DBG("deleteParts part=" << part << " count=" << count); Uint32 n = 0; while (n < count) { - NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTableName); + NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable); if (tOp == NULL || tOp->deleteTuple() == -1 || setPartKeyValue(tOp, part + n) == -1) { @@ -1029,12 +1028,11 @@ NdbBlob::atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl* // sanity check assert((NDB_BLOB_HEAD_SIZE << 2) == sizeof(Head)); assert(theColumn->m_attrSize * theColumn->m_arraySize == sizeof(Head) + theInlineSize); - getBlobTableName(theBlobTableName, theTable, theColumn); const NdbDictionary::Table* bt; const NdbDictionary::Column* bc; if (thePartSize > 0) { if (theStripeSize == 0 || - (bt = theNdb->theDictionary->getTable(theBlobTableName)) == NULL || + (bt = theColumn->getBlobTable()) == NULL || (bc = bt->getColumn("DATA")) == NULL || bc->getType() != partType || bc->getLength() != (int)thePartSize) { @@ -1042,6 +1040,7 @@ 
NdbBlob::atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl* return -1; } } + theBlobTable = & NdbTableImpl::getImpl(*bt); // buffers theKeyBuf.alloc(theTable->m_sizeOfKeysInWords << 2); theAccessKeyBuf.alloc(theAccessTable->m_sizeOfKeysInWords << 2); diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index d5a16546071..4b30f41b51d 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -174,6 +174,14 @@ NdbDictionary::Column::getIndexOnlyStorage() const { return m_impl.m_indexOnly; } +const NdbDictionary::Table * +NdbDictionary::Column::getBlobTable() const { + NdbTableImpl * t = m_impl.m_blobTable; + if (t) + return t->m_facade; + return 0; +} + void NdbDictionary::Column::setAutoIncrement(bool val){ m_impl.m_autoIncrement = val; diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 45a9f300aab..cb5e3b3c821 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -79,6 +79,7 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col) m_attrSize = col.m_attrSize; m_arraySize = col.m_arraySize; m_keyInfoPos = col.m_keyInfoPos; + m_blobTable = col.m_blobTable; // Do not copy m_facade !! return *this; @@ -104,6 +105,7 @@ NdbColumnImpl::init() m_arraySize = 1, m_autoIncrement = false; m_autoIncrementInitialValue = 1; + m_blobTable = NULL; } NdbColumnImpl::~NdbColumnImpl() @@ -1211,7 +1213,6 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, } if (col->getBlobType()) blobCount++; - NdbColumnImpl * null = 0; impl->m_columns.fill(attrDesc.AttributeId, null); if(impl->m_columns[attrDesc.AttributeId] != 0){ @@ -1266,7 +1267,28 @@ NdbDictionaryImpl::createBlobTables(NdbTableImpl &t) NdbBlob::getBlobTable(bt, &t, &c); if (createTable(bt) != 0) return -1; + // Save BLOB table handle + NdbTableImpl * cachedBlobTable = getTable(bt.m_externalName.c_str()); + c.m_blobTable = cachedBlobTable; } + + return 0; +} + +int +NdbDictionaryImpl::addBlobTables(NdbTableImpl &t) +{ + for (unsigned i = 0; i < t.m_columns.size(); i++) { + NdbColumnImpl & c = *t.m_columns[i]; + if (! 
c.getBlobType() || c.getPartSize() == 0) + continue; + char btname[NdbBlob::BlobTableNameSize]; + NdbBlob::getBlobTableName(btname, &t, &c); + // Save BLOB table handle + NdbTableImpl * cachedBlobTable = getTable(btname);; + c.m_blobTable = cachedBlobTable; + } + return 0; } diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index 3bf7eef3a06..9a890f02575 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -71,6 +71,7 @@ public: bool m_autoIncrement; Uint64 m_autoIncrementInitialValue; BaseString m_defaultValue; + NdbTableImpl * m_blobTable; /** * Internal types and sizes, and aggregates @@ -362,6 +363,7 @@ public: int createTable(NdbTableImpl &t); int createBlobTables(NdbTableImpl &); + int addBlobTables(NdbTableImpl &); int alterTable(NdbTableImpl &t); int dropTable(const char * name); int dropTable(NdbTableImpl &); @@ -616,7 +618,6 @@ NdbDictionaryImpl::getTableImpl(const char * internalTableName) if (ret == 0){ ret = m_receiver.getTable(internalTableName, m_ndb.usingFullyQualifiedNames()); - m_globalHash->lock(); m_globalHash->put(internalTableName, ret); m_globalHash->unlock(); @@ -629,6 +630,8 @@ NdbDictionaryImpl::getTableImpl(const char * internalTableName) m_ndb.theFirstTupleId[ret->getTableId()] = ~0; m_ndb.theLastTupleId[ret->getTableId()] = ~0; + + addBlobTables(*ret); return ret; } diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/ndb/src/ndbapi/Ndbinit.cpp index 641919d771b..be168ddffbe 100644 --- a/ndb/src/ndbapi/Ndbinit.cpp +++ b/ndb/src/ndbapi/Ndbinit.cpp @@ -43,7 +43,7 @@ static int theNoOfNdbObjects = 0; static char *ndbConnectString = 0; -#ifdef NDB_WIN32 +#if defined NDB_WIN32 || defined SCO static NdbMutex & createNdbMutex = * NdbMutex_Create(); #else static NdbMutex createNdbMutex = NDB_MUTEX_INITIALIZER; diff --git a/sql-common/client.c b/sql-common/client.c index 68878df50e8..1941e6bc517 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -133,6 +133,7 @@ static void mysql_close_free(MYSQL *mysql); static int wait_for_data(my_socket fd, uint timeout); #endif + /**************************************************************************** A modified version of connect(). my_connect() allows you to specify a timeout value, in seconds, that we should wait until we diff --git a/sql-common/my_time.c b/sql-common/my_time.c index 855e92d6648..fcfa2efef61 100644 --- a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -17,6 +17,8 @@ #include <my_time.h> #include <m_string.h> #include <m_ctype.h> +/* Windows version of localtime_r() is declared in my_ptrhead.h */ +#include <my_pthread.h> ulonglong log_10_int[20]= { diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc index 9b439087259..c004330932c 100644 --- a/sql/examples/ha_archive.cc +++ b/sql/examples/ha_archive.cc @@ -192,7 +192,7 @@ static int free_share(ARCHIVE_SHARE *share) thr_lock_delete(&share->lock); pthread_mutex_destroy(&share->mutex); if (gzclose(share->archive_write) == Z_ERRNO) - rc= -1; + rc= 1; my_free((gptr) share, MYF(0)); } pthread_mutex_unlock(&archive_mutex); @@ -226,7 +226,7 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked) if ((archive= gzopen(share->data_file_name, "rb")) == NULL) { (void)free_share(share); //We void since we already have an error - DBUG_RETURN(-1); + DBUG_RETURN(errno ? errno : -1); } DBUG_RETURN(0); @@ -234,56 +234,91 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked) /* - Closes the file. 
We first close this storage engines file handle to the - archive and then remove our reference count to the table (and possibly - free it as well). - */ + Closes the file. + + SYNOPSIS + close(); + + IMPLEMENTATION: + + We first close this storage engines file handle to the archive and + then remove our reference count to the table (and possibly free it + as well). + + RETURN + 0 ok + 1 Error +*/ + int ha_archive::close(void) { + int rc= 0; DBUG_ENTER("ha_archive::close"); - DBUG_RETURN(((gzclose(archive) == Z_ERRNO || free_share(share)) ? -1 : 0)); + + /* First close stream */ + if (gzclose(archive) == Z_ERRNO) + rc= 1; + /* then also close share */ + rc|= free_share(share); + + DBUG_RETURN(rc); } /* - We create our data file here. The format is pretty simple. The first bytes in - any file are the version number. Currently we do nothing with this, but in - the future this gives us the ability to figure out version if we change the - format at all. After the version we starting writing our rows. Unlike other - storage engines we do not "pack" our data. Since we are about to do a general - compression, packing would just be a waste of CPU time. If the table has blobs - they are written after the row in the order of creation. + We create our data file here. The format is pretty simple. The first + bytes in any file are the version number. Currently we do nothing + with this, but in the future this gives us the ability to figure out + version if we change the format at all. After the version we + starting writing our rows. Unlike other storage engines we do not + "pack" our data. Since we are about to do a general compression, + packing would just be a waste of CPU time. If the table has blobs + they are written after the row in the order of creation. + So to read a row we: Read the version Read the record and copy it into buf Loop through any blobs and read them - */ -int ha_archive::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) +*/ + +int ha_archive::create(const char *name, TABLE *table_arg, + HA_CREATE_INFO *create_info) { File create_file; char name_buff[FN_REFLEN]; size_t written; + int error; DBUG_ENTER("ha_archive::create"); - if ((create_file= my_create(fn_format(name_buff,name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME),0, - O_RDWR | O_TRUNC,MYF(MY_WME))) < 0) - DBUG_RETURN(-1); + if ((create_file= my_create(fn_format(name_buff,name,"",ARZ, + MY_REPLACE_EXT|MY_UNPACK_FILENAME),0, + O_RDWR | O_TRUNC,MYF(MY_WME))) < 0) + { + error= my_errno; + goto err; + } if ((archive= gzdopen(create_file, "ab")) == NULL) { + error= errno; delete_table(name); - DBUG_RETURN(-1); + goto err; } version= ARCHIVE_VERSION; written= gzwrite(archive, &version, sizeof(version)); - if (written != sizeof(version) || gzclose(archive)) + if (gzclose(archive) || written != sizeof(version)) { + error= errno; delete_table(name); - DBUG_RETURN(-1); + goto err; } - DBUG_RETURN(0); + +err: + /* Return error number, if we got one */ + DBUG_RETURN(error ? error : -1); } + /* Look at ha_archive::open() for an explanation of the row format. Here we just write out the row. @@ -298,9 +333,9 @@ int ha_archive::write_row(byte * buf) if (table->timestamp_default_now) update_timestamp(buf+table->timestamp_default_now-1); written= gzwrite(share->archive_write, buf, table->reclength); - share->dirty= true; + share->dirty= TRUE; if (written != table->reclength) - DBUG_RETURN(-1); + DBUG_RETURN(errno ? 
errno : -1); for (Field_blob **field=table->blob_field ; *field ; field++) { @@ -310,7 +345,7 @@ int ha_archive::write_row(byte * buf) (*field)->get_ptr(&ptr); written= gzwrite(share->archive_write, ptr, (unsigned)size); if (written != size) - DBUG_RETURN(-1); + DBUG_RETURN(errno ? errno : -1); } DBUG_RETURN(0); @@ -322,6 +357,7 @@ int ha_archive::write_row(byte * buf) that it is a table scan we rewind the file to the beginning, otherwise we assume the position will be set. */ + int ha_archive::rnd_init(bool scan) { DBUG_ENTER("ha_archive::rnd_init"); @@ -339,10 +375,10 @@ int ha_archive::rnd_init(bool scan) If dirty, we lock, and then reset/flush the data. I found that just calling gzflush() doesn't always work. */ - if (share->dirty == true) + if (share->dirty == TRUE) { pthread_mutex_lock(&share->mutex); - if (share->dirty == true) + if (share->dirty == TRUE) { /* I was having problems with OSX, but it worked for 10.3 so I am wrapping this with and ifdef */ #ifdef BROKEN_GZFLUSH @@ -350,12 +386,12 @@ int ha_archive::rnd_init(bool scan) if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL) { pthread_mutex_unlock(&share->mutex); - DBUG_RETURN(-1); + DBUG_RETURN(errno ? errno : -1); } #else gzflush(share->archive_write, Z_SYNC_FLUSH); #endif - share->dirty= false; + share->dirty= FALSE; } pthread_mutex_unlock(&share->mutex); } @@ -367,8 +403,8 @@ int ha_archive::rnd_init(bool scan) if (scan) { read= gzread(archive, &version, sizeof(version)); - if (read == 0 || read != sizeof(version)) - DBUG_RETURN(-1); + if (read != sizeof(version)) + DBUG_RETURN(errno ? errno : -1); } DBUG_RETURN(0); @@ -393,7 +429,7 @@ int ha_archive::get_row(byte *buf) DBUG_RETURN(HA_ERR_END_OF_FILE); /* If the record is the wrong size, the file is probably damaged */ - if (read != table->reclength) + if ((ulong) read != table->reclength) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); /* Calculate blob length, we use this for our buffer */ @@ -409,7 +445,7 @@ int ha_archive::get_row(byte *buf) { size_t size= (*field)->get_length(); read= gzread(archive, last, size); - if (read == 0 || read != size) + if ((size_t) read != size) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); (*field)->set_ptr(size, last); last += size; @@ -417,19 +453,21 @@ int ha_archive::get_row(byte *buf) DBUG_RETURN(0); } + /* Called during ORDER BY. Its position is either from being called sequentially or by having had ha_archive::rnd_pos() called before it is called. */ + int ha_archive::rnd_next(byte *buf) { - DBUG_ENTER("ha_archive::rnd_next"); int rc; + DBUG_ENTER("ha_archive::rnd_next"); statistic_increment(ha_read_rnd_next_count,&LOCK_status); current_position= gztell(archive); rc= get_row(buf); - if (!(HA_ERR_END_OF_FILE == rc)) + if (rc != HA_ERR_END_OF_FILE) records++; DBUG_RETURN(rc); @@ -450,10 +488,12 @@ void ha_archive::position(const byte *record) /* - This is called after a table scan for each row if the results of the scan need - to be ordered. It will take *pos and use it to move the cursor in the file so - that the next row that is called is the correctly ordered row. + This is called after a table scan for each row if the results of the + scan need to be ordered. It will take *pos and use it to move the + cursor in the file so that the next row that is called is the + correctly ordered row. 
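As an aside to the handler comments above (the sketch is illustrative, not part of the patch): the archive format they describe is simply a version marker at the front of the file, followed by rows written out unpacked, one after another, with blob payloads appended behind the row that owns them. A minimal standalone C++ sketch of that create-then-scan pattern against zlib's gz* API, with invented DemoRow/demo_create/demo_scan names and blobs omitted for brevity (link with -lz):

#include <zlib.h>
#include <cstdio>

/* Stand-in for one fixed-length row (table->reclength bytes in the engine). */
struct DemoRow { char name[16]; int value; };

static const char demo_version = 1;   /* plays the role of ARCHIVE_VERSION */

/* Create the file: version marker first, then rows back to back, unpacked. */
static bool demo_create(const char *path, const DemoRow *rows, unsigned count)
{
  gzFile f = gzopen(path, "wb");
  if (f == NULL)
    return false;
  bool ok = gzwrite(f, &demo_version, sizeof(demo_version)) == (int) sizeof(demo_version);
  for (unsigned i = 0; ok && i < count; i++)
    ok = gzwrite(f, &rows[i], sizeof(DemoRow)) == (int) sizeof(DemoRow);
  return gzclose(f) == Z_OK && ok;
}

/* Full scan: check the version, then read fixed-size records until EOF. */
static bool demo_scan(const char *path)
{
  gzFile f = gzopen(path, "rb");
  if (f == NULL)
    return false;
  char version;
  if (gzread(f, &version, sizeof(version)) != (int) sizeof(version))
  {
    gzclose(f);
    return false;                     /* empty or damaged header */
  }
  DemoRow row;
  int got;
  while ((got = gzread(f, &row, sizeof(row))) == (int) sizeof(row))
    std::printf("%.16s = %d\n", row.name, row.value);
  gzclose(f);
  return got == 0;                    /* 0 is clean EOF; a short read means damage */
}

int main()
{
  const DemoRow rows[2] = { { "alpha", 1 }, { "beta", 2 } };
  return (demo_create("demo.ARZ", rows, 2) && demo_scan("demo.ARZ")) ? 0 : 1;
}

The scan treats a zero return from gzread() as clean end of file and any other short read as damage, the same distinction the handler draws between HA_ERR_END_OF_FILE and HA_ERR_CRASHED_ON_USAGE.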
*/ + int ha_archive::rnd_pos(byte * buf, byte *pos) { DBUG_ENTER("ha_archive::rnd_pos"); @@ -568,11 +608,8 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd, return to; } -ha_rows ha_archive::records_in_range(int inx, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag) +ha_rows ha_archive::records_in_range(uint inx, key_range *min_key, + key_range *max_key) { DBUG_ENTER("ha_archive::records_in_range "); DBUG_RETURN(records); // HA_ERR_WRONG_COMMAND diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h index 2fab80f0598..f08353a5d6c 100644 --- a/sql/examples/ha_archive.h +++ b/sql/examples/ha_archive.h @@ -86,7 +86,8 @@ public: */ virtual double scan_time() { return (double) (records) / 20.0+10; } /* The next method will never be called */ - virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; } + virtual double read_time(uint index, uint ranges, ha_rows rows) + { return (double) rows / 20.0+1; } int open(const char *name, int mode, uint test_if_locked); int close(void); int write_row(byte * buf); @@ -109,10 +110,7 @@ public: int extra(enum ha_extra_function operation); int reset(void); int external_lock(THD *thd, int lock_type); - ha_rows records_in_range(int inx, const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag); + ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, diff --git a/sql/item.cc b/sql/item.cc index c85aee724cb..11d618748b3 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -60,10 +60,10 @@ Item::Item(): */ if (thd->lex->current_select) { - SELECT_LEX_NODE::enum_parsing_place place= + enum_parsing_place place= thd->lex->current_select->parsing_place; - if (place == SELECT_LEX_NODE::SELECT_LIST || - place == SELECT_LEX_NODE::IN_HAVING) + if (place == SELECT_LIST || + place == IN_HAVING) thd->lex->current_select->select_n_having_items++; } } @@ -1233,21 +1233,34 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) table_list= (last= sl)->get_table_list(); if (sl->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) { - // it is primary INSERT st_select_lex => skip first table resolving + /* + it is primary INSERT st_select_lex => skip first table + resolving + */ table_list= table_list->next; } Item_subselect *prev_subselect_item= prev_unit->item; - if ((tmp= find_field_in_tables(thd, this, - table_list, &where, - 0)) != not_found_field) - { - if (!tmp) - return -1; - prev_subselect_item->used_tables_cache|= tmp->table->map; - prev_subselect_item->const_item_cache= 0; - break; - } + enum_parsing_place place= + prev_subselect_item->parsing_place; + /* + check table fields only if subquery used somewhere out of HAVING + or SELECT list or outer SELECT do not use groupping (i.e. 
tables + are accessable) + */ + if (((place != IN_HAVING && + place != SELECT_LIST) || + (sl->with_sum_func == 0 && sl->group_list.elements == 0)) && + (tmp= find_field_in_tables(thd, this, + table_list, &where, + 0)) != not_found_field) + { + if (!tmp) + return -1; + prev_subselect_item->used_tables_cache|= tmp->table->map; + prev_subselect_item->const_item_cache= 0; + break; + } if (sl->resolve_mode == SELECT_LEX::SELECT_MODE && (refer= find_item_in_list(this, sl->item_list, &counter, REPORT_EXCEPT_NOT_FOUND)) != @@ -1906,16 +1919,25 @@ bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables, Item **reference) // it is primary INSERT st_select_lex => skip first table resolving table_list= table_list->next; } - if ((tmp= find_field_in_tables(thd, this, - table_list, &where, - 0)) != not_found_field) - { - prev_subselect_item->used_tables_cache|= tmp->table->map; - prev_subselect_item->const_item_cache= 0; - break; - } - - // Reference is not found => depend from outer (or just error) + enum_parsing_place place= + prev_subselect_item->parsing_place; + /* + check table fields only if subquery used somewhere out of HAVING + or SELECT list or outer SELECT do not use groupping (i.e. tables + are accessable) + */ + if (((place != IN_HAVING && + place != SELECT_LIST) || + (sl->with_sum_func == 0 && sl->group_list.elements == 0)) && + (tmp= find_field_in_tables(thd, this, + table_list, &where, + 0)) != not_found_field) + { + prev_subselect_item->used_tables_cache|= tmp->table->map; + prev_subselect_item->const_item_cache= 0; + break; + } + // Reference is not found => depend from outer (or just error) prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; prev_subselect_item->const_item_cache= 0; diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 68bc144d518..8d140efac5f 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -63,12 +63,21 @@ void Item_subselect::init(st_select_lex *select_lex, => we do not copy old_engine here */ engine= unit->item->engine; + parsing_place= unit->item->parsing_place; unit->item->engine= 0; unit->item= this; engine->change_item(this, result); } else { + SELECT_LEX *outer_select= unit->outer_select(); + /* + do not take into account expression inside aggregate functions because + they can access original table fields + */ + parsing_place= (outer_select->in_sum_expr ? 
+ NO_MATTER : + outer_select->parsing_place); if (select_lex->next_select()) engine= new subselect_union_engine(unit, result, this); else @@ -76,7 +85,7 @@ void Item_subselect::init(st_select_lex *select_lex, } { SELECT_LEX *upper= unit->outer_select(); - if (upper->parsing_place == SELECT_LEX_NODE::IN_HAVING) + if (upper->parsing_place == IN_HAVING) upper->subquery_in_having= 1; } DBUG_VOID_RETURN; @@ -1245,29 +1254,31 @@ int subselect_uniquesubquery_engine::exec() DBUG_ENTER("subselect_uniquesubquery_engine::exec"); int error; TABLE *table= tab->table; - if ((tab->ref.key_err= (*tab->ref.key_copy)->copy())) + for (store_key **copy=tab->ref.key_copy ; *copy ; copy++) { - table->status= STATUS_NOT_FOUND; - error= -1; + if (tab->ref.key_err= (*copy)->copy()) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(1); + } } + + if (!table->file->inited) + table->file->ha_index_init(tab->ref.key); + error= table->file->index_read(table->record[0], + tab->ref.key_buff, + tab->ref.key_length,HA_READ_KEY_EXACT); + if (error && error != HA_ERR_KEY_NOT_FOUND) + error= report_error(table, error); else { - if (!table->file->inited) - table->file->ha_index_init(tab->ref.key); - error= table->file->index_read(table->record[0], - tab->ref.key_buff, - tab->ref.key_length,HA_READ_KEY_EXACT); - if (error && error != HA_ERR_KEY_NOT_FOUND) - error= report_error(table, error); - else - { - error= 0; - table->null_row= 0; - ((Item_in_subselect *) item)->value= (!table->status && - (!cond || cond->val_int()) ? 1 : - 0); - } + error= 0; + table->null_row= 0; + ((Item_in_subselect *) item)->value= (!table->status && + (!cond || cond->val_int()) ? 1 : + 0); } + DBUG_RETURN(error != 0); } @@ -1295,55 +1306,56 @@ int subselect_indexsubquery_engine::exec() ((Item_in_subselect *) item)->was_null= 0; } - if ((*tab->ref.key_copy) && (tab->ref.key_err= (*tab->ref.key_copy)->copy())) + for (store_key **copy=tab->ref.key_copy ; *copy ; copy++) { - table->status= STATUS_NOT_FOUND; - error= -1; + if (tab->ref.key_err= (*copy)->copy()) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(1); + } } + + if (!table->file->inited) + table->file->ha_index_init(tab->ref.key); + error= table->file->index_read(table->record[0], + tab->ref.key_buff, + tab->ref.key_length,HA_READ_KEY_EXACT); + if (error && error != HA_ERR_KEY_NOT_FOUND) + error= report_error(table, error); else { - if (!table->file->inited) - table->file->ha_index_init(tab->ref.key); - error= table->file->index_read(table->record[0], - tab->ref.key_buff, - tab->ref.key_length,HA_READ_KEY_EXACT); - if (error && error != HA_ERR_KEY_NOT_FOUND) - error= report_error(table, error); - else + for (;;) { - for (;;) + error= 0; + table->null_row= 0; + if (!table->status) { - error= 0; - table->null_row= 0; - if (!table->status) - { - if (!cond || cond->val_int()) - { - if (null_finding) - ((Item_in_subselect *) item)->was_null= 1; - else - ((Item_in_subselect *) item)->value= 1; - break; - } - error= table->file->index_next_same(table->record[0], - tab->ref.key_buff, - tab->ref.key_length); - if (error && error != HA_ERR_END_OF_FILE) - { - error= report_error(table, error); - break; - } - } - else - { - if (!check_null || null_finding) - break; /* We don't need to check nulls */ - *tab->ref.null_ref_key= 1; - null_finding= 1; - /* Check if there exists a row with a null value in the index */ - if ((error= (safe_index_read(tab) == 1))) - break; - } + if (!cond || cond->val_int()) + { + if (null_finding) + ((Item_in_subselect *) item)->was_null= 1; + else + ((Item_in_subselect *) 
item)->value= 1; + break; + } + error= table->file->index_next_same(table->record[0], + tab->ref.key_buff, + tab->ref.key_length); + if (error && error != HA_ERR_END_OF_FILE) + { + error= report_error(table, error); + break; + } + } + else + { + if (!check_null || null_finding) + break; /* We don't need to check nulls */ + *tab->ref.null_ref_key= 1; + null_finding= 1; + /* Check if there exists a row with a null value in the index */ + if ((error= (safe_index_read(tab) == 1))) + break; } } } diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 1ce3144f660..f570d89f28f 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -48,6 +48,8 @@ protected: table_map used_tables_cache; /* allowed number of columns (1 for single value subqueries) */ uint max_columns; + /* where subquery is placed */ + enum_parsing_place parsing_place; /* work with 'substitution' */ bool have_to_be_excluded; /* cache of constant state */ diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 6644a45668b..2037cf48616 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -293,6 +293,13 @@ void debug_sync_point(const char* lock_name, uint lock_timeout); */ #define MAX_DATE_REP_LENGTH 30 +enum enum_parsing_place +{ + NO_MATTER, + IN_HAVING, + SELECT_LIST +}; + struct st_table; class THD; class Item_arena; diff --git a/sql/records.cc b/sql/records.cc index 94634d30759..5a969ef9c20 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -101,6 +101,9 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, else if (select && select->quick) { DBUG_PRINT("info",("using rr_quick")); + + if (!table->file->inited) + table->file->ha_index_init(select->quick->index); info->read_record=rr_quick; } else if (table->sort.record_pointers) diff --git a/sql/share/czech/errmsg.txt b/sql/share/czech/errmsg.txt index 772e3e387d6..ee75210d4fe 100644 --- a/sql/share/czech/errmsg.txt +++ b/sql/share/czech/errmsg.txt @@ -313,4 +313,4 @@ character-set=latin2 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/danish/errmsg.txt b/sql/share/danish/errmsg.txt index 91fdb82fe59..408f86b0445 100644 --- a/sql/share/danish/errmsg.txt +++ b/sql/share/danish/errmsg.txt @@ -307,4 +307,4 @@ character-set=latin1 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/dutch/errmsg.txt b/sql/share/dutch/errmsg.txt index 41678ae67aa..95af6aaa01f 100644 --- a/sql/share/dutch/errmsg.txt +++ b/sql/share/dutch/errmsg.txt @@ -315,4 +315,4 @@ character-set=latin1 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/english/errmsg.txt b/sql/share/english/errmsg.txt index c34bf1c0403..5ad23b92a5a 100644 --- a/sql/share/english/errmsg.txt +++ b/sql/share/english/errmsg.txt @@ -304,4 +304,4 @@ character-set=latin1 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", 
"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/estonian/errmsg.txt b/sql/share/estonian/errmsg.txt index d3bb306f00a..36e0b8409e9 100644 --- a/sql/share/estonian/errmsg.txt +++ b/sql/share/estonian/errmsg.txt @@ -309,4 +309,4 @@ character-set=latin7 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/french/errmsg.txt b/sql/share/french/errmsg.txt index 49a1065a5ca..3bd6835908e 100644 --- a/sql/share/french/errmsg.txt +++ b/sql/share/french/errmsg.txt @@ -304,4 +304,4 @@ character-set=latin1 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/german/errmsg.txt b/sql/share/german/errmsg.txt index 56e6454ab29..bf5a36a887a 100644 --- a/sql/share/german/errmsg.txt +++ b/sql/share/german/errmsg.txt @@ -316,4 +316,4 @@ character-set=latin1 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/greek/errmsg.txt b/sql/share/greek/errmsg.txt index dd83db9907c..9703bad11a1 100644 --- a/sql/share/greek/errmsg.txt +++ b/sql/share/greek/errmsg.txt @@ -304,4 +304,4 @@ character-set=greek "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/hungarian/errmsg.txt b/sql/share/hungarian/errmsg.txt index 23c6cffbcb8..1f71086ff69 100644 --- a/sql/share/hungarian/errmsg.txt +++ b/sql/share/hungarian/errmsg.txt @@ -306,4 +306,4 @@ character-set=latin2 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/italian/errmsg.txt b/sql/share/italian/errmsg.txt index 1ae152fff8f..21158fcb567 100644 --- a/sql/share/italian/errmsg.txt +++ b/sql/share/italian/errmsg.txt @@ -304,4 +304,4 @@ character-set=latin1 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/japanese/errmsg.txt b/sql/share/japanese/errmsg.txt index fbdd63f1ede..3a6dd644d8b 100644 --- a/sql/share/japanese/errmsg.txt +++ b/sql/share/japanese/errmsg.txt @@ -306,4 +306,4 @@ character-set=ujis "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() 
was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/korean/errmsg.txt b/sql/share/korean/errmsg.txt index 6e98cd61541..356f0a63540 100644 --- a/sql/share/korean/errmsg.txt +++ b/sql/share/korean/errmsg.txt @@ -304,4 +304,4 @@ character-set=euckr "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/norwegian-ny/errmsg.txt b/sql/share/norwegian-ny/errmsg.txt index 517c041a355..b5564cb264e 100644 --- a/sql/share/norwegian-ny/errmsg.txt +++ b/sql/share/norwegian-ny/errmsg.txt @@ -306,4 +306,4 @@ character-set=latin1 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/norwegian/errmsg.txt b/sql/share/norwegian/errmsg.txt index b5cf4a7df19..fcea45b06ac 100644 --- a/sql/share/norwegian/errmsg.txt +++ b/sql/share/norwegian/errmsg.txt @@ -306,4 +306,4 @@ character-set=latin1 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/polish/errmsg.txt b/sql/share/polish/errmsg.txt index be152eed9b2..2a18e4de020 100644 --- a/sql/share/polish/errmsg.txt +++ b/sql/share/polish/errmsg.txt @@ -308,4 +308,4 @@ character-set=latin2 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/portuguese/errmsg.txt b/sql/share/portuguese/errmsg.txt index 729883c7a79..6ba0fbca014 100644 --- a/sql/share/portuguese/errmsg.txt +++ b/sql/share/portuguese/errmsg.txt @@ -305,4 +305,4 @@ character-set=latin1 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/romanian/errmsg.txt b/sql/share/romanian/errmsg.txt index c1f3abc9c3d..50b2b36c959 100644 --- a/sql/share/romanian/errmsg.txt +++ b/sql/share/romanian/errmsg.txt @@ -308,4 +308,4 @@ character-set=latin2 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/russian/errmsg.txt b/sql/share/russian/errmsg.txt index ecc8fc6e408..d8641d1dd14 100644 --- a/sql/share/russian/errmsg.txt +++ b/sql/share/russian/errmsg.txt @@ -306,4 +306,4 @@ character-set=koi8r "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than 
max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/serbian/errmsg.txt b/sql/share/serbian/errmsg.txt index a4c8ea3713a..a8cde5a56b1 100644 --- a/sql/share/serbian/errmsg.txt +++ b/sql/share/serbian/errmsg.txt @@ -310,4 +310,4 @@ character-set=cp1250 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/slovak/errmsg.txt b/sql/share/slovak/errmsg.txt index b616db6235c..42ef7f62076 100644 --- a/sql/share/slovak/errmsg.txt +++ b/sql/share/slovak/errmsg.txt @@ -312,4 +312,4 @@ character-set=latin2 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/spanish/errmsg.txt b/sql/share/spanish/errmsg.txt index 0231e83fbec..b82712be350 100644 --- a/sql/share/spanish/errmsg.txt +++ b/sql/share/spanish/errmsg.txt @@ -306,4 +306,4 @@ character-set=latin1 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/swedish/errmsg.txt b/sql/share/swedish/errmsg.txt index a227de3b991..78620b28a2f 100644 --- a/sql/share/swedish/errmsg.txt +++ b/sql/share/swedish/errmsg.txt @@ -304,4 +304,4 @@ character-set=latin1 "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/share/ukrainian/errmsg.txt b/sql/share/ukrainian/errmsg.txt index f68e709471c..6d07eb1a656 100644 --- a/sql/share/ukrainian/errmsg.txt +++ b/sql/share/ukrainian/errmsg.txt @@ -309,4 +309,4 @@ character-set=koi8u "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%d) - truncated" +"Result of %s() was larger than max_allowed_packet (%ld) - truncated" diff --git a/sql/slave.cc b/sql/slave.cc index 7fb7fbdade4..cb37a798037 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -3056,6 +3056,9 @@ dump"); } thd->proc_info= "Waiting to reconnect after a failed binlog dump request"; +#ifdef SIGNAL_WITH_VIO_CLOSE + thd->clear_active_vio(); +#endif end_server(mysql); /* First time retry immediately, assuming that we can recover @@ -3129,6 +3132,9 @@ max_allowed_packet", goto err; } thd->proc_info = "Waiting to reconnect after a failed master event read"; +#ifdef SIGNAL_WITH_VIO_CLOSE + thd->clear_active_vio(); +#endif end_server(mysql); if (retry_count++) { @@ -4383,4 +4389,5 @@ template class I_List_iterator<i_string>; template class I_List_iterator<i_string_pair>; #endif + #endif /* HAVE_REPLICATION */ diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 23fef44c964..ff7dc805119 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1303,7 +1303,7 @@ int 
select_dumpvar::prepare(List<Item> &list, SELECT_LEX_UNIT *u) Item_arena::Item_arena(THD* thd) :free_list(0), - state(INITIALIZED) + state((int)INITIALIZED) { init_sql_alloc(&mem_root, thd->variables.query_alloc_block_size, @@ -1315,7 +1315,7 @@ Item_arena::Item_arena(THD* thd) Item_arena::Item_arena() :free_list(0), - state(CONVENTIONAL_EXECUTION) + state((int)CONVENTIONAL_EXECUTION) { clear_alloc_root(&mem_root); } @@ -1323,7 +1323,7 @@ Item_arena::Item_arena() Item_arena::Item_arena(bool init_mem_root) :free_list(0), - state(INITIALIZED) + state((int)INITIALIZED) { if (init_mem_root) clear_alloc_root(&mem_root); diff --git a/sql/sql_class.h b/sql/sql_class.h index 83fdb4c7d76..713609b3d32 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -427,8 +427,12 @@ public: */ Item *free_list; MEM_ROOT mem_root; - static const int INITIALIZED= 0, PREPARED= 1, EXECUTED= 3, - CONVENTIONAL_EXECUTION= 2, ERROR= -1; + enum + { + INITIALIZED= 0, PREPARED= 1, EXECUTED= 3, CONVENTIONAL_EXECUTION= 2, + ERROR= -1 + }; + int state; /* We build without RTTI, so dynamic_cast can't be used. */ @@ -443,8 +447,8 @@ public: virtual Type type() const; virtual ~Item_arena(); - inline bool is_stmt_prepare() const { return state < PREPARED; } - inline bool is_first_stmt_execute() const { return state == PREPARED; } + inline bool is_stmt_prepare() const { return state < (int)PREPARED; } + inline bool is_first_stmt_execute() const { return state == (int)PREPARED; } inline gptr alloc(unsigned int size) { return alloc_root(&mem_root,size); } inline gptr calloc(unsigned int size) { diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index be1b7c3377e..ccbc015533b 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1017,7 +1017,7 @@ void st_select_lex::init_query() select_n_having_items= 0; prep_where= 0; subquery_in_having= explicit_limit= 0; - parsing_place= SELECT_LEX_NODE::NO_MATTER; + parsing_place= NO_MATTER; } void st_select_lex::init_select() diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 053c85166f6..da2c3fba097 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -220,12 +220,6 @@ protected: *master, *slave, /* vertical links */ *link_next, **link_prev; /* list of whole SELECT_LEX */ public: - enum enum_parsing_place - { - NO_MATTER, - IN_HAVING, - SELECT_LIST - }; ulong options; /* diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 850d41a030b..94b6ab103da 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -132,7 +132,7 @@ find_prepared_statement(THD *thd, ulong id, const char *where, { Statement *stmt= thd->stmt_map.find(id); - if (stmt == 0 || stmt->type() != Item_arena::PREPARED_STATEMENT) + if (stmt == 0 || stmt->type() != (int)Item_arena::PREPARED_STATEMENT) { char llbuf[22]; my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), 22, llstr(id, llbuf), where); @@ -1619,7 +1619,7 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, { sl->prep_where= sl->where; } - stmt->state= Prepared_statement::PREPARED; + stmt->state= (int)Prepared_statement::PREPARED; } DBUG_RETURN(!stmt); @@ -1733,7 +1733,7 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length) DBUG_PRINT("exec_query:", ("%s", stmt->query)); /* Check if we got an error when sending long data */ - if (stmt->state == Item_arena::ERROR) + if (stmt->state == (int)Item_arena::ERROR) { send_error(thd, stmt->last_errno, stmt->last_error); DBUG_VOID_RETURN; @@ -1850,7 +1850,7 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt, transformations of the query tree (i.e. negations elimination). 
This should be done permanently on the parse tree of this statement. */ - if (stmt->state == Item_arena::PREPARED) + if (stmt->state == (int)Item_arena::PREPARED) thd->current_arena= stmt; if (!(specialflag & SPECIAL_NO_PRIOR)) @@ -1863,10 +1863,10 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt, /* Free Items that were created during this execution of the PS. */ free_items(thd->free_list); thd->free_list= 0; - if (stmt->state == Item_arena::PREPARED) + if (stmt->state == (int)Item_arena::PREPARED) { thd->current_arena= thd; - stmt->state= Item_arena::EXECUTED; + stmt->state= (int)Item_arena::EXECUTED; } cleanup_items(stmt->free_list); reset_stmt_params(stmt); @@ -1905,7 +1905,7 @@ void mysql_stmt_reset(THD *thd, char *packet) SEND_ERROR))) DBUG_VOID_RETURN; - stmt->state= Item_arena::PREPARED; + stmt->state= (int)Item_arena::PREPARED; /* Clear parameters from data which could be set by @@ -1993,7 +1993,7 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) if (param_number >= stmt->param_count) { /* Error will be sent in execute call */ - stmt->state= Item_arena::ERROR; + stmt->state= (int)Item_arena::ERROR; stmt->last_errno= ER_WRONG_ARGUMENTS; sprintf(stmt->last_error, ER(ER_WRONG_ARGUMENTS), "mysql_stmt_send_long_data"); @@ -2009,7 +2009,7 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) if (param->set_longdata(thd->extra_data, thd->extra_length)) #endif { - stmt->state= Item_arena::ERROR; + stmt->state= (int)Item_arena::ERROR; stmt->last_errno= ER_OUTOFMEMORY; sprintf(stmt->last_error, ER(ER_OUTOFMEMORY), 0); } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index ca17f246929..279a56b9e58 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -3880,7 +3880,11 @@ JOIN::join_free(bool full) if (full) { group_fields.delete_elements(); - tmp_table_param.copy_funcs.delete_elements(); + /* + We can't call delete_elements() on copy_funcs as this will cause + problems in free_elements() as some of the elements are then deleted. 
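The join_free() comment above boils down to an ownership rule: when two lists can point at the same Item objects, only one of them may delete, and the other has to be emptied without freeing anything. A tiny standalone illustration of that rule (DemoItem and demo_cleanup are invented names; the server's actual List<Item> type is not used here):

#include <vector>
#include <cstddef>

struct DemoItem { int id; };

/*
  Two lists that may reference the same heap objects, the way copy_funcs can
  overlap with the items freed later during cleanup.  Deleting through both
  lists would free the shared elements twice; the safe pattern is to empty the
  borrowing list and free each element exactly once through the owning list.
*/
static void demo_cleanup(std::vector<DemoItem*> &owning,
                         std::vector<DemoItem*> &borrowing)
{
  borrowing.clear();                       /* drop references, free nothing */
  for (std::size_t i = 0; i < owning.size(); i++)
    delete owning[i];                      /* single point of deallocation */
  owning.clear();
}

int main()
{
  std::vector<DemoItem*> owning, borrowing;
  owning.push_back(new DemoItem());
  owning.push_back(new DemoItem());
  borrowing.push_back(owning[0]);          /* shared element */
  demo_cleanup(owning, borrowing);         /* no double free */
  return 0;
}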
+ */ + tmp_table_param.copy_funcs.empty(); tmp_table_param.cleanup(); } DBUG_VOID_RETURN; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index b0b92178198..03777daa9b0 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -3281,7 +3281,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, ha_rows *deleted) { int error; - Copy_field *copy,*copy_end, *next_field; + Copy_field *copy,*copy_end, *next_field= 0; ulong found_count,delete_count; THD *thd= current_thd; uint length; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 1b091c26a6d..9cc39fe5104 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1113,11 +1113,11 @@ create_select: lex->sql_command= SQLCOM_REPLACE_SELECT; lex->current_select->table_list.save_and_clear(&lex->save_list); mysql_init_select(lex); - lex->current_select->parsing_place= SELECT_LEX_NODE::SELECT_LIST; + lex->current_select->parsing_place= SELECT_LIST; } select_options select_item_list { - Select->parsing_place= SELECT_LEX_NODE::NO_MATTER; + Select->parsing_place= NO_MATTER; } opt_select_from { Lex->current_select->table_list.push_front(&Lex->save_list); } @@ -2376,11 +2376,11 @@ select_part2: lex->lock_option= TL_READ; if (sel->linkage != UNION_TYPE) mysql_init_select(lex); - lex->current_select->parsing_place= SELECT_LEX_NODE::SELECT_LIST; + lex->current_select->parsing_place= SELECT_LIST; } select_options select_item_list { - Select->parsing_place= SELECT_LEX_NODE::NO_MATTER; + Select->parsing_place= NO_MATTER; } select_into select_lock_type; @@ -3444,11 +3444,11 @@ select_derived: YYABORT; mysql_init_select(lex); lex->current_select->linkage= DERIVED_TABLE_TYPE; - lex->current_select->parsing_place= SELECT_LEX_NODE::SELECT_LIST; + lex->current_select->parsing_place= SELECT_LIST; } select_options select_item_list { - Select->parsing_place= SELECT_LEX_NODE::NO_MATTER; + Select->parsing_place= NO_MATTER; } opt_select_from union_opt ; @@ -3578,13 +3578,13 @@ having_clause: /* empty */ | HAVING { - Select->parsing_place= SELECT_LEX_NODE::IN_HAVING; + Select->parsing_place= IN_HAVING; } expr { SELECT_LEX *sel= Select; sel->having= $3; - sel->parsing_place= SELECT_LEX_NODE::NO_MATTER; + sel->parsing_place= NO_MATTER; if ($3) $3->top_level_item(); } @@ -4819,7 +4819,7 @@ simple_ident: ident { SELECT_LEX *sel=Select; - $$= (sel->parsing_place != SELECT_LEX_NODE::IN_HAVING || + $$= (sel->parsing_place != IN_HAVING || sel->get_in_sum_expr() > 0) ? (Item*) new Item_field(NullS,NullS,$1.str) : (Item*) new Item_ref(0,0, NullS,NullS,$1.str); @@ -4835,7 +4835,7 @@ simple_ident: ER(ER_TABLENAME_NOT_ALLOWED_HERE), MYF(0), $1.str, thd->where); } - $$= (sel->parsing_place != SELECT_LEX_NODE::IN_HAVING || + $$= (sel->parsing_place != IN_HAVING || sel->get_in_sum_expr() > 0) ? (Item*) new Item_field(NullS,$1.str,$3.str) : (Item*) new Item_ref(0,0,NullS,$1.str,$3.str); @@ -4851,7 +4851,7 @@ simple_ident: ER(ER_TABLENAME_NOT_ALLOWED_HERE), MYF(0), $2.str, thd->where); } - $$= (sel->parsing_place != SELECT_LEX_NODE::IN_HAVING || + $$= (sel->parsing_place != IN_HAVING || sel->get_in_sum_expr() > 0) ? (Item*) new Item_field(NullS,$2.str,$4.str) : (Item*) new Item_ref(0,0,NullS,$2.str,$4.str); @@ -4867,7 +4867,7 @@ simple_ident: ER(ER_TABLENAME_NOT_ALLOWED_HERE), MYF(0), $3.str, thd->where); } - $$= (sel->parsing_place != SELECT_LEX_NODE::IN_HAVING || + $$= (sel->parsing_place != IN_HAVING || sel->get_in_sum_expr() > 0) ? (Item*) new Item_field((YYTHD->client_capabilities & CLIENT_NO_SCHEMA ? NullS : $1.str), |
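To round off the grammar changes above: parsing_place is now a plain enum_parsing_place that the rules flip on entry to a clause and reset afterwards, and simple_ident consults it to decide whether a bare identifier becomes an Item_field or an Item_ref. A compact standalone sketch of that bookkeeping, not taken from the patch itself (DemoContext, ident_kind and demo_parse are invented names; only the enum values mirror the commit):

#include <cstdio>

/* Same three values the patch moves into mysql_priv.h. */
enum enum_parsing_place { NO_MATTER, IN_HAVING, SELECT_LIST };

struct DemoContext
{
  enum_parsing_place parsing_place;
  int in_sum_expr;                  /* >0 while inside an aggregate's argument */
  DemoContext() : parsing_place(NO_MATTER), in_sum_expr(0) {}
};

/*
  A bare identifier becomes a plain column reference everywhere except
  directly in HAVING, where a reference that can also resolve against the
  SELECT list is produced instead.
*/
static const char *ident_kind(const DemoContext &ctx)
{
  return (ctx.parsing_place != IN_HAVING || ctx.in_sum_expr > 0)
           ? "Item_field" : "Item_ref";
}

/* Set the place on entry to a clause and reset it afterwards, as the
   select_item_list and having_clause rules do. */
static void demo_parse(DemoContext &ctx, const char *ident)
{
  ctx.parsing_place = SELECT_LIST;
  std::printf("select list: %s -> %s\n", ident, ident_kind(ctx));
  ctx.parsing_place = NO_MATTER;

  ctx.parsing_place = IN_HAVING;
  std::printf("having:      %s -> %s\n", ident, ident_kind(ctx));
  ctx.parsing_place = NO_MATTER;
}

int main()
{
  DemoContext ctx;
  demo_parse(ctx, "col");           /* prints Item_field, then Item_ref */
  return 0;
}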