diff options
128 files changed, 7419 insertions, 3015 deletions
diff --git a/configure.in b/configure.in index b7efc13e5fc..93d6cbe515e 100644 --- a/configure.in +++ b/configure.in @@ -2678,12 +2678,6 @@ then fi AC_SUBST([ndb_port]) -if test X"$ndb_port_base" = Xdefault -then - ndb_port_base="2202" -fi -AC_SUBST([ndb_port_base]) - ndb_transporter_opt_objs="" if test "$ac_cv_func_shmget" = "yes" && test "$ac_cv_func_shmat" = "yes" && diff --git a/include/mysql_com.h b/include/mysql_com.h index 3165ca4c4c3..094283f8330 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -348,7 +348,7 @@ struct rand_struct { /* The following is for user defined functions */ -enum Item_result {STRING_RESULT, REAL_RESULT, INT_RESULT, ROW_RESULT, +enum Item_result {STRING_RESULT=0, REAL_RESULT, INT_RESULT, ROW_RESULT, DECIMAL_RESULT}; typedef struct st_udf_args diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 795df3623b0..653c9bfbaec 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -1028,10 +1028,8 @@ sub ndbcluster_install () { } mtr_report("Install ndbcluster"); my $ndbcluster_opts= $opt_bench ? 
"" : "--small"; - my $ndbcluster_port_base= $opt_ndbcluster_port + 2; if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster", ["--port=$opt_ndbcluster_port", - "--port-base=$ndbcluster_port_base", "--data-dir=$glob_mysql_test_dir/var", $ndbcluster_opts, "--initial"], @@ -1072,7 +1070,6 @@ sub ndbcluster_stop () { { return; } - my $ndbcluster_port_base= $opt_ndbcluster_port + 2; # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null mtr_run("$glob_mysql_test_dir/ndb/ndbcluster", ["--port=$opt_ndbcluster_port", diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index cee8ca4bfd0..da088a1891f 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -497,8 +497,7 @@ SMALL_SERVER="--key_buffer_size=1M --sort_buffer=256K --max_heap_table_size=1M" export MASTER_MYPORT SLAVE_MYPORT MYSQL_TCP_PORT MASTER_MYSOCK MASTER_MYSOCK1 -NDBCLUSTER_BASE_PORT=`expr $NDBCLUSTER_PORT + 2` -NDBCLUSTER_OPTS="--port=$NDBCLUSTER_PORT --port-base=$NDBCLUSTER_BASE_PORT --data-dir=$MYSQL_TEST_DIR/var --ndb_mgm-extra-opts=$NDB_MGM_EXTRA_OPTS --ndb_mgmd-extra-opts=$NDB_MGMD_EXTRA_OPTS --ndbd-extra-opts=$NDBD_EXTRA_OPTS" +NDBCLUSTER_OPTS="--port=$NDBCLUSTER_PORT --data-dir=$MYSQL_TEST_DIR/var --ndb_mgm-extra-opts=$NDB_MGM_EXTRA_OPTS --ndb_mgmd-extra-opts=$NDB_MGMD_EXTRA_OPTS --ndbd-extra-opts=$NDBD_EXTRA_OPTS" NDB_BACKUP_DIR=$MYSQL_TEST_DIR/var/ndbcluster-$NDBCLUSTER_PORT NDB_TOOLS_OUTPUT=$MYSQL_TEST_DIR/var/log/ndb_tools.log diff --git a/mysql-test/ndb/Makefile.am b/mysql-test/ndb/Makefile.am index 502ccee099e..178e40fb19a 100644 --- a/mysql-test/ndb/Makefile.am +++ b/mysql-test/ndb/Makefile.am @@ -14,7 +14,6 @@ SUFFIXES = .sh @RM@ -f $@ $@-t @SED@ \ -e 's!@''ndb_port''@!$(ndb_port)!g' \ - -e 's!@''ndb_port_base''@!$(ndb_port_base)!g' \ -e 's!@''ndbbindir''@!$(ndbbindir)!g' \ -e 's!@''ndbtoolsdir''@!$(ndbtoolsdir)!g' \ $< > $@-t diff --git a/mysql-test/ndb/ndb_config_2_node.ini b/mysql-test/ndb/ndb_config_2_node.ini index 
c831a5c7ffa..a6a56376f33 100644 --- a/mysql-test/ndb/ndb_config_2_node.ini +++ b/mysql-test/ndb/ndb_config_2_node.ini @@ -15,6 +15,7 @@ HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network adress HostName= CHOOSE_HOSTNAME_2 # hostname is a valid network adress [ndb_mgmd] +HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network adress DataDir= CHOOSE_FILESYSTEM # PortNumber= CHOOSE_PORT_MGM @@ -25,6 +26,3 @@ PortNumber= CHOOSE_PORT_MGM [mysqld] [mysqld] - -[tcp default] -PortNumber= CHOOSE_PORT_TRANSPORTER diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh index 16bb3a9b122..c09c013552e 100644 --- a/mysql-test/ndb/ndbcluster.sh +++ b/mysql-test/ndb/ndbcluster.sh @@ -6,7 +6,6 @@ # configurable parameters, make sure to change in mysqlcluterd as well port=@ndb_port@ -port_base=@ndb_port_base@ fsdir=`pwd` # end configurable parameters @@ -96,7 +95,7 @@ while test $# -gt 0; do port=`echo "$1" | sed -e "s;--port=;;"` ;; --port-base=*) - port_base=`echo "$1" | sed -e "s;--port-base=;;"` + $ECHO "--port-base option deprecated. Ignored." 
;; --ndb_mgm-extra-opts=*) NDB_MGM_EXTRA_OPTS=`echo "$1" | sed -e "s;--ndb_mgm-extra-opts=;;"` @@ -192,7 +191,6 @@ if [ $initial_ndb ] ; then -e s,"CHOOSE_HOSTNAME_".*,"$ndb_host",g \ -e s,"CHOOSE_FILESYSTEM","$fs_ndb",g \ -e s,"CHOOSE_PORT_MGM","$ndb_mgmd_port",g \ - -e s,"CHOOSE_PORT_TRANSPORTER","$port_base",g \ < ndb/ndb_config_2_node.ini \ > "$fs_ndb/config.ini" fi diff --git a/mysql-test/r/ndb_condition_pushdown.result b/mysql-test/r/ndb_condition_pushdown.result new file mode 100644 index 00000000000..6990e442899 --- /dev/null +++ b/mysql-test/r/ndb_condition_pushdown.result @@ -0,0 +1,697 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 ( +auto int(5) unsigned NOT NULL auto_increment, +string char(10), +vstring varchar(10), +bin binary(7), +vbin varbinary(7), +tiny tinyint(4) DEFAULT '0' NOT NULL , +short smallint(6) DEFAULT '1' NOT NULL , +medium mediumint(8) DEFAULT '0' NOT NULL, +long_int int(11) DEFAULT '0' NOT NULL, +longlong bigint(13) DEFAULT '0' NOT NULL, +real_float float(13,1) DEFAULT 0.0 NOT NULL, +real_double double(16,4), +real_decimal decimal(16,4), +utiny tinyint(3) unsigned DEFAULT '0' NOT NULL, +ushort smallint(5) unsigned zerofill DEFAULT '00000' NOT NULL, +umedium mediumint(8) unsigned DEFAULT '0' NOT NULL, +ulong int(11) unsigned DEFAULT '0' NOT NULL, +ulonglong bigint(13) unsigned DEFAULT '0' NOT NULL, +bits bit(3), +options enum('zero','one','two','three','four') not null, +flags set('zero','one','two','three','four') not null, +date_field date, +year_field year, +time_field time, +date_time datetime, +time_stamp timestamp, +PRIMARY KEY (auto) +) engine=ndb; +insert into t1 values +(NULL,"aaaa","aaaa",0xAAAA,0xAAAA,-1,-1,-1,-1,-1,1.1,1.1,1.1,1,1,1,1,1, +b'001','one','one', +'1901-01-01','1901', +'01:01:01','1901-01-01 01:01:01',NULL), +(NULL,"bbbb","bbbb",0xBBBB,0xBBBB,-2,-2,-2,-2,-2,2.2,2.2,2.2,2,2,2,2,2, +b'010','two','one,two', +'1902-02-02','1902', +'02:02:02','1902-02-02 02:02:02',NULL), 
+(NULL,"cccc","cccc",0xCCCC,0xCCCC,-3,-3,-3,-3,-3,3.3,3.3,3.3,3,3,3,3,3, +b'011','three','one,two,three', +'1903-03-03','1903', +'03:03:03','1903-03-03 03:03:03',NULL), +(NULL,"dddd","dddd",0xDDDD,0xDDDD,-4,-4,-4,-4,-4,4.4,4.4,4.4,4,4,4,4,4, +b'100','four','one,two,three,four', +'1904-04-04','1904', +'04:04:04','1904-04-04 04:04:04',NULL); +CREATE TABLE t2 (pk1 int unsigned NOT NULL PRIMARY KEY, attr1 int unsigned NOT NULL, attr2 int unsigned, attr3 VARCHAR(10) ) ENGINE=ndbcluster; +insert into t2 values (0,0,0, "a"),(1,1,1,"b"),(2,2,NULL,NULL),(3,3,3,"d"),(4,4,4,"e"),(5,5,5,"f"); +CREATE TABLE t3 (pk1 int unsigned NOT NULL PRIMARY KEY, attr1 int unsigned NOT NULL, attr2 bigint unsigned, attr3 tinyint unsigned, attr4 VARCHAR(10) ) ENGINE=ndbcluster; +insert into t3 values (0,0,0,0,"a"),(1,1,9223372036854775803,1,"b"),(2,2,9223372036854775804,2,"c"),(3,3,9223372036854775805,3,"d"),(4,4,9223372036854775806,4,"e"),(5,5,9223372036854775807,5,"f"); +CREATE TABLE t4 (pk1 int unsigned NOT NULL PRIMARY KEY, attr1 int unsigned NOT NULL, attr2 bigint unsigned, attr3 tinyint unsigned, attr4 VARCHAR(10) , KEY (attr1)) ENGINE=ndbcluster; +insert into t4 values (0,0,0,0,"a"),(1,1,9223372036854775803,1,"b"),(2,2,9223372036854775804,2,"c"),(3,3,9223372036854775805,3,"d"),(4,4,9223372036854775806,4,"e"),(5,5,9223372036854775807,5,"f"); +set @old_ecpd = @@session.engine_condition_pushdown; +set engine_condition_pushdown = off; +select auto from t1 where +string = "aaaa" and +vstring = "aaaa" and +bin = 0xAAAA and +vbin = 0xAAAA and +tiny = -1 and +short = -1 and +medium = -1 and +long_int = -1 and +longlong = -1 and +real_float > 1.0 and real_float < 2.0 and +real_double > 1.0 and real_double < 2.0 and +real_decimal > 1.0 and real_decimal < 2.0 and +utiny = 1 and +ushort = 1 and +umedium = 1 and +ulong = 1 and +ulonglong = 1 and +bits = b'001' and +options = 'one' and +flags = 'one' and +date_field = '1901-01-01' and +year_field = '1901' and +time_field = '01:01:01' and +date_time = 
'1901-01-01 01:01:01' +order by auto; +auto +1 +select auto from t1 where +string != "aaaa" and +vstring != "aaaa" and +bin != 0xAAAA and +vbin != 0xAAAA and +tiny != -1 and +short != -1 and +medium != -1 and +long_int != -1 and +longlong != -1 and +(real_float < 1.0 or real_float > 2.0) and +(real_double < 1.0 or real_double > 2.0) and +(real_decimal < 1.0 or real_decimal > 2.0) and +utiny != 1 and +ushort != 1 and +umedium != 1 and +ulong != 1 and +ulonglong != 1 and +bits != b'001' and +options != 'one' and +flags != 'one' and +date_field != '1901-01-01' and +year_field != '1901' and +time_field != '01:01:01' and +date_time != '1901-01-01 01:01:01' +order by auto; +auto +2 +3 +4 +select auto from t1 where +string > "aaaa" and +vstring > "aaaa" and +bin > 0xAAAA and +vbin > 0xAAAA and +tiny < -1 and +short < -1 and +medium < -1 and +long_int < -1 and +longlong < -1 and +real_float > 1.1 and +real_double > 1.1 and +real_decimal > 1.1 and +utiny > 1 and +ushort > 1 and +umedium > 1 and +ulong > 1 and +ulonglong > 1 and +bits > b'001' and +(options = 'two' or options = 'three' or options = 'four') and +(flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field > '1901-01-01' and +year_field > '1901' and +time_field > '01:01:01' and +date_time > '1901-01-01 01:01:01' +order by auto; +auto +2 +3 +4 +select auto from t1 where +string >= "aaaa" and +vstring >= "aaaa" and +bin >= 0xAAAA and +vbin >= 0xAAAA and +tiny <= -1 and +short <= -1 and +medium <= -1 and +long_int <= -1 and +longlong <= -1 and +real_float >= 1.0 and +real_double >= 1.0 and +real_decimal >= 1.0 and +utiny >= 1 and +ushort >= 1 and +umedium >= 1 and +ulong >= 1 and +ulonglong >= 1 and +bits >= b'001' and +(options = 'one' or options = 'two' or options = 'three' or options = 'four') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field >= '1901-01-01' and +year_field >= '1901' and +time_field >= 
'01:01:01' and +date_time >= '1901-01-01 01:01:01' +order by auto; +auto +1 +2 +3 +4 +select auto from t1 where +string < "dddd" and +vstring < "dddd" and +bin < 0xDDDD and +vbin < 0xDDDD and +tiny > -4 and +short > -4 and +medium > -4 and +long_int > -4 and +longlong > -4 and +real_float < 4.4 and +real_double < 4.4 and +real_decimal < 4.4 and +utiny < 4 and +ushort < 4 and +umedium < 4 and +ulong < 4 and +ulonglong < 4 and +bits < b'100' and +(options = 'one' or options = 'two' or options = 'three') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three') and +date_field < '1904-01-01' and +year_field < '1904' and +time_field < '04:04:04' and +date_time < '1904-04-04 04:04:04' +order by auto; +auto +1 +2 +3 +select auto from t1 where +string <= "dddd" and +vstring <= "dddd" and +bin <= 0xDDDD and +vbin <= 0xDDDD and +tiny >= -4 and +short >= -4 and +medium >= -4 and +long_int >= -4 and +longlong >= -4 and +real_float <= 4.5 and +real_double <= 4.5 and +real_decimal <= 4.5 and +utiny <= 4 and +ushort <= 4 and +umedium <= 4 and +ulong <= 4 and +ulonglong <= 4 and +bits <= b'100' and +(options = 'one' or options = 'two' or options = 'three' or options = 'four') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field <= '1904-04-04' and +year_field <= '1904' and +time_field <= '04:04:04' and +date_time <= '1904-04-04 04:04:04' +order by auto; +auto +1 +2 +3 +4 +select auto from t1 where +string like "b%" and +vstring like "b%" and +bin like concat(0xBB, '%') and +vbin like concat(0xBB, '%') +order by auto; +auto +2 +select auto from t1 where +string not like "b%" and +vstring not like "b%" and +bin not like concat(0xBB, '%') and +vbin not like concat(0xBB, '%') +order by auto; +auto +1 +3 +4 +select * from t2 where attr3 is null or attr1 > 2 and pk1= 3 order by pk1; +pk1 attr1 attr2 attr3 +2 2 NULL NULL +3 3 3 d +select * from t2 where attr3 is not null and attr1 > 2 order by pk1; +pk1 attr1 
attr2 attr3 +3 3 3 d +4 4 4 e +5 5 5 f +select * from t3 where attr2 > 9223372036854775803 and attr3 != 3 order by pk1; +pk1 attr1 attr2 attr3 attr4 +2 2 9223372036854775804 2 c +4 4 9223372036854775806 4 e +5 5 9223372036854775807 5 f +select * from t2,t3 where t2.attr1 < 1 and t2.attr2 = t3.attr2 and t3.attr1 < 5 order by t2.pk1; +pk1 attr1 attr2 attr3 pk1 attr1 attr2 attr3 attr4 +0 0 0 a 0 0 0 0 a +select * from t4 where attr1 < 5 and attr2 > 9223372036854775803 and attr3 != 3 order by t4.pk1; +pk1 attr1 attr2 attr3 attr4 +2 2 9223372036854775804 2 c +4 4 9223372036854775806 4 e +select * from t3,t4 where t4.attr1 > 1 and t4.attr2 = t3.attr2 and t4.attr3 < 5 order by t4.pk1; +pk1 attr1 attr2 attr3 attr4 pk1 attr1 attr2 attr3 attr4 +2 2 9223372036854775804 2 c 2 2 9223372036854775804 2 c +3 3 9223372036854775805 3 d 3 3 9223372036854775805 3 d +4 4 9223372036854775806 4 e 4 4 9223372036854775806 4 e +set engine_condition_pushdown = on; +select auto from t1 where +string = "aaaa" and +vstring = "aaaa" and +bin = 0xAAAA and +vbin = 0xAAAA and +tiny = -1 and +short = -1 and +medium = -1 and +long_int = -1 and +longlong = -1 and +real_float > 1.0 and real_float < 2.0 and +real_double > 1.0 and real_double < 2.0 and +real_decimal > 1.0 and real_decimal < 2.0 and +utiny = 1 and +ushort = 1 and +umedium = 1 and +ulong = 1 and +ulonglong = 1 and +/* bits = b'001' and */ +options = 'one' and +flags = 'one' and +date_field = '1901-01-01' and +year_field = '1901' and +time_field = '01:01:01' and +date_time = '1901-01-01 01:01:01' +order by auto; +auto +1 +select auto from t1 where +string != "aaaa" and +vstring != "aaaa" and +bin != 0xAAAA and +vbin != 0xAAAA and +tiny != -1 and +short != -1 and +medium != -1 and +long_int != -1 and +longlong != -1 and +(real_float < 1.0 or real_float > 2.0) and +(real_double < 1.0 or real_double > 2.0) and +(real_decimal < 1.0 or real_decimal > 2.0) and +utiny != 1 and +ushort != 1 and +umedium != 1 and +ulong != 1 and +ulonglong != 1 and 
+/* bits != b'001' and */ +options != 'one' and +flags != 'one' and +date_field != '1901-01-01' and +year_field != '1901' and +time_field != '01:01:01' and +date_time != '1901-01-01 01:01:01' +order by auto; +auto +2 +3 +4 +select auto from t1 where +string > "aaaa" and +vstring > "aaaa" and +bin > 0xAAAA and +vbin > 0xAAAA and +tiny < -1 and +short < -1 and +medium < -1 and +long_int < -1 and +longlong < -1 and +real_float > 1.1 and +real_double > 1.1 and +real_decimal > 1.1 and +utiny > 1 and +ushort > 1 and +umedium > 1 and +ulong > 1 and +ulonglong > 1 and +/* bits > b'001' and */ +(options = 'two' or options = 'three' or options = 'four') and +(flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field > '1901-01-01' and +year_field > '1901' and +time_field > '01:01:01' and +date_time > '1901-01-01 01:01:01' +order by auto; +auto +2 +3 +4 +select auto from t1 where +string >= "aaaa" and +vstring >= "aaaa" and +bin >= 0xAAAA and +vbin >= 0xAAAA and +tiny <= -1 and +short <= -1 and +medium <= -1 and +long_int <= -1 and +longlong <= -1 and +real_float >= 1.0 and +real_double >= 1.0 and +real_decimal >= 1.0 and +utiny >= 1 and +ushort >= 1 and +umedium >= 1 and +ulong >= 1 and +ulonglong >= 1 and +/* bits >= b'001' and */ +(options = 'one' or options = 'two' or options = 'three' or options = 'four') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field >= '1901-01-01' and +year_field >= '1901' and +time_field >= '01:01:01' and +date_time >= '1901-01-01 01:01:01' +order by auto; +auto +1 +2 +3 +4 +select auto from t1 where +string < "dddd" and +vstring < "dddd" and +bin < 0xDDDD and +vbin < 0xDDDD and +tiny > -4 and +short > -4 and +medium > -4 and +long_int > -4 and +longlong > -4 and +real_float < 4.4 and +real_double < 4.4 and +real_decimal < 4.4 and +utiny < 4 and +ushort < 4 and +umedium < 4 and +ulong < 4 and +ulonglong < 4 and +/* bits < b'100' and */ +(options 
= 'one' or options = 'two' or options = 'three') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three') and +date_field < '1904-01-01' and +year_field < '1904' and +time_field < '04:04:04' and +date_time < '1904-04-04 04:04:04' +order by auto; +auto +1 +2 +3 +select auto from t1 where +string <= "dddd" and +vstring <= "dddd" and +bin <= 0xDDDD and +vbin <= 0xDDDD and +tiny >= -4 and +short >= -4 and +medium >= -4 and +long_int >= -4 and +longlong >= -4 and +real_float <= 4.5 and +real_double <= 4.5 and +real_decimal <= 4.5 and +utiny <= 4 - 1 + 1 and /* Checking function composition */ +ushort <= 4 and +umedium <= 4 and +ulong <= 4 and +ulonglong <= 4 and +/* bits <= b'100' and */ +(options = 'one' or options = 'two' or options = 'three' or options = 'four') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field <= '1904-04-04' and +year_field <= '1904' and +time_field <= '04:04:04' and +date_time <= '1904-04-04 04:04:04' +order by auto; +auto +1 +2 +3 +4 +create index medium_index on t1(medium); +select auto from t1 where +string = "aaaa" and +vstring = "aaaa" and +bin = 0xAAAA and +vbin = 0xAAAA and +tiny = -1 and +short = -1 and +medium = -1 and +long_int = -1 and +longlong = -1 and +real_float > 1.0 and real_float < 2.0 and +real_double > 1.0 and real_double < 2.0 and +real_decimal > 1.0 and real_decimal < 2.0 and +utiny = 1 and +ushort = 1 and +umedium = 1 and +ulong = 1 and +ulonglong = 1 and +/* bits = b'001' and */ +options = 'one' and +flags = 'one' and +date_field = '1901-01-01' and +year_field = '1901' and +time_field = '01:01:01' and +date_time = '1901-01-01 01:01:01' +order by auto; +auto +1 +select auto from t1 where +string != "aaaa" and +vstring != "aaaa" and +bin != 0xAAAA and +vbin != 0xAAAA and +tiny != -1 and +short != -1 and +medium != -1 and +long_int != -1 and +longlong != -1 and +(real_float < 1.0 or real_float > 2.0) and +(real_double < 1.0 or real_double > 2.0) and 
+(real_decimal < 1.0 or real_decimal > 2.0) and +utiny != 1 and +ushort != 1 and +umedium != 1 and +ulong != 1 and +ulonglong != 1 and +/* bits != b'001' and */ +options != 'one' and +flags != 'one' and +date_field != '1901-01-01' and +year_field != '1901' and +time_field != '01:01:01' and +date_time != '1901-01-01 01:01:01' +order by auto; +auto +2 +3 +4 +select auto from t1 where +string > "aaaa" and +vstring > "aaaa" and +bin > 0xAAAA and +vbin > 0xAAAA and +tiny < -1 and +short < -1 and +medium < -1 and +long_int < -1 and +longlong < -1 and +real_float > 1.1 and +real_double > 1.1 and +real_decimal > 1.1 and +utiny > 1 and +ushort > 1 and +umedium > 1 and +ulong > 1 and +ulonglong > 1 and +/* bits > b'001' and */ +(options = 'two' or options = 'three' or options = 'four') and +(flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field > '1901-01-01' and +year_field > '1901' and +time_field > '01:01:01' and +date_time > '1901-01-01 01:01:01' +order by auto; +auto +2 +3 +4 +select auto from t1 where +string >= "aaaa" and +vstring >= "aaaa" and +bin >= 0xAAAA and +vbin >= 0xAAAA and +tiny <= -1 and +short <= -1 and +medium <= -1 and +long_int <= -1 and +longlong <= -1 and +real_float >= 1.0 and +real_double >= 1.0 and +real_decimal >= 1.0 and +utiny >= 1 and +ushort >= 1 and +umedium >= 1 and +ulong >= 1 and +ulonglong >= 1 and +/* bits >= b'001' and */ +(options = 'one' or options = 'two' or options = 'three' or options = 'four') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field >= '1901-01-01' and +year_field >= '1901' and +time_field >= '01:01:01' and +date_time >= '1901-01-01 01:01:01' +order by auto; +auto +1 +2 +3 +4 +select auto from t1 where +string < "dddd" and +vstring < "dddd" and +bin < 0xDDDD and +vbin < 0xDDDD and +tiny > -4 and +short > -4 and +medium > -4 and +long_int > -4 and +longlong > -4 and +real_float < 4.4 and +real_double < 4.4 and 
+real_decimal < 4.4 and +utiny < 4 and +ushort < 4 and +umedium < 4 and +ulong < 4 and +ulonglong < 4 and +/* bits < b'100' and */ +(options = 'one' or options = 'two' or options = 'three') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three') and +date_field < '1904-01-01' and +year_field < '1904' and +time_field < '04:04:04' and +date_time < '1904-04-04 04:04:04' +order by auto; +auto +1 +2 +3 +select auto from t1 where +string <= "dddd" and +vstring <= "dddd" and +bin <= 0xDDDD and +vbin <= 0xDDDD and +tiny >= -4 and +short >= -4 and +medium >= -4 and +long_int >= -4 and +longlong >= -4 and +real_float <= 4.5 and +real_double <= 4.5 and +real_decimal <= 4.5 and +utiny <= 4 - 1 + 1 and /* Checking function composition */ +ushort <= 4 and +umedium <= 4 and +ulong <= 4 and +ulonglong <= 4 and +/* bits <= b'100' and */ +(options = 'one' or options = 'two' or options = 'three' or options = 'four') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field <= '1904-04-04' and +year_field <= '1904' and +time_field <= '04:04:04' and +date_time <= '1904-04-04 04:04:04' +order by auto; +auto +1 +2 +3 +4 +select auto from t1 where +string like "b%" and +vstring like "b%" and +bin like concat(0xBB, '%') and +vbin like concat(0xBB, '%') +order by auto; +auto +2 +select auto from t1 where +string not like "b%" and +vstring not like "b%" and +bin not like concat(0xBB, '%') and +vbin not like concat(0xBB, '%') +order by auto; +auto +1 +3 +4 +select * from t2 where attr3 is null or attr1 > 2 and pk1= 3 order by pk1; +pk1 attr1 attr2 attr3 +2 2 NULL NULL +3 3 3 d +select * from t2 where attr3 is not null and attr1 > 2 order by pk1; +pk1 attr1 attr2 attr3 +3 3 3 d +4 4 4 e +5 5 5 f +select * from t3 where attr2 > 9223372036854775803 and attr3 != 3 order by pk1; +pk1 attr1 attr2 attr3 attr4 +2 2 9223372036854775804 2 c +4 4 9223372036854775806 4 e +5 5 9223372036854775807 5 f +select * from t2,t3 where 
t2.attr1 < 1 and t2.attr2 = t3.attr2 and t3.attr1 < 5 order by t2.pk1; +pk1 attr1 attr2 attr3 pk1 attr1 attr2 attr3 attr4 +0 0 0 a 0 0 0 0 a +select * from t4 where attr1 < 5 and attr2 > 9223372036854775803 and attr3 != 3 order by t4.pk1; +pk1 attr1 attr2 attr3 attr4 +2 2 9223372036854775804 2 c +4 4 9223372036854775806 4 e +select * from t3,t4 where t4.attr1 > 1 and t4.attr2 = t3.attr2 and t4.attr3 < 5 order by t4.pk1; +pk1 attr1 attr2 attr3 attr4 pk1 attr1 attr2 attr3 attr4 +2 2 9223372036854775804 2 c 2 2 9223372036854775804 2 c +3 3 9223372036854775805 3 d 3 3 9223372036854775805 3 d +4 4 9223372036854775806 4 e 4 4 9223372036854775806 4 e +select auto from t1 where string = "aaaa" collate latin1_general_ci order by auto; +auto +1 +select * from t2 where (attr1 < 2) = (attr2 < 2) order by pk1; +pk1 attr1 attr2 attr3 +0 0 0 a +1 1 1 b +3 3 3 d +4 4 4 e +5 5 5 f +set engine_condition_pushdown = @old_ecpd; +DROP TABLE t1,t2,t3,t4; diff --git a/mysql-test/t/ndb_condition_pushdown.test b/mysql-test/t/ndb_condition_pushdown.test new file mode 100644 index 00000000000..5fd9def3327 --- /dev/null +++ b/mysql-test/t/ndb_condition_pushdown.test @@ -0,0 +1,618 @@ +-- source include/have_ndb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +# +# Test of condition pushdown to storage engine +# +CREATE TABLE t1 ( + auto int(5) unsigned NOT NULL auto_increment, + string char(10), + vstring varchar(10), + bin binary(7), + vbin varbinary(7), + tiny tinyint(4) DEFAULT '0' NOT NULL , + short smallint(6) DEFAULT '1' NOT NULL , + medium mediumint(8) DEFAULT '0' NOT NULL, + long_int int(11) DEFAULT '0' NOT NULL, + longlong bigint(13) DEFAULT '0' NOT NULL, + real_float float(13,1) DEFAULT 0.0 NOT NULL, + real_double double(16,4), + real_decimal decimal(16,4), + utiny tinyint(3) unsigned DEFAULT '0' NOT NULL, + ushort smallint(5) unsigned zerofill DEFAULT '00000' NOT NULL, + umedium mediumint(8) unsigned DEFAULT '0' NOT NULL, + ulong int(11) unsigned DEFAULT 
'0' NOT NULL, + ulonglong bigint(13) unsigned DEFAULT '0' NOT NULL, + bits bit(3), + options enum('zero','one','two','three','four') not null, + flags set('zero','one','two','three','four') not null, + date_field date, + year_field year, + time_field time, + date_time datetime, + time_stamp timestamp, + PRIMARY KEY (auto) +) engine=ndb; + +insert into t1 values +(NULL,"aaaa","aaaa",0xAAAA,0xAAAA,-1,-1,-1,-1,-1,1.1,1.1,1.1,1,1,1,1,1, + b'001','one','one', + '1901-01-01','1901', +'01:01:01','1901-01-01 01:01:01',NULL), +(NULL,"bbbb","bbbb",0xBBBB,0xBBBB,-2,-2,-2,-2,-2,2.2,2.2,2.2,2,2,2,2,2, + b'010','two','one,two', + '1902-02-02','1902', +'02:02:02','1902-02-02 02:02:02',NULL), +(NULL,"cccc","cccc",0xCCCC,0xCCCC,-3,-3,-3,-3,-3,3.3,3.3,3.3,3,3,3,3,3, + b'011','three','one,two,three', + '1903-03-03','1903', +'03:03:03','1903-03-03 03:03:03',NULL), +(NULL,"dddd","dddd",0xDDDD,0xDDDD,-4,-4,-4,-4,-4,4.4,4.4,4.4,4,4,4,4,4, + b'100','four','one,two,three,four', + '1904-04-04','1904', +'04:04:04','1904-04-04 04:04:04',NULL); + +CREATE TABLE t2 (pk1 int unsigned NOT NULL PRIMARY KEY, attr1 int unsigned NOT NULL, attr2 int unsigned, attr3 VARCHAR(10) ) ENGINE=ndbcluster; + +insert into t2 values (0,0,0, "a"),(1,1,1,"b"),(2,2,NULL,NULL),(3,3,3,"d"),(4,4,4,"e"),(5,5,5,"f"); + +CREATE TABLE t3 (pk1 int unsigned NOT NULL PRIMARY KEY, attr1 int unsigned NOT NULL, attr2 bigint unsigned, attr3 tinyint unsigned, attr4 VARCHAR(10) ) ENGINE=ndbcluster; + +insert into t3 values (0,0,0,0,"a"),(1,1,9223372036854775803,1,"b"),(2,2,9223372036854775804,2,"c"),(3,3,9223372036854775805,3,"d"),(4,4,9223372036854775806,4,"e"),(5,5,9223372036854775807,5,"f"); + +CREATE TABLE t4 (pk1 int unsigned NOT NULL PRIMARY KEY, attr1 int unsigned NOT NULL, attr2 bigint unsigned, attr3 tinyint unsigned, attr4 VARCHAR(10) , KEY (attr1)) ENGINE=ndbcluster; + +insert into t4 values 
(0,0,0,0,"a"),(1,1,9223372036854775803,1,"b"),(2,2,9223372036854775804,2,"c"),(3,3,9223372036854775805,3,"d"),(4,4,9223372036854775806,4,"e"),(5,5,9223372036854775807,5,"f"); + +set @old_ecpd = @@session.engine_condition_pushdown; +set engine_condition_pushdown = off; + +# Test all types and compare operators +select auto from t1 where +string = "aaaa" and +vstring = "aaaa" and +bin = 0xAAAA and +vbin = 0xAAAA and +tiny = -1 and +short = -1 and +medium = -1 and +long_int = -1 and +longlong = -1 and +real_float > 1.0 and real_float < 2.0 and +real_double > 1.0 and real_double < 2.0 and +real_decimal > 1.0 and real_decimal < 2.0 and +utiny = 1 and +ushort = 1 and +umedium = 1 and +ulong = 1 and +ulonglong = 1 and +bits = b'001' and +options = 'one' and +flags = 'one' and +date_field = '1901-01-01' and +year_field = '1901' and +time_field = '01:01:01' and +date_time = '1901-01-01 01:01:01' +order by auto; + +select auto from t1 where +string != "aaaa" and +vstring != "aaaa" and +bin != 0xAAAA and +vbin != 0xAAAA and +tiny != -1 and +short != -1 and +medium != -1 and +long_int != -1 and +longlong != -1 and +(real_float < 1.0 or real_float > 2.0) and +(real_double < 1.0 or real_double > 2.0) and +(real_decimal < 1.0 or real_decimal > 2.0) and +utiny != 1 and +ushort != 1 and +umedium != 1 and +ulong != 1 and +ulonglong != 1 and +bits != b'001' and +options != 'one' and +flags != 'one' and +date_field != '1901-01-01' and +year_field != '1901' and +time_field != '01:01:01' and +date_time != '1901-01-01 01:01:01' +order by auto; + +select auto from t1 where +string > "aaaa" and +vstring > "aaaa" and +bin > 0xAAAA and +vbin > 0xAAAA and +tiny < -1 and +short < -1 and +medium < -1 and +long_int < -1 and +longlong < -1 and +real_float > 1.1 and +real_double > 1.1 and +real_decimal > 1.1 and +utiny > 1 and +ushort > 1 and +umedium > 1 and +ulong > 1 and +ulonglong > 1 and +bits > b'001' and +(options = 'two' or options = 'three' or options = 'four') and +(flags = 'one,two' or 
flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field > '1901-01-01' and +year_field > '1901' and +time_field > '01:01:01' and +date_time > '1901-01-01 01:01:01' +order by auto; + +select auto from t1 where +string >= "aaaa" and +vstring >= "aaaa" and +bin >= 0xAAAA and +vbin >= 0xAAAA and +tiny <= -1 and +short <= -1 and +medium <= -1 and +long_int <= -1 and +longlong <= -1 and +real_float >= 1.0 and +real_double >= 1.0 and +real_decimal >= 1.0 and +utiny >= 1 and +ushort >= 1 and +umedium >= 1 and +ulong >= 1 and +ulonglong >= 1 and +bits >= b'001' and +(options = 'one' or options = 'two' or options = 'three' or options = 'four') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field >= '1901-01-01' and +year_field >= '1901' and +time_field >= '01:01:01' and +date_time >= '1901-01-01 01:01:01' +order by auto; + +select auto from t1 where +string < "dddd" and +vstring < "dddd" and +bin < 0xDDDD and +vbin < 0xDDDD and +tiny > -4 and +short > -4 and +medium > -4 and +long_int > -4 and +longlong > -4 and +real_float < 4.4 and +real_double < 4.4 and +real_decimal < 4.4 and +utiny < 4 and +ushort < 4 and +umedium < 4 and +ulong < 4 and +ulonglong < 4 and +bits < b'100' and +(options = 'one' or options = 'two' or options = 'three') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three') and +date_field < '1904-01-01' and +year_field < '1904' and +time_field < '04:04:04' and +date_time < '1904-04-04 04:04:04' +order by auto; + +select auto from t1 where +string <= "dddd" and +vstring <= "dddd" and +bin <= 0xDDDD and +vbin <= 0xDDDD and +tiny >= -4 and +short >= -4 and +medium >= -4 and +long_int >= -4 and +longlong >= -4 and +real_float <= 4.5 and +real_double <= 4.5 and +real_decimal <= 4.5 and +utiny <= 4 and +ushort <= 4 and +umedium <= 4 and +ulong <= 4 and +ulonglong <= 4 and +bits <= b'100' and +(options = 'one' or options = 'two' or options = 'three' or options = 'four') 
and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field <= '1904-04-04' and +year_field <= '1904' and +time_field <= '04:04:04' and +date_time <= '1904-04-04 04:04:04' +order by auto; + +# Test LIKE/NOT LIKE +select auto from t1 where +string like "b%" and +vstring like "b%" and +bin like concat(0xBB, '%') and +vbin like concat(0xBB, '%') +order by auto; + +select auto from t1 where +string not like "b%" and +vstring not like "b%" and +bin not like concat(0xBB, '%') and +vbin not like concat(0xBB, '%') +order by auto; + +# Various tests +select * from t2 where attr3 is null or attr1 > 2 and pk1= 3 order by pk1; +select * from t2 where attr3 is not null and attr1 > 2 order by pk1; +select * from t3 where attr2 > 9223372036854775803 and attr3 != 3 order by pk1; +select * from t2,t3 where t2.attr1 < 1 and t2.attr2 = t3.attr2 and t3.attr1 < 5 order by t2.pk1; +select * from t4 where attr1 < 5 and attr2 > 9223372036854775803 and attr3 != 3 order by t4.pk1; +select * from t3,t4 where t4.attr1 > 1 and t4.attr2 = t3.attr2 and t4.attr3 < 5 order by t4.pk1; + +set engine_condition_pushdown = on; + +# Test all types and compare operators +select auto from t1 where +string = "aaaa" and +vstring = "aaaa" and +bin = 0xAAAA and +vbin = 0xAAAA and +tiny = -1 and +short = -1 and +medium = -1 and +long_int = -1 and +longlong = -1 and +real_float > 1.0 and real_float < 2.0 and +real_double > 1.0 and real_double < 2.0 and +real_decimal > 1.0 and real_decimal < 2.0 and +utiny = 1 and +ushort = 1 and +umedium = 1 and +ulong = 1 and +ulonglong = 1 and +/* bits = b'001' and */ +options = 'one' and +flags = 'one' and +date_field = '1901-01-01' and +year_field = '1901' and +time_field = '01:01:01' and +date_time = '1901-01-01 01:01:01' +order by auto; + +select auto from t1 where +string != "aaaa" and +vstring != "aaaa" and +bin != 0xAAAA and +vbin != 0xAAAA and +tiny != -1 and +short != -1 and +medium != -1 and +long_int != -1 
and +longlong != -1 and +(real_float < 1.0 or real_float > 2.0) and +(real_double < 1.0 or real_double > 2.0) and +(real_decimal < 1.0 or real_decimal > 2.0) and +utiny != 1 and +ushort != 1 and +umedium != 1 and +ulong != 1 and +ulonglong != 1 and +/* bits != b'001' and */ +options != 'one' and +flags != 'one' and +date_field != '1901-01-01' and +year_field != '1901' and +time_field != '01:01:01' and +date_time != '1901-01-01 01:01:01' +order by auto; + +select auto from t1 where +string > "aaaa" and +vstring > "aaaa" and +bin > 0xAAAA and +vbin > 0xAAAA and +tiny < -1 and +short < -1 and +medium < -1 and +long_int < -1 and +longlong < -1 and +real_float > 1.1 and +real_double > 1.1 and +real_decimal > 1.1 and +utiny > 1 and +ushort > 1 and +umedium > 1 and +ulong > 1 and +ulonglong > 1 and +/* bits > b'001' and */ +(options = 'two' or options = 'three' or options = 'four') and +(flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field > '1901-01-01' and +year_field > '1901' and +time_field > '01:01:01' and +date_time > '1901-01-01 01:01:01' +order by auto; + +select auto from t1 where +string >= "aaaa" and +vstring >= "aaaa" and +bin >= 0xAAAA and +vbin >= 0xAAAA and +tiny <= -1 and +short <= -1 and +medium <= -1 and +long_int <= -1 and +longlong <= -1 and +real_float >= 1.0 and +real_double >= 1.0 and +real_decimal >= 1.0 and +utiny >= 1 and +ushort >= 1 and +umedium >= 1 and +ulong >= 1 and +ulonglong >= 1 and +/* bits >= b'001' and */ +(options = 'one' or options = 'two' or options = 'three' or options = 'four') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field >= '1901-01-01' and +year_field >= '1901' and +time_field >= '01:01:01' and +date_time >= '1901-01-01 01:01:01' +order by auto; + +select auto from t1 where +string < "dddd" and +vstring < "dddd" and +bin < 0xDDDD and +vbin < 0xDDDD and +tiny > -4 and +short > -4 and +medium > -4 and +long_int > -4 
and +longlong > -4 and +real_float < 4.4 and +real_double < 4.4 and +real_decimal < 4.4 and +utiny < 4 and +ushort < 4 and +umedium < 4 and +ulong < 4 and +ulonglong < 4 and +/* bits < b'100' and */ +(options = 'one' or options = 'two' or options = 'three') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three') and +date_field < '1904-01-01' and +year_field < '1904' and +time_field < '04:04:04' and +date_time < '1904-04-04 04:04:04' +order by auto; + +select auto from t1 where +string <= "dddd" and +vstring <= "dddd" and +bin <= 0xDDDD and +vbin <= 0xDDDD and +tiny >= -4 and +short >= -4 and +medium >= -4 and +long_int >= -4 and +longlong >= -4 and +real_float <= 4.5 and +real_double <= 4.5 and +real_decimal <= 4.5 and +utiny <= 4 - 1 + 1 and /* Checking function composition */ +ushort <= 4 and +umedium <= 4 and +ulong <= 4 and +ulonglong <= 4 and +/* bits <= b'100' and */ +(options = 'one' or options = 'two' or options = 'three' or options = 'four') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field <= '1904-04-04' and +year_field <= '1904' and +time_field <= '04:04:04' and +date_time <= '1904-04-04 04:04:04' +order by auto; + +# Test index scan with filter +create index medium_index on t1(medium); + +# Test all types and compare operators +select auto from t1 where +string = "aaaa" and +vstring = "aaaa" and +bin = 0xAAAA and +vbin = 0xAAAA and +tiny = -1 and +short = -1 and +medium = -1 and +long_int = -1 and +longlong = -1 and +real_float > 1.0 and real_float < 2.0 and +real_double > 1.0 and real_double < 2.0 and +real_decimal > 1.0 and real_decimal < 2.0 and +utiny = 1 and +ushort = 1 and +umedium = 1 and +ulong = 1 and +ulonglong = 1 and +/* bits = b'001' and */ +options = 'one' and +flags = 'one' and +date_field = '1901-01-01' and +year_field = '1901' and +time_field = '01:01:01' and +date_time = '1901-01-01 01:01:01' +order by auto; + +select auto from t1 where +string != "aaaa" 
and +vstring != "aaaa" and +bin != 0xAAAA and +vbin != 0xAAAA and +tiny != -1 and +short != -1 and +medium != -1 and +long_int != -1 and +longlong != -1 and +(real_float < 1.0 or real_float > 2.0) and +(real_double < 1.0 or real_double > 2.0) and +(real_decimal < 1.0 or real_decimal > 2.0) and +utiny != 1 and +ushort != 1 and +umedium != 1 and +ulong != 1 and +ulonglong != 1 and +/* bits != b'001' and */ +options != 'one' and +flags != 'one' and +date_field != '1901-01-01' and +year_field != '1901' and +time_field != '01:01:01' and +date_time != '1901-01-01 01:01:01' +order by auto; + +select auto from t1 where +string > "aaaa" and +vstring > "aaaa" and +bin > 0xAAAA and +vbin > 0xAAAA and +tiny < -1 and +short < -1 and +medium < -1 and +long_int < -1 and +longlong < -1 and +real_float > 1.1 and +real_double > 1.1 and +real_decimal > 1.1 and +utiny > 1 and +ushort > 1 and +umedium > 1 and +ulong > 1 and +ulonglong > 1 and +/* bits > b'001' and */ +(options = 'two' or options = 'three' or options = 'four') and +(flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field > '1901-01-01' and +year_field > '1901' and +time_field > '01:01:01' and +date_time > '1901-01-01 01:01:01' +order by auto; + +select auto from t1 where +string >= "aaaa" and +vstring >= "aaaa" and +bin >= 0xAAAA and +vbin >= 0xAAAA and +tiny <= -1 and +short <= -1 and +medium <= -1 and +long_int <= -1 and +longlong <= -1 and +real_float >= 1.0 and +real_double >= 1.0 and +real_decimal >= 1.0 and +utiny >= 1 and +ushort >= 1 and +umedium >= 1 and +ulong >= 1 and +ulonglong >= 1 and +/* bits >= b'001' and */ +(options = 'one' or options = 'two' or options = 'three' or options = 'four') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field >= '1901-01-01' and +year_field >= '1901' and +time_field >= '01:01:01' and +date_time >= '1901-01-01 01:01:01' +order by auto; + +select auto from t1 where +string < 
"dddd" and +vstring < "dddd" and +bin < 0xDDDD and +vbin < 0xDDDD and +tiny > -4 and +short > -4 and +medium > -4 and +long_int > -4 and +longlong > -4 and +real_float < 4.4 and +real_double < 4.4 and +real_decimal < 4.4 and +utiny < 4 and +ushort < 4 and +umedium < 4 and +ulong < 4 and +ulonglong < 4 and +/* bits < b'100' and */ +(options = 'one' or options = 'two' or options = 'three') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three') and +date_field < '1904-01-01' and +year_field < '1904' and +time_field < '04:04:04' and +date_time < '1904-04-04 04:04:04' +order by auto; + +select auto from t1 where +string <= "dddd" and +vstring <= "dddd" and +bin <= 0xDDDD and +vbin <= 0xDDDD and +tiny >= -4 and +short >= -4 and +medium >= -4 and +long_int >= -4 and +longlong >= -4 and +real_float <= 4.5 and +real_double <= 4.5 and +real_decimal <= 4.5 and +utiny <= 4 - 1 + 1 and /* Checking function composition */ +ushort <= 4 and +umedium <= 4 and +ulong <= 4 and +ulonglong <= 4 and +/* bits <= b'100' and */ +(options = 'one' or options = 'two' or options = 'three' or options = 'four') and +(flags = 'one' or flags = 'one,two' or flags = 'one,two,three' or flags = 'one,two,three,four') and +date_field <= '1904-04-04' and +year_field <= '1904' and +time_field <= '04:04:04' and +date_time <= '1904-04-04 04:04:04' +order by auto; + +# Test LIKE/NOT LIKE +select auto from t1 where +string like "b%" and +vstring like "b%" and +bin like concat(0xBB, '%') and +vbin like concat(0xBB, '%') +order by auto; + +select auto from t1 where +string not like "b%" and +vstring not like "b%" and +bin not like concat(0xBB, '%') and +vbin not like concat(0xBB, '%') +order by auto; + +# Various tests +select * from t2 where attr3 is null or attr1 > 2 and pk1= 3 order by pk1; +select * from t2 where attr3 is not null and attr1 > 2 order by pk1; +select * from t3 where attr2 > 9223372036854775803 and attr3 != 3 order by pk1; +select * from t2,t3 where t2.attr1 < 1 and t2.attr2 = 
t3.attr2 and t3.attr1 < 5 order by t2.pk1; +select * from t4 where attr1 < 5 and attr2 > 9223372036854775803 and attr3 != 3 order by t4.pk1; +select * from t3,t4 where t4.attr1 > 1 and t4.attr2 = t3.attr2 and t4.attr3 < 5 order by t4.pk1; + +# Some tests that are currently not supported and should not push condition +select auto from t1 where string = "aaaa" collate latin1_general_ci order by auto; +select * from t2 where (attr1 < 2) = (attr2 < 2) order by pk1; + +set engine_condition_pushdown = @old_ecpd; +DROP TABLE t1,t2,t3,t4; diff --git a/ndb/Makefile.am b/ndb/Makefile.am index 32c821383e6..3aac54b38ee 100644 --- a/ndb/Makefile.am +++ b/ndb/Makefile.am @@ -1,12 +1,13 @@ SUBDIRS = src tools . include @ndb_opt_subdirs@ DIST_SUBDIRS = src tools include test docs -EXTRA_DIST = config +EXTRA_DIST = config ndbapi-examples include $(top_srcdir)/ndb/config/common.mk.am dist-hook: -rm -rf `find $(distdir) -type d -name SCCS` -rm -rf `find $(distdir) -type d -name old_files` + -rm -rf `find $(distdir)/ndbapi-examples -name '*.o'` list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" != "." 
-a "$$subdir" != "include"; then \ files="`find $$subdir -name '*\.h'` `find $$subdir -name '*\.hpp'`"; \ diff --git a/ndb/docs/Makefile.am b/ndb/docs/Makefile.am index 1f00d463f08..afa91857771 100644 --- a/ndb/docs/Makefile.am +++ b/ndb/docs/Makefile.am @@ -48,7 +48,7 @@ ndbapi.html: $(noinst_HEADERS) @RM@ -rf $(DOXYTMP) $(DOXYOUT); \ mkdir -p $(DOXYTMP) $(DOXYOUT); \ @CP@ $(top_srcdir)/ndb/include/ndbapi/* $(DOXYTMP); \ - @CP@ $(top_srcdir)/ndb/examples/*/*.[ch]pp $(DOXYTMP); \ + @CP@ $(top_srcdir)/ndb/ndbapi-examples/*/*.[ch]pp $(DOXYTMP); \ @PERL@ $(DOXYDIR)/predoxy.pl; \ mv footer.html $(DOXYTMP); \ (cd $(DOXYTMP) ; @DOXYGEN@ ../$(DOXYDIR)/Doxyfile.ndbapi); \ diff --git a/ndb/docs/doxygen/Doxyfile.mgmapi b/ndb/docs/doxygen/Doxyfile.mgmapi index db0b31f11ab..1e743dcb60e 100644 --- a/ndb/docs/doxygen/Doxyfile.mgmapi +++ b/ndb/docs/doxygen/Doxyfile.mgmapi @@ -688,7 +688,8 @@ INCLUDE_FILE_PATTERNS = # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. 
-PREDEFINED = DOXYGEN_SHOULD_SKIP_DEPRECATED \ +PREDEFINED = DOXYGEN_FIX \ + DOXYGEN_SHOULD_SKIP_DEPRECATED \ DOXYGEN_SHOULD_SKIP_INTERNAL \ protected=private diff --git a/ndb/examples/configurations/demos.tar b/ndb/examples/configurations/demos.tar Binary files differdeleted file mode 100644 index d8cae90ec5b..00000000000 --- a/ndb/examples/configurations/demos.tar +++ /dev/null diff --git a/ndb/include/debugger/EventLogger.hpp b/ndb/include/debugger/EventLogger.hpp index 7c45dbd353d..6308cf25465 100644 --- a/ndb/include/debugger/EventLogger.hpp +++ b/ndb/include/debugger/EventLogger.hpp @@ -17,12 +17,12 @@ #ifndef EVENTLOGGER_H #define EVENTLOGGER_H -#include <Logger.hpp> -#include <FileLogHandler.hpp> -#include <GrepError.hpp> -#include <kernel_types.h> +#include <logger/Logger.hpp> +#include <logger/FileLogHandler.hpp> +#include "GrepError.hpp" +#include <kernel/kernel_types.h> #include <kernel/LogLevel.hpp> -#include <signaldata/EventReport.hpp> +#include <kernel/signaldata/EventReport.hpp> class EventLoggerBase { public: @@ -39,11 +39,14 @@ public: * threshold - is in range [0-15] * severity - DEBUG to ALERT (Type of log message) */ + typedef void (* EventTextFunction)(char *,size_t,const Uint32*); + struct EventRepLogLevelMatrix { - Ndb_logevent_type eventType; - LogLevel::EventCategory eventCategory; - Uint32 threshold; - Logger::LoggerLevel severity; + Ndb_logevent_type eventType; + LogLevel::EventCategory eventCategory; + Uint32 threshold; + Logger::LoggerLevel severity; + EventTextFunction textF; }; static const EventRepLogLevelMatrix matrix[]; @@ -51,7 +54,8 @@ public: static int event_lookup(int eventType, LogLevel::EventCategory &cat, Uint32 &threshold, - Logger::LoggerLevel &severity); + Logger::LoggerLevel &severity, + EventTextFunction &textF); }; /** @@ -130,17 +134,18 @@ public: * @param nodeId the node id of event origin. 
*/ virtual void log(int, const Uint32*, NodeId = 0,const class LogLevel * = 0); + /** * Returns the event text for the specified event report type. * - * @param type the event type. + * @param textF print function for the event * @param theData the event data. * @param nodeId a node id. * @return the event report text. */ static const char* getText(char * dst, size_t dst_len, - int type, + EventTextFunction textF, const Uint32* theData, NodeId nodeId = 0); /** diff --git a/ndb/include/kernel/LogLevel.hpp b/ndb/include/kernel/LogLevel.hpp index 382016ee761..60dcd36ab56 100644 --- a/ndb/include/kernel/LogLevel.hpp +++ b/ndb/include/kernel/LogLevel.hpp @@ -147,7 +147,7 @@ LogLevel::set_max(const LogLevel & org){ return * this; } -#include <signaldata/EventSubscribeReq.hpp> +#include "signaldata/EventSubscribeReq.hpp" inline LogLevel& diff --git a/ndb/include/kernel/signaldata/CreateIndx.hpp b/ndb/include/kernel/signaldata/CreateIndx.hpp index 5563f80a555..bb099533301 100644 --- a/ndb/include/kernel/signaldata/CreateIndx.hpp +++ b/ndb/include/kernel/signaldata/CreateIndx.hpp @@ -207,7 +207,7 @@ public: NotUnique = 4251, AllocationError = 4252, CreateIndexTableFailed = 4253, - InvalidAttributeOrder = 4255 + DuplicateAttributes = 4258 }; CreateIndxConf m_conf; diff --git a/ndb/include/kernel/signaldata/SignalData.hpp b/ndb/include/kernel/signaldata/SignalData.hpp index 2b29ca06ba0..f825b0feb7b 100644 --- a/ndb/include/kernel/signaldata/SignalData.hpp +++ b/ndb/include/kernel/signaldata/SignalData.hpp @@ -18,8 +18,8 @@ #define SIGNAL_DATA_H #include <ndb_global.h> -#include <ndb_limits.h> -#include <kernel_types.h> +#include <kernel/ndb_limits.h> +#include <kernel/kernel_types.h> #include <BaseString.hpp> #define ASSERT_BOOL(flag, message) assert(flag<=1) diff --git a/ndb/include/mgmapi/mgmapi.h b/ndb/include/mgmapi/mgmapi.h index a63896dc333..17c227853fe 100644 --- a/ndb/include/mgmapi/mgmapi.h +++ b/ndb/include/mgmapi/mgmapi.h @@ -86,6 +86,53 @@ * int filter[] = { 15, 
NDB_MGM_EVENT_CATEGORY_BACKUP, 0 }; * int fd = ndb_mgm_listen_event(handle, filter); * @endcode + * + * + * @section secSLogEvents Structured Log Events + * + * The following steps are involved: + * - Create a NdbEventLogHandle using ndb_mgm_create_logevent_handle() + * - Wait and store log events using ndb_logevent_get_next() + * - The log event data is available in the struct ndb_logevent. The + * data which is specific to a particular event is stored in a union + * between structs so use ndb_logevent::type to decide which struct + * is valid. + * + * Sample code for listening to Backup related events. The availaable log + * events are listed in @ref ndb_logevent.h + * + * @code + * int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 }; + * NdbEventLogHandle le_handle= ndb_mgm_create_logevent_handle(handle, filter); + * struct ndb_logevent le; + * int r= ndb_logevent_get_next(le_handle,&le,0); + * if (r < 0) error + * else if (r == 0) no event + * + * switch (le.type) + * { + * case NDB_LE_BackupStarted: + * ... le.BackupStarted.starting_node; + * ... le.BackupStarted.backup_id; + * break; + * case NDB_LE_BackupFailedToStart: + * ... le.BackupFailedToStart.error; + * break; + * case NDB_LE_BackupCompleted: + * ... le.BackupCompleted.stop_gci; + * break; + * case NDB_LE_BackupAborted: + * ... 
le.BackupStarted.backup_id; + * break; + * default: + * break; + * } + * @endcode + */ + +/* + * @page ndb_logevent.h ndb_logevent.h + * @include ndb_logevent.h */ /** @addtogroup MGM_C_API @@ -93,6 +140,8 @@ */ #include <ndb_types.h> +#include <NdbTCP.h> +#include "ndb_logevent.h" #include "mgmapi_config_parameters.h" #ifdef __cplusplus @@ -348,97 +397,6 @@ extern "C" { }; #endif - /** - * Log event severities (used to filter the cluster log, - * ndb_mgm_set_clusterlog_severity_filter(), and filter listening to events - * ndb_mgm_listen_event()) - */ - enum ndb_mgm_event_severity { - NDB_MGM_ILLEGAL_EVENT_SEVERITY = -1, - /* Must be a nonnegative integer (used for array indexing) */ - /** Cluster log on */ - NDB_MGM_EVENT_SEVERITY_ON = 0, - /** Used in NDB Cluster developement */ - NDB_MGM_EVENT_SEVERITY_DEBUG = 1, - /** Informational messages*/ - NDB_MGM_EVENT_SEVERITY_INFO = 2, - /** Conditions that are not error condition, but might require handling. - */ - NDB_MGM_EVENT_SEVERITY_WARNING = 3, - /** Conditions that, while not fatal, should be corrected. 
*/ - NDB_MGM_EVENT_SEVERITY_ERROR = 4, - /** Critical conditions, like device errors or out of resources */ - NDB_MGM_EVENT_SEVERITY_CRITICAL = 5, - /** A condition that should be corrected immediately, - * such as a corrupted system - */ - NDB_MGM_EVENT_SEVERITY_ALERT = 6, - /* must be next number, works as bound in loop */ - /** All severities */ - NDB_MGM_EVENT_SEVERITY_ALL = 7 - }; - - /** - * Log event categories, used to set filter level on the log events using - * ndb_mgm_set_clusterlog_loglevel() and ndb_mgm_listen_event() - */ - enum ndb_mgm_event_category { - /** - * Invalid log event category - */ - NDB_MGM_ILLEGAL_EVENT_CATEGORY = -1, - /** - * Log events during all kinds of startups - */ - NDB_MGM_EVENT_CATEGORY_STARTUP = CFG_LOGLEVEL_STARTUP, - /** - * Log events during shutdown - */ - NDB_MGM_EVENT_CATEGORY_SHUTDOWN = CFG_LOGLEVEL_SHUTDOWN, - /** - * Statistics log events - */ - NDB_MGM_EVENT_CATEGORY_STATISTIC = CFG_LOGLEVEL_STATISTICS, - /** - * Log events related to checkpoints - */ - NDB_MGM_EVENT_CATEGORY_CHECKPOINT = CFG_LOGLEVEL_CHECKPOINT, - /** - * Log events during node restart - */ - NDB_MGM_EVENT_CATEGORY_NODE_RESTART = CFG_LOGLEVEL_NODERESTART, - /** - * Log events related to connections between cluster nodes - */ - NDB_MGM_EVENT_CATEGORY_CONNECTION = CFG_LOGLEVEL_CONNECTION, - /** - * Backup related log events - */ - NDB_MGM_EVENT_CATEGORY_BACKUP = CFG_LOGLEVEL_BACKUP, - /** - * Congestion related log events - */ - NDB_MGM_EVENT_CATEGORY_CONGESTION = CFG_LOGLEVEL_CONGESTION, -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * Loglevel debug - */ - NDB_MGM_EVENT_CATEGORY_DEBUG = CFG_LOGLEVEL_DEBUG, -#endif - /** - * Uncategorized log events (severity info) - */ - NDB_MGM_EVENT_CATEGORY_INFO = CFG_LOGLEVEL_INFO, - /** - * Uncategorized log events (severity warning or higher) - */ - NDB_MGM_EVENT_CATEGORY_ERROR = CFG_LOGLEVEL_ERROR, -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - NDB_MGM_MIN_EVENT_CATEGORY = CFG_MIN_LOGLEVEL, - 
NDB_MGM_MAX_EVENT_CATEGORY = CFG_MAX_LOGLEVEL -#endif - }; - /***************************************************************************/ /** * @name Functions: Error Handling @@ -567,6 +525,13 @@ extern "C" { */ int ndb_mgm_connect(NdbMgmHandle handle, int no_retries, int retry_delay_in_seconds, int verbose); + /** + * Return true if connected. + * + * @param handle Management handle + * @return 0 if not connected, non-zero if connected. + */ + int ndb_mgm_is_connected(NdbMgmHandle handle); /** * Disconnects from a management server @@ -877,6 +842,64 @@ extern "C" { struct ndb_mgm_reply* reply); #endif + /** + * The NdbLogEventHandle + */ + typedef struct ndb_logevent_handle * NdbLogEventHandle; + + /** + * Listen to log events. + * + * @param handle NDB management handle. + * @param filter pairs of { level, ndb_mgm_event_category } that will be + * pushed to fd, level=0 ends list. + * + * @return NdbLogEventHandle + */ + NdbLogEventHandle ndb_mgm_create_logevent_handle(NdbMgmHandle, + const int filter[]); + void ndb_mgm_destroy_logevent_handle(NdbLogEventHandle*); + + /** + * Retrieve filedescriptor from NdbLogEventHandle. May be used in + * e.g. an application select() statement. + * + * @note Do not attemt to read from it, it will corrupt the parsing. + * + * @return filedescriptor, -1 on failure. + */ + int ndb_logevent_get_fd(const NdbLogEventHandle); + + /** + * Attempt to retrieve next log event and will fill in the supplied + * struct dst + * + * @param dst Pointer to struct to fill in event information + * @param timeout_in_milliseconds Timeout for waiting for event + * + * @return >0 if event exists, 0 no event (timed out), or -1 on error. 
+ * + * @note Return value <=0 will leave dst untouched + */ + int ndb_logevent_get_next(const NdbLogEventHandle, + struct ndb_logevent *dst, + unsigned timeout_in_milliseconds); + + /** + * Retrieve laterst error code + * + * @return error code + */ + int ndb_logevent_get_latest_error(const NdbLogEventHandle); + + /** + * Retrieve laterst error message + * + * @return error message + */ + const char *ndb_logevent_get_latest_error_msg(const NdbLogEventHandle); + + /** @} *********************************************************************/ /** * @name Functions: Backup @@ -961,6 +984,19 @@ extern "C" { int ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned version, int nodetype); + + + /** + * Convert connection to transporter + * @param handle NDB management handle. + * + * @return socket + * + * @note the socket is now able to be used as a transporter connection + */ + NDB_SOCKET_TYPE ndb_mgm_convert_to_transporter(NdbMgmHandle handle); + + /** * Config iterator */ diff --git a/ndb/include/mgmapi/mgmapi_debug.h b/ndb/include/mgmapi/mgmapi_debug.h index cbf9878f163..32a89535456 100644 --- a/ndb/include/mgmapi/mgmapi_debug.h +++ b/ndb/include/mgmapi/mgmapi_debug.h @@ -146,7 +146,7 @@ extern "C" { int node1, int node2, int param, - unsigned value, + int value, struct ndb_mgm_reply* reply); /** @@ -165,7 +165,7 @@ extern "C" { int node1, int node2, int param, - Uint32 *value, + int *value, struct ndb_mgm_reply* reply); #ifdef __cplusplus diff --git a/ndb/include/mgmapi/ndb_logevent.h b/ndb/include/mgmapi/ndb_logevent.h index ca6f848206f..d5744b0fffe 100644 --- a/ndb/include/mgmapi/ndb_logevent.h +++ b/ndb/include/mgmapi/ndb_logevent.h @@ -17,90 +17,602 @@ #ifndef NDB_LOGEVENT_H #define NDB_LOGEVENT_H +/** @addtogroup MGM_C_API + * @{ + */ + +#include "mgmapi_config_parameters.h" + #ifdef __cplusplus extern "C" { #endif + /** + * Available log events grouped by @ref ndb_mgm_event_category + */ + enum Ndb_logevent_type { - /* CONNECTION */ + + NDB_LE_ILLEGAL_TYPE = 
-1, + + /** NDB_MGM_EVENT_CATEGORY_CONNECTION */ NDB_LE_Connected = 0, + /** NDB_MGM_EVENT_CATEGORY_CONNECTION */ NDB_LE_Disconnected = 1, + /** NDB_MGM_EVENT_CATEGORY_CONNECTION */ NDB_LE_CommunicationClosed = 2, + /** NDB_MGM_EVENT_CATEGORY_CONNECTION */ NDB_LE_CommunicationOpened = 3, + /** NDB_MGM_EVENT_CATEGORY_CONNECTION */ NDB_LE_ConnectedApiVersion = 51, - /* CHECKPOINT */ + + /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */ NDB_LE_GlobalCheckpointStarted = 4, + /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */ NDB_LE_GlobalCheckpointCompleted = 5, + /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */ NDB_LE_LocalCheckpointStarted = 6, + /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */ NDB_LE_LocalCheckpointCompleted = 7, + /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */ NDB_LE_LCPStoppedInCalcKeepGci = 8, + /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */ NDB_LE_LCPFragmentCompleted = 9, - /* STARTUP */ + + /** NDB_MGM_EVENT_CATEGORY_STARTUP */ NDB_LE_NDBStartStarted = 10, + /** NDB_MGM_EVENT_CATEGORY_STARTUP */ NDB_LE_NDBStartCompleted = 11, + /** NDB_MGM_EVENT_CATEGORY_STARTUP */ NDB_LE_STTORRYRecieved = 12, + /** NDB_MGM_EVENT_CATEGORY_STARTUP */ NDB_LE_StartPhaseCompleted = 13, + /** NDB_MGM_EVENT_CATEGORY_STARTUP */ NDB_LE_CM_REGCONF = 14, + /** NDB_MGM_EVENT_CATEGORY_STARTUP */ NDB_LE_CM_REGREF = 15, + /** NDB_MGM_EVENT_CATEGORY_STARTUP */ NDB_LE_FIND_NEIGHBOURS = 16, + /** NDB_MGM_EVENT_CATEGORY_STARTUP */ NDB_LE_NDBStopStarted = 17, + /** NDB_MGM_EVENT_CATEGORY_STARTUP */ NDB_LE_NDBStopAborted = 18, + /** NDB_MGM_EVENT_CATEGORY_STARTUP */ NDB_LE_StartREDOLog = 19, + /** NDB_MGM_EVENT_CATEGORY_STARTUP */ NDB_LE_StartLog = 20, + /** NDB_MGM_EVENT_CATEGORY_STARTUP */ NDB_LE_UNDORecordsExecuted = 21, - /* NODERESTART */ + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_NR_CopyDict = 22, + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_NR_CopyDistr = 23, + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_NR_CopyFragsStarted = 24, + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_NR_CopyFragDone 
= 25, + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_NR_CopyFragsCompleted = 26, /* NODEFAIL */ + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_NodeFailCompleted = 27, + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_NODE_FAILREP = 28, + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_ArbitState = 29, + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_ArbitResult = 30, + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_GCP_TakeoverStarted = 31, + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_GCP_TakeoverCompleted = 32, + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_LCP_TakeoverStarted = 33, + /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ NDB_LE_LCP_TakeoverCompleted = 34, - /* STATISTIC */ + /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ NDB_LE_TransReportCounters = 35, + /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ NDB_LE_OperationReportCounters = 36, + /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ NDB_LE_TableCreated = 37, + /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ NDB_LE_UndoLogBlocked = 38, + /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ NDB_LE_JobStatistic = 39, + /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ NDB_LE_SendBytesStatistic = 40, + /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ NDB_LE_ReceiveBytesStatistic = 41, + /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ NDB_LE_MemoryUsage = 50, - /* ERROR */ + /** NDB_MGM_EVENT_CATEGORY_ERROR */ NDB_LE_TransporterError = 42, + /** NDB_MGM_EVENT_CATEGORY_ERROR */ NDB_LE_TransporterWarning = 43, + /** NDB_MGM_EVENT_CATEGORY_ERROR */ NDB_LE_MissedHeartbeat = 44, + /** NDB_MGM_EVENT_CATEGORY_ERROR */ NDB_LE_DeadDueToHeartbeat = 45, + /** NDB_MGM_EVENT_CATEGORY_ERROR */ NDB_LE_WarningEvent = 46, - /* INFO */ + /** NDB_MGM_EVENT_CATEGORY_INFO */ NDB_LE_SentHeartbeat = 47, + /** NDB_MGM_EVENT_CATEGORY_INFO */ NDB_LE_CreateLogBytes = 48, + /** NDB_MGM_EVENT_CATEGORY_INFO */ NDB_LE_InfoEvent = 49, /* GREP */ NDB_LE_GrepSubscriptionInfo = 52, NDB_LE_GrepSubscriptionAlert = 53, - /* BACKUP */ + /** NDB_MGM_EVENT_CATEGORY_BACKUP */ 
NDB_LE_BackupStarted = 54, + /** NDB_MGM_EVENT_CATEGORY_BACKUP */ NDB_LE_BackupFailedToStart = 55, + /** NDB_MGM_EVENT_CATEGORY_BACKUP */ NDB_LE_BackupCompleted = 56, + /** NDB_MGM_EVENT_CATEGORY_BACKUP */ NDB_LE_BackupAborted = 57 }; + /** + * Log event severities (used to filter the cluster log, + * ndb_mgm_set_clusterlog_severity_filter(), and filter listening to events + * ndb_mgm_listen_event()) + */ + enum ndb_mgm_event_severity { + NDB_MGM_ILLEGAL_EVENT_SEVERITY = -1, + /* Must be a nonnegative integer (used for array indexing) */ + /** Cluster log on */ + NDB_MGM_EVENT_SEVERITY_ON = 0, + /** Used in NDB Cluster developement */ + NDB_MGM_EVENT_SEVERITY_DEBUG = 1, + /** Informational messages*/ + NDB_MGM_EVENT_SEVERITY_INFO = 2, + /** Conditions that are not error condition, but might require handling. + */ + NDB_MGM_EVENT_SEVERITY_WARNING = 3, + /** Conditions that, while not fatal, should be corrected. */ + NDB_MGM_EVENT_SEVERITY_ERROR = 4, + /** Critical conditions, like device errors or out of resources */ + NDB_MGM_EVENT_SEVERITY_CRITICAL = 5, + /** A condition that should be corrected immediately, + * such as a corrupted system + */ + NDB_MGM_EVENT_SEVERITY_ALERT = 6, + /* must be next number, works as bound in loop */ + /** All severities */ + NDB_MGM_EVENT_SEVERITY_ALL = 7 + }; + + /** + * Log event categories, used to set filter level on the log events using + * ndb_mgm_set_clusterlog_loglevel() and ndb_mgm_listen_event() + */ + enum ndb_mgm_event_category { + /** + * Invalid log event category + */ + NDB_MGM_ILLEGAL_EVENT_CATEGORY = -1, + /** + * Log events during all kinds of startups + */ + NDB_MGM_EVENT_CATEGORY_STARTUP = CFG_LOGLEVEL_STARTUP, + /** + * Log events during shutdown + */ + NDB_MGM_EVENT_CATEGORY_SHUTDOWN = CFG_LOGLEVEL_SHUTDOWN, + /** + * Statistics log events + */ + NDB_MGM_EVENT_CATEGORY_STATISTIC = CFG_LOGLEVEL_STATISTICS, + /** + * Log events related to checkpoints + */ + NDB_MGM_EVENT_CATEGORY_CHECKPOINT = 
CFG_LOGLEVEL_CHECKPOINT, + /** + * Log events during node restart + */ + NDB_MGM_EVENT_CATEGORY_NODE_RESTART = CFG_LOGLEVEL_NODERESTART, + /** + * Log events related to connections between cluster nodes + */ + NDB_MGM_EVENT_CATEGORY_CONNECTION = CFG_LOGLEVEL_CONNECTION, + /** + * Backup related log events + */ + NDB_MGM_EVENT_CATEGORY_BACKUP = CFG_LOGLEVEL_BACKUP, + /** + * Congestion related log events + */ + NDB_MGM_EVENT_CATEGORY_CONGESTION = CFG_LOGLEVEL_CONGESTION, +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + /** + * Loglevel debug + */ + NDB_MGM_EVENT_CATEGORY_DEBUG = CFG_LOGLEVEL_DEBUG, +#endif + /** + * Uncategorized log events (severity info) + */ + NDB_MGM_EVENT_CATEGORY_INFO = CFG_LOGLEVEL_INFO, + /** + * Uncategorized log events (severity warning or higher) + */ + NDB_MGM_EVENT_CATEGORY_ERROR = CFG_LOGLEVEL_ERROR, +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + NDB_MGM_MIN_EVENT_CATEGORY = CFG_MIN_LOGLEVEL, + NDB_MGM_MAX_EVENT_CATEGORY = CFG_MAX_LOGLEVEL +#endif + }; + + /** + * Structure to store and retrieve log event information. + * @see @ref secSLogEvents + */ + struct ndb_logevent { + /** NdbLogEventHandle (to be used for comparing only) + * set in ndb_logevent_get_next() + */ + void *handle; + + /** Which event */ + enum Ndb_logevent_type type; + + /** Time when log event was registred at the management server */ + unsigned time; + + /** Category of log event */ + enum ndb_mgm_event_category category; + + /** Severity of log event */ + enum ndb_mgm_event_severity severity; + + /** Level (0-15) of log event */ + unsigned level; + + /** Node ID of the node that reported the log event */ + unsigned source_nodeid; + + /** Union of log event specific data. 
Use @ref type to decide + * which struct to use + */ + union { + /* CONNECT */ + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned node; + } Connected; + + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned node; + } Disconnected; + + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned node; + } CommunicationClosed; + + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned node; + } CommunicationOpened; + + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned node; + unsigned version; + } ConnectedApiVersion; + + /* CHECKPOINT */ + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned gci; + } GlobalCheckpointStarted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned gci; + } GlobalCheckpointCompleted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned lci; + unsigned keep_gci; + unsigned restore_gci; + } LocalCheckpointStarted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned lci; + } LocalCheckpointCompleted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned data; + } LCPStoppedInCalcKeepGci; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned node; + unsigned table_id; + unsigned fragment_id; + } LCPFragmentCompleted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned acc_count; + unsigned tup_count; + } UndoLogBlocked; + + /* STARTUP */ + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned version; + } NDBStartStarted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned 
version; + } NDBStartCompleted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + } STTORRYRecieved; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned phase; + unsigned starttype; + } StartPhaseCompleted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned own_id; + unsigned president_id; + unsigned dynamic_id; + } CM_REGCONF; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned own_id; + unsigned other_id; + unsigned cause; + } CM_REGREF; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned own_id; + unsigned left_id; + unsigned right_id; + unsigned dynamic_id; + } FIND_NEIGHBOURS; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned stoptype; + } NDBStopStarted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + } NDBStopAborted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned node; + unsigned keep_gci; + unsigned completed_gci; + unsigned restorable_gci; + } StartREDOLog; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned log_part; + unsigned start_mb; + unsigned stop_mb; + unsigned gci; + } StartLog; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned block; + unsigned data1; + unsigned data2; + unsigned data3; + unsigned data4; + unsigned data5; + unsigned data6; + unsigned data7; + unsigned data8; + unsigned data9; + unsigned data10; + } UNDORecordsExecuted; + + /* NODERESTART */ + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + } NR_CopyDict; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + } NR_CopyDistr; + /** Log event specific data for for corresponding NDB_LE_ log event 
*/ + struct { + unsigned dest_node; + } NR_CopyFragsStarted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned dest_node; + unsigned table_id; + unsigned fragment_id; + } NR_CopyFragDone; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned dest_node; + } NR_CopyFragsCompleted; + + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned block; /* 0 = all */ + unsigned failed_node; + unsigned completing_node; /* 0 = all */ + } NodeFailCompleted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned failed_node; + unsigned failure_state; + } NODE_FAILREP; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned code; /* code & state << 16 */ + unsigned arbit_node; + unsigned ticket_0; + unsigned ticket_1; + /* TODO */ + } ArbitState; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned code; /* code & state << 16 */ + unsigned arbit_node; + unsigned ticket_0; + unsigned ticket_1; + /* TODO */ + } ArbitResult; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + } GCP_TakeoverStarted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + } GCP_TakeoverCompleted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + } LCP_TakeoverStarted; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned state; + } LCP_TakeoverCompleted; + + /* STATISTIC */ + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned trans_count; + unsigned commit_count; + unsigned read_count; + unsigned simple_read_count; + unsigned write_count; + unsigned attrinfo_count; + unsigned conc_op_count; + unsigned abort_count; + unsigned scan_count; + unsigned range_scan_count; + } 
TransReportCounters; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned ops; + } OperationReportCounters; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned table_id; + } TableCreated; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned mean_loop_count; + } JobStatistic; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned to_node; + unsigned mean_sent_bytes; + } SendBytesStatistic; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned from_node; + unsigned mean_received_bytes; + } ReceiveBytesStatistic; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + int gth; + unsigned page_size_kb; + unsigned pages_used; + unsigned pages_total; + unsigned block; + } MemoryUsage; + + /* ERROR */ + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned to_node; + unsigned code; + } TransporterError; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned to_node; + unsigned code; + } TransporterWarning; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned node; + unsigned count; + } MissedHeartbeat; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned node; + } DeadDueToHeartbeat; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + /* TODO */ + } WarningEvent; + + /* INFO */ + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned node; + } SentHeartbeat; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + unsigned node; + } CreateLogBytes; + /** Log event specific data for for corresponding NDB_LE_ log event */ + struct { + /* TODO */ + } InfoEvent; + + /** Log event 
data for @ref NDB_LE_BackupStarted */ + struct { + unsigned starting_node; + unsigned backup_id; + } BackupStarted; + /** Log event data @ref NDB_LE_BackupFailedToStart */ + struct { + unsigned starting_node; + unsigned error; + } BackupFailedToStart; + /** Log event data @ref NDB_LE_BackupCompleted */ + struct { + unsigned starting_node; + unsigned backup_id; + unsigned start_gci; + unsigned stop_gci; + unsigned n_records; + unsigned n_log_records; + unsigned n_bytes; + unsigned n_log_bytes; + } BackupCompleted; + /** Log event data @ref NDB_LE_BackupAborted */ + struct { + unsigned starting_node; + unsigned backup_id; + unsigned error; + } BackupAborted; +#ifndef DOXYGEN_FIX + }; +#else + } <union>; +#endif + }; + +enum ndb_logevent_handle_error { + NDB_LEH_NO_ERROR, + NDB_LEH_READ_ERROR, + NDB_LEH_MISSING_EVENT_SPECIFIER, + NDB_LEH_UNKNOWN_EVENT_TYPE, + NDB_LEH_UNKNOWN_EVENT_VARIABLE, + NDB_LEH_INTERNAL_ERROR +}; + #ifdef __cplusplus } #endif +/** @} */ + #endif diff --git a/ndb/include/ndbapi/NdbScanFilter.hpp b/ndb/include/ndbapi/NdbScanFilter.hpp index 7c575169dc1..b5457bab99b 100644 --- a/ndb/include/ndbapi/NdbScanFilter.hpp +++ b/ndb/include/ndbapi/NdbScanFilter.hpp @@ -53,7 +53,9 @@ public: COND_GE = 2, ///< upper bound COND_GT = 3, ///< upper bound, strict COND_EQ = 4, ///< equality - COND_NE = 5 ///< not equal + COND_NE = 5, ///< not equal + COND_LIKE = 6, ///< like + COND_NOT_LIKE = 7 ///< not like }; /** diff --git a/ndb/include/transporter/TransporterDefinitions.hpp b/ndb/include/transporter/TransporterDefinitions.hpp index d4763ba4c37..18d1ec76a3c 100644 --- a/ndb/include/transporter/TransporterDefinitions.hpp +++ b/ndb/include/transporter/TransporterDefinitions.hpp @@ -49,74 +49,50 @@ enum SendStatus { const Uint32 MAX_MESSAGE_SIZE = (12+4+4+(4*25)+(3*4)+4*4096); /** - * TCP Transporter Configuration + * TransporterConfiguration + * + * used for setting up a transporter. 
the union member specific is for + * information specific to a transporter type. */ -struct TCP_TransporterConfiguration { - Uint32 port; +struct TransporterConfiguration { + Uint32 port; const char *remoteHostName; const char *localHostName; NodeId remoteNodeId; NodeId localNodeId; - Uint32 sendBufferSize; // Size of SendBuffer of priority B - Uint32 maxReceiveSize; // Maximum no of bytes to receive + NodeId serverNodeId; bool checksum; bool signalId; -}; - -/** - * SHM Transporter Configuration - */ -struct SHM_TransporterConfiguration { - Uint32 port; - const char *remoteHostName; - const char *localHostName; - NodeId remoteNodeId; - NodeId localNodeId; - bool checksum; - bool signalId; - - Uint32 shmKey; - Uint32 shmSize; - int signum; -}; - -/** - * OSE Transporter Configuration - */ -struct OSE_TransporterConfiguration { - const char *remoteHostName; - const char *localHostName; - NodeId remoteNodeId; - NodeId localNodeId; - bool checksum; - bool signalId; - - Uint32 prioASignalSize; - Uint32 prioBSignalSize; - Uint32 receiveBufferSize; // In number of signals -}; - -/** - * SCI Transporter Configuration - */ -struct SCI_TransporterConfiguration { - const char *remoteHostName; - const char *localHostName; - Uint32 port; - Uint32 sendLimit; // Packet size - Uint32 bufferSize; // Buffer size - - Uint32 nLocalAdapters; // 1 or 2, the number of adapters on local host - - Uint32 remoteSciNodeId0; // SCInodeId for adapter 1 - Uint32 remoteSciNodeId1; // SCInodeId for adapter 2 - - NodeId localNodeId; // Local node Id - NodeId remoteNodeId; // Remote node Id - - bool checksum; - bool signalId; - + bool isMgmConnection; // is a mgm connection, requires transforming + + union { // Transporter specific configuration information + + struct { + Uint32 sendBufferSize; // Size of SendBuffer of priority B + Uint32 maxReceiveSize; // Maximum no of bytes to receive + } tcp; + + struct { + Uint32 shmKey; + Uint32 shmSize; + int signum; + } shm; + + struct { + Uint32 
prioASignalSize; + Uint32 prioBSignalSize; + } ose; + + struct { + Uint32 sendLimit; // Packet size + Uint32 bufferSize; // Buffer size + + Uint32 nLocalAdapters; // 1 or 2, the number of adapters on local host + + Uint32 remoteSciNodeId0; // SCInodeId for adapter 1 + Uint32 remoteSciNodeId1; // SCInodeId for adapter 2 + } sci; + }; }; struct SignalHeader { diff --git a/ndb/include/transporter/TransporterRegistry.hpp b/ndb/include/transporter/TransporterRegistry.hpp index da0849c3f50..a31fa1d5ce2 100644 --- a/ndb/include/transporter/TransporterRegistry.hpp +++ b/ndb/include/transporter/TransporterRegistry.hpp @@ -30,6 +30,7 @@ #include "TransporterDefinitions.hpp" #include <SocketServer.hpp> +#include <SocketClient.hpp> #include <NdbTCP.h> @@ -96,12 +97,17 @@ public: /** * Constructor */ - TransporterRegistry(NdbMgmHandle mgm_handle=NULL, - void * callback = 0 , + TransporterRegistry(void * callback = 0 , unsigned maxTransporters = MAX_NTRANSPORTERS, unsigned sizeOfLongSignalMemory = 100); - void set_mgm_handle(NdbMgmHandle h) { m_mgm_handle = h; }; + /** + * this handle will be used in the client connect thread + * to fetch information on dynamic ports. The old handle + * (if set) is destroyed, and this is destroyed by the destructor + */ + void set_mgm_handle(NdbMgmHandle h); + NdbMgmHandle get_mgm_handle(void) { return m_mgm_handle; }; bool init(NodeId localNodeId); @@ -111,6 +117,11 @@ public: bool connect_server(NDB_SOCKET_TYPE sockfd); /** + * use a mgmd connection to connect as a transporter + */ + NDB_SOCKET_TYPE connect_ndb_mgmd(SocketClient *sc); + + /** * Remove all transporters */ void removeAll(); @@ -179,10 +190,10 @@ public: * started, startServer is called. A transporter of the selected kind * is created and it is put in the transporter arrays. 
*/ - bool createTransporter(struct TCP_TransporterConfiguration * config); - bool createTransporter(struct SCI_TransporterConfiguration * config); - bool createTransporter(struct SHM_TransporterConfiguration * config); - bool createTransporter(struct OSE_TransporterConfiguration * config); + bool createTCPTransporter(struct TransporterConfiguration * config); + bool createSCITransporter(struct TransporterConfiguration * config); + bool createSHMTransporter(struct TransporterConfiguration * config); + bool createOSETransporter(struct TransporterConfiguration * config); /** * prepareSend @@ -232,12 +243,12 @@ public: class Transporter_interface { public: NodeId m_remote_nodeId; - unsigned short m_service_port; + int m_s_service_port; // signed port number const char *m_interface; }; Vector<Transporter_interface> m_transporter_interface; void add_transporter_interface(NodeId remoteNodeId, const char *interf, - unsigned short port); + int s_port); // signed port. <0 is dynamic Transporter* get_transporter(NodeId nodeId); NodeId get_localNodeId() { return localNodeId; }; diff --git a/ndb/include/util/NdbSqlUtil.hpp b/ndb/include/util/NdbSqlUtil.hpp index 41e4dadfd60..a59b9da39cd 100644 --- a/ndb/include/util/NdbSqlUtil.hpp +++ b/ndb/include/util/NdbSqlUtil.hpp @@ -26,20 +26,6 @@ typedef struct charset_info_st CHARSET_INFO; class NdbSqlUtil { public: /** - * Compare strings, optionally with padded semantics. Returns - * negative (less), zero (equal), or positive (greater). - */ - static int char_compare(const char* s1, unsigned n1, - const char* s2, unsigned n2, bool padded); - - /** - * Like operator, optionally with padded semantics. Returns true or - * false. - */ - static bool char_like(const char* s1, unsigned n1, - const char* s2, unsigned n2, bool padded); - - /** * Compare attribute values. Returns -1, 0, +1 for less, equal, * greater, respectively. Parameters are pointers to values and their * lengths in bytes. The lengths can differ. 
@@ -48,7 +34,7 @@ public: * the partial value is not enough to determine the result, CmpUnknown * will be returned. A shorter second value is not necessarily * partial. Partial values are allowed only for types where prefix - * comparison is possible (basically, binary types). + * comparison is possible (basically, binary strings). * * First parameter is a pointer to type specific extra info. Char * types receive CHARSET_INFO in it. @@ -58,6 +44,18 @@ public: */ typedef int Cmp(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full); + /** + * Prototype for "like" comparison. Defined for string types. Second + * argument must have same type-specific format. Returns 0 on match, + * +1 on no match, and -1 on bad data. + * + * Uses default special chars ( \ % _ ). + * + * TODO convert special chars to the cs so that ucs2 etc works + * TODO allow user-defined escape ( \ ) + */ + typedef int Like(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2); + enum CmpResult { CmpLess = -1, CmpEqual = 0, @@ -101,6 +99,7 @@ public: }; Enum m_typeId; // redundant Cmp* m_cmp; // comparison method + Like* m_like; // "like" comparison method }; /** @@ -110,7 +109,8 @@ public: /** * Get the normalized type used in hashing and key comparisons. - * Maps all string types to Binary. + * Maps all string types to Binary. This includes Var* strings + * because strxfrm result is padded to fixed (maximum) length. 
*/ static const Type& getTypeBinary(Uint32 typeId); @@ -176,6 +176,13 @@ private: static Cmp cmpOlddecimalunsigned; static Cmp cmpDecimal; static Cmp cmpDecimalunsigned; + // + static Like likeChar; + static Like likeBinary; + static Like likeVarchar; + static Like likeVarbinary; + static Like likeLongvarchar; + static Like likeLongvarbinary; }; #endif diff --git a/ndb/include/util/SocketClient.hpp b/ndb/include/util/SocketClient.hpp index de9a081464a..bf1ad7d45d6 100644 --- a/ndb/include/util/SocketClient.hpp +++ b/ndb/include/util/SocketClient.hpp @@ -31,6 +31,12 @@ public: SocketClient(const char *server_name, unsigned short port, SocketAuthenticator *sa = 0); ~SocketClient(); bool init(); + void set_port(unsigned short port) { + m_port = port; + m_servaddr.sin_port = htons(m_port); + }; + unsigned short get_port() { return m_port; }; + char *get_server_name() { return m_server_name; }; NDB_SOCKET_TYPE connect(); bool close(); }; diff --git a/ndb/include/util/SocketServer.hpp b/ndb/include/util/SocketServer.hpp index 9d8af204391..2e1afb74945 100644 --- a/ndb/include/util/SocketServer.hpp +++ b/ndb/include/util/SocketServer.hpp @@ -83,7 +83,7 @@ public: * bind & listen * Returns false if no success */ - bool setup(Service *, unsigned short port, const char * pinterface = 0); + bool setup(Service *, unsigned short *port, const char * pinterface = 0); /** * start/stop the server diff --git a/ndb/include/util/ndb_opts.h b/ndb/include/util/ndb_opts.h index aa7a02f58ae..72abd6d5d7a 100644 --- a/ndb/include/util/ndb_opts.h +++ b/ndb/include/util/ndb_opts.h @@ -25,10 +25,19 @@ #include <ndb_opt_defaults.h> #define NDB_STD_OPTS_VARS \ -const char *opt_connect_str= 0;\ my_bool opt_ndb_optimized_node_selection +int opt_ndb_nodeid; my_bool opt_ndb_shm; +const char *opt_ndb_connectstring= 0; +const char *opt_connect_str= 0; +const char *opt_ndb_mgmd= 0; +char opt_ndb_constrbuf[1024]; +unsigned opt_ndb_constrbuf_len= 0; + +#ifndef DBUG_OFF +const char *opt_debug= 0; +#endif 
#define OPT_NDB_CONNECTSTRING 'c' @@ -43,8 +52,17 @@ my_bool opt_ndb_shm; "Set connect string for connecting to ndb_mgmd. " \ "Syntax: \"[nodeid=<id>;][host=]<hostname>[:<port>]\". " \ "Overides specifying entries in NDB_CONNECTSTRING and Ndb.cfg", \ - (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0, \ + (gptr*) &opt_ndb_connectstring, (gptr*) &opt_ndb_connectstring, \ + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ + { "ndb-mgmd-host", OPT_NDB_MGMD, \ + "Set host and port for connecting to ndb_mgmd. " \ + "Syntax: <hostname>[:<port>].", \ + (gptr*) &opt_ndb_mgmd, (gptr*) &opt_ndb_mgmd, 0, \ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ + { "ndb-nodeid", OPT_NDB_NODEID, \ + "Set node id for this node.", \ + (gptr*) &opt_ndb_nodeid, (gptr*) &opt_ndb_nodeid, 0, \ + GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ { "ndb-shm", OPT_NDB_SHM,\ "Allow optimizing using shared memory connections when available",\ (gptr*) &opt_ndb_shm, (gptr*) &opt_ndb_shm, 0,\ @@ -55,13 +73,14 @@ my_bool opt_ndb_shm; (gptr*) &opt_ndb_optimized_node_selection, 0,\ GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},\ { "connect-string", OPT_NDB_CONNECTSTRING, "same as --ndb-connectstring",\ - (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0,\ - GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 } + (gptr*) &opt_ndb_connectstring, (gptr*) &opt_ndb_connectstring, \ + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 } #ifndef DBUG_OFF #define NDB_STD_OPTS(prog_name) \ { "debug", '#', "Output debug log. 
Often this is 'd:t:o,filename'.", \ - 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0 }, \ + (gptr*) &opt_debug, (gptr*) &opt_debug, \ + 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0 }, \ NDB_STD_OPTS_COMMON #else #define NDB_STD_OPTS(prog_name) NDB_STD_OPTS_COMMON @@ -79,21 +98,29 @@ enum ndb_std_options { OPT_NDB_SHM= 256, OPT_NDB_SHM_SIGNUM, OPT_NDB_OPTIMIZED_NODE_SELECTION, + OPT_NDB_MGMD, + OPT_NDB_NODEID, NDB_STD_OPTIONS_LAST /* should always be last in this enum */ }; static my_bool ndb_std_get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - const char *argument) + char *argument) { switch (optid) { +#ifndef DBUG_OFF case '#': - if (argument) + if (opt_debug) { - DBUG_PUSH(argument); + DBUG_PUSH(opt_debug); + } + else + { + DBUG_PUSH("d:t"); } break; +#endif case 'V': ndb_std_print_version(); exit(0); @@ -110,6 +137,29 @@ ndb_std_get_one_option(int optid, #endif } break; + case OPT_NDB_MGMD: + case OPT_NDB_NODEID: + { + int len= my_snprintf(opt_ndb_constrbuf+opt_ndb_constrbuf_len, + sizeof(opt_ndb_constrbuf)-opt_ndb_constrbuf_len, + "%s%s%s",opt_ndb_constrbuf_len > 0 ? ",":"", + optid == OPT_NDB_NODEID ? "nodeid=" : "", + argument); + opt_ndb_constrbuf_len+= len; + } + /* fall through to add the connectstring to the end + * and set opt_ndbcluster_connectstring + */ + case OPT_NDB_CONNECTSTRING: + if (opt_ndb_connectstring && opt_ndb_connectstring[0]) + my_snprintf(opt_ndb_constrbuf+opt_ndb_constrbuf_len, + sizeof(opt_ndb_constrbuf)-opt_ndb_constrbuf_len, + "%s%s", opt_ndb_constrbuf_len > 0 ? 
",":"", + opt_ndb_connectstring); + else + opt_ndb_constrbuf[opt_ndb_constrbuf_len]= 0; + opt_connect_str= opt_ndb_constrbuf; + break; } return 0; } diff --git a/ndb/examples/Makefile b/ndb/ndbapi-examples/Makefile index 87a821773ec..965dc3ec29f 100644 --- a/ndb/examples/Makefile +++ b/ndb/ndbapi-examples/Makefile @@ -4,7 +4,8 @@ BIN_DIRS := ndbapi_simple_example \ ndbapi_retries_example \ ndbapi_simple_index_example \ ndbapi_event_example \ - ndbapi_scan_example + ndbapi_scan_example \ + mgmapi_logevent_example bins: $(patsubst %, _bins_%, $(BIN_DIRS)) diff --git a/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile b/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile new file mode 100644 index 00000000000..c1ca32dfe17 --- /dev/null +++ b/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile @@ -0,0 +1,23 @@ +TARGET = mgmapi_logevent +SRCS = $(TARGET).cpp +OBJS = $(TARGET).o +CXX = g++ +CFLAGS = -c -Wall -fno-rtti -fno-exceptions +CXXFLAGS = +DEBUG = +LFLAGS = -Wall +TOP_SRCDIR = ../../.. 
+INCLUDE_DIR = $(TOP_SRCDIR) +LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \ + -L$(TOP_SRCDIR)/libmysql_r/.libs \ + -L$(TOP_SRCDIR)/mysys +SYS_LIB = + +$(TARGET): $(OBJS) + $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lz $(SYS_LIB) -o $(TARGET) + +$(TARGET).o: $(SRCS) + $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/ndb/include -I$(INCLUDE_DIR)/ndb/include/mgmapi -I$(INCLUDE_DIR)/ndb/include/ndbapi $(SRCS) + +clean: + rm -f *.o $(TARGET) diff --git a/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp b/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp new file mode 100644 index 00000000000..5ec1fba6314 --- /dev/null +++ b/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp @@ -0,0 +1,140 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include <mysql.h> +#include <ndbapi/NdbApi.hpp> +#include <mgmapi.h> +#include <stdio.h> + +/* + * export LD_LIBRARY_PATH=../../../libmysql_r/.libs:../../../ndb/src/.libs + */ + +#define MGMERROR(h) \ +{ \ + fprintf(stderr, "code: %d msg: %s\n", \ + ndb_mgm_get_latest_error(h), \ + ndb_mgm_get_latest_error_msg(h)); \ + exit(-1); \ +} + +#define LOGEVENTERROR(h) \ +{ \ + fprintf(stderr, "code: %d msg: %s\n", \ + ndb_logevent_get_latest_error(h), \ + ndb_logevent_get_latest_error_msg(h)); \ + exit(-1); \ +} + +int main() +{ + NdbMgmHandle h; + NdbLogEventHandle le; + int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, + 15, NDB_MGM_EVENT_CATEGORY_CONNECTION, + 15, NDB_MGM_EVENT_CATEGORY_NODE_RESTART, + 15, NDB_MGM_EVENT_CATEGORY_STARTUP, + 15, NDB_MGM_EVENT_CATEGORY_ERROR, + 0 }; + struct ndb_logevent event; + + ndb_init(); + + h= ndb_mgm_create_handle(); + if ( h == 0) + { + printf("Unable to create handle\n"); + exit(-1); + } + if (ndb_mgm_connect(h,0,0,0)) MGMERROR(h); + + le= ndb_mgm_create_logevent_handle(h, filter); + if ( le == 0 ) MGMERROR(h); + + while (1) + { + int timeout= 5000; + int r= ndb_logevent_get_next(le,&event,timeout); + if (r == 0) + printf("No event within %d milliseconds\n", timeout); + else if (r < 0) + LOGEVENTERROR(le) + else + { + switch (event.type) { + case NDB_LE_BackupStarted: + printf("Node %d: BackupStarted\n", event.source_nodeid); + printf(" Starting node ID: %d\n", event.BackupStarted.starting_node); + printf(" Backup ID: %d\n", event.BackupStarted.backup_id); + break; + case NDB_LE_BackupCompleted: + printf("Node %d: BackupCompleted\n", event.source_nodeid); + printf(" Backup ID: %d\n", event.BackupStarted.backup_id); + break; + case NDB_LE_BackupAborted: + printf("Node %d: BackupAborted\n", event.source_nodeid); + 
break; + case NDB_LE_BackupFailedToStart: + printf("Node %d: BackupFailedToStart\n", event.source_nodeid); + break; + + case NDB_LE_NodeFailCompleted: + printf("Node %d: NodeFailCompleted\n", event.source_nodeid); + break; + case NDB_LE_ArbitResult: + printf("Node %d: ArbitResult\n", event.source_nodeid); + printf(" code %d, arbit_node %d\n", + event.ArbitResult.code & 0xffff, + event.ArbitResult.arbit_node); + break; + case NDB_LE_DeadDueToHeartbeat: + printf("Node %d: DeadDueToHeartbeat\n", event.source_nodeid); + printf(" node %d\n", event.DeadDueToHeartbeat.node); + break; + + case NDB_LE_Connected: + printf("Node %d: Connected\n", event.source_nodeid); + printf(" node %d\n", event.Connected.node); + break; + case NDB_LE_Disconnected: + printf("Node %d: Disconnected\n", event.source_nodeid); + printf(" node %d\n", event.Disconnected.node); + break; + case NDB_LE_NDBStartCompleted: + printf("Node %d: StartCompleted\n", event.source_nodeid); + printf(" version %d.%d.%d\n", + event.NDBStartCompleted.version >> 16 & 0xff, + event.NDBStartCompleted.version >> 8 & 0xff, + event.NDBStartCompleted.version >> 0 & 0xff); + break; + case NDB_LE_ArbitState: + printf("Node %d: ArbitState\n", event.source_nodeid); + printf(" code %d, arbit_node %d\n", + event.ArbitState.code & 0xffff, + event.ArbitResult.arbit_node); + break; + + default: + break; + } + } + } + + ndb_mgm_destroy_logevent_handle(&le); + ndb_mgm_destroy_handle(&h); + ndb_end(0); + return 0; +} diff --git a/ndb/examples/ndbapi_async_example/Makefile b/ndb/ndbapi-examples/ndbapi_async_example/Makefile index 4df9367fc29..4df9367fc29 100644 --- a/ndb/examples/ndbapi_async_example/Makefile +++ b/ndb/ndbapi-examples/ndbapi_async_example/Makefile diff --git a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp b/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp index 16731411c76..aa745f4d28d 100644 --- a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp +++ 
b/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp @@ -57,16 +57,15 @@ /** * Helper sleep function */ -int +static void milliSleep(int milliseconds){ - int result = 0; - struct timespec sleeptime; + struct timeval sleeptime; sleeptime.tv_sec = milliseconds / 1000; - sleeptime.tv_nsec = (milliseconds - (sleeptime.tv_sec * 1000)) * 1000000; - result = nanosleep(&sleeptime, NULL); - return result; + sleeptime.tv_usec = (milliseconds - (sleeptime.tv_sec * 1000)) * 1000000; + select(0, 0, 0, 0, &sleeptime); } + /** * error printout macro */ diff --git a/ndb/examples/ndbapi_async_example/readme.txt b/ndb/ndbapi-examples/ndbapi_async_example/readme.txt index 47cb4bf9ffa..47cb4bf9ffa 100644 --- a/ndb/examples/ndbapi_async_example/readme.txt +++ b/ndb/ndbapi-examples/ndbapi_async_example/readme.txt diff --git a/ndb/examples/ndbapi_async_example1/Makefile b/ndb/ndbapi-examples/ndbapi_async_example1/Makefile index b6fc31a00e5..b6fc31a00e5 100644 --- a/ndb/examples/ndbapi_async_example1/Makefile +++ b/ndb/ndbapi-examples/ndbapi_async_example1/Makefile diff --git a/ndb/examples/ndbapi_async_example1/ndbapi_async1.cpp b/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp index e8bc19e267b..e8bc19e267b 100644 --- a/ndb/examples/ndbapi_async_example1/ndbapi_async1.cpp +++ b/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp diff --git a/ndb/examples/ndbapi_event_example/Makefile b/ndb/ndbapi-examples/ndbapi_event_example/Makefile index 07d244c9346..07d244c9346 100644 --- a/ndb/examples/ndbapi_event_example/Makefile +++ b/ndb/ndbapi-examples/ndbapi_event_example/Makefile diff --git a/ndb/examples/ndbapi_event_example/ndbapi_event.cpp b/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp index 286f6fafbab..286f6fafbab 100644 --- a/ndb/examples/ndbapi_event_example/ndbapi_event.cpp +++ b/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp diff --git a/ndb/examples/ndbapi_retries_example/Makefile 
b/ndb/ndbapi-examples/ndbapi_retries_example/Makefile index c7a8946cd9a..c7a8946cd9a 100644 --- a/ndb/examples/ndbapi_retries_example/Makefile +++ b/ndb/ndbapi-examples/ndbapi_retries_example/Makefile diff --git a/ndb/examples/ndbapi_retries_example/ndbapi_retries.cpp b/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp index 8c29fe31446..8c29fe31446 100644 --- a/ndb/examples/ndbapi_retries_example/ndbapi_retries.cpp +++ b/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp diff --git a/ndb/examples/ndbapi_scan_example/Makefile b/ndb/ndbapi-examples/ndbapi_scan_example/Makefile index c5883757e5e..c5883757e5e 100644 --- a/ndb/examples/ndbapi_scan_example/Makefile +++ b/ndb/ndbapi-examples/ndbapi_scan_example/Makefile diff --git a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp b/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp index bb19eaaf1d6..69ffd99b8ca 100644 --- a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp +++ b/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp @@ -72,18 +72,17 @@ #include <NdbApi.hpp> // Used for cout #include <iostream> +#include <stdio.h> /** * Helper sleep function */ -int +static void milliSleep(int milliseconds){ - int result = 0; - struct timespec sleeptime; + struct timeval sleeptime; sleeptime.tv_sec = milliseconds / 1000; - sleeptime.tv_nsec = (milliseconds - (sleeptime.tv_sec * 1000)) * 1000000; - result = nanosleep(&sleeptime, NULL); - return result; + sleeptime.tv_usec = (milliseconds - (sleeptime.tv_sec * 1000)) * 1000000; + select(0, 0, 0, 0, &sleeptime); } @@ -103,6 +102,12 @@ milliSleep(int milliseconds){ struct Car { + /** + * Note memset, so that entire char-fields are cleared + * as all 20 bytes are significant (as type is char) + */ + Car() { memset(this, 0, sizeof(* this)); } + unsigned int reg_no; char brand[20]; char color[20]; @@ -792,8 +797,9 @@ int main() /** * Note! 
color needs to be of exact the same size as column defined */ - char color[20] = "Pink"; - if(scan_delete(&myNdb, column_color, color) > 0) + Car tmp; + sprintf(tmp.color, "Pink"); + if(scan_delete(&myNdb, column_color, tmp.color) > 0) std::cout << "scan_delete: Success!" << std::endl << std::endl; } @@ -804,11 +810,12 @@ int main() /** * Note! color1 & 2 need to be of exact the same size as column defined */ - char color1[20] = "Blue"; - char color2[20] = "Black"; - std::cout << "Going to update all " << color1 - << " cars to " << color2 << " cars!" << std::endl; - if(scan_update(&myNdb, column_color, color1, color2) > 0) + Car tmp1, tmp2; + sprintf(tmp1.color, "Blue"); + sprintf(tmp2.color, "Black"); + std::cout << "Going to update all " << tmp1.color + << " cars to " << tmp2.color << " cars!" << std::endl; + if(scan_update(&myNdb, column_color, tmp1.color, tmp2.color) > 0) std::cout << "scan_update: Success!" << std::endl << std::endl; } if(scan_print(&myNdb) > 0) diff --git a/ndb/examples/ndbapi_scan_example/readme.txt b/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt index 47cb4bf9ffa..47cb4bf9ffa 100644 --- a/ndb/examples/ndbapi_scan_example/readme.txt +++ b/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt diff --git a/ndb/examples/ndbapi_simple_example/Makefile b/ndb/ndbapi-examples/ndbapi_simple_example/Makefile index 99d4bfe68a6..99d4bfe68a6 100644 --- a/ndb/examples/ndbapi_simple_example/Makefile +++ b/ndb/ndbapi-examples/ndbapi_simple_example/Makefile diff --git a/ndb/examples/ndbapi_simple_example/ndbapi_simple.cpp b/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp index 152d4fa44af..152d4fa44af 100644 --- a/ndb/examples/ndbapi_simple_example/ndbapi_simple.cpp +++ b/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp diff --git a/ndb/examples/ndbapi_simple_index_example/Makefile b/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile index dc17ff0eeaa..dc17ff0eeaa 100644 --- a/ndb/examples/ndbapi_simple_index_example/Makefile 
+++ b/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile diff --git a/ndb/examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp b/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp index 5afaf6078d1..5afaf6078d1 100644 --- a/ndb/examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp +++ b/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp index 7ffcde88ce2..5a534b36b59 100644 --- a/ndb/src/common/debugger/EventLogger.cpp +++ b/ndb/src/common/debugger/EventLogger.cpp @@ -33,1277 +33,698 @@ EventLoggerBase::~EventLoggerBase() } -/** - * This matrix defines which event should be printed when - * - * threshold - is in range [0-15] - * severity - DEBUG to ALERT (Type of log message) - */ -const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = { - // CONNECTION - { NDB_LE_Connected, LogLevel::llConnection, 8, Logger::LL_INFO }, - { NDB_LE_Disconnected, LogLevel::llConnection, 8, Logger::LL_ALERT }, - { NDB_LE_CommunicationClosed, LogLevel::llConnection, 8, Logger::LL_INFO }, - { NDB_LE_CommunicationOpened, LogLevel::llConnection, 8, Logger::LL_INFO }, - { NDB_LE_ConnectedApiVersion, LogLevel::llConnection, 8, Logger::LL_INFO }, - // CHECKPOINT - { NDB_LE_GlobalCheckpointStarted, LogLevel::llCheckpoint, 9, Logger::LL_INFO }, - { NDB_LE_GlobalCheckpointCompleted,LogLevel::llCheckpoint,10, Logger::LL_INFO }, - { NDB_LE_LocalCheckpointStarted, LogLevel::llCheckpoint, 7, Logger::LL_INFO }, - { NDB_LE_LocalCheckpointCompleted,LogLevel::llCheckpoint, 8, Logger::LL_INFO }, - { NDB_LE_LCPStoppedInCalcKeepGci, LogLevel::llCheckpoint, 0, Logger::LL_ALERT }, - { NDB_LE_LCPFragmentCompleted, LogLevel::llCheckpoint, 11, Logger::LL_INFO }, - { NDB_LE_UndoLogBlocked, LogLevel::llCheckpoint, 7, Logger::LL_INFO }, - - // STARTUP - { NDB_LE_NDBStartStarted, LogLevel::llStartUp, 1, Logger::LL_INFO }, - { 
NDB_LE_NDBStartCompleted, LogLevel::llStartUp, 1, Logger::LL_INFO }, - { NDB_LE_STTORRYRecieved, LogLevel::llStartUp,15, Logger::LL_INFO }, - { NDB_LE_StartPhaseCompleted, LogLevel::llStartUp, 4, Logger::LL_INFO }, - { NDB_LE_CM_REGCONF, LogLevel::llStartUp, 3, Logger::LL_INFO }, - { NDB_LE_CM_REGREF, LogLevel::llStartUp, 8, Logger::LL_INFO }, - { NDB_LE_FIND_NEIGHBOURS, LogLevel::llStartUp, 8, Logger::LL_INFO }, - { NDB_LE_NDBStopStarted, LogLevel::llStartUp, 1, Logger::LL_INFO }, - { NDB_LE_NDBStopAborted, LogLevel::llStartUp, 1, Logger::LL_INFO }, - { NDB_LE_StartREDOLog, LogLevel::llStartUp, 10, Logger::LL_INFO }, - { NDB_LE_StartLog, LogLevel::llStartUp, 10, Logger::LL_INFO }, - { NDB_LE_UNDORecordsExecuted, LogLevel::llStartUp, 15, Logger::LL_INFO }, - - // NODERESTART - { NDB_LE_NR_CopyDict, LogLevel::llNodeRestart, 8, Logger::LL_INFO }, - { NDB_LE_NR_CopyDistr, LogLevel::llNodeRestart, 8, Logger::LL_INFO }, - { NDB_LE_NR_CopyFragsStarted, LogLevel::llNodeRestart, 8, Logger::LL_INFO }, - { NDB_LE_NR_CopyFragDone, LogLevel::llNodeRestart, 10, Logger::LL_INFO }, - { NDB_LE_NR_CopyFragsCompleted, LogLevel::llNodeRestart, 8, Logger::LL_INFO }, - - { NDB_LE_NodeFailCompleted, LogLevel::llNodeRestart, 8, Logger::LL_ALERT}, - { NDB_LE_NODE_FAILREP, LogLevel::llNodeRestart, 8, Logger::LL_ALERT}, - { NDB_LE_ArbitState, LogLevel::llNodeRestart, 6, Logger::LL_INFO }, - { NDB_LE_ArbitResult, LogLevel::llNodeRestart, 2, Logger::LL_ALERT}, - { NDB_LE_GCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO }, - { NDB_LE_GCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO }, - { NDB_LE_LCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO }, - { NDB_LE_LCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO }, - - // STATISTIC - { NDB_LE_TransReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO }, - { NDB_LE_OperationReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO }, - { NDB_LE_TableCreated, LogLevel::llStatistic, 
7, Logger::LL_INFO }, - { NDB_LE_JobStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO }, - { NDB_LE_SendBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO }, - { NDB_LE_ReceiveBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO }, - { NDB_LE_MemoryUsage, LogLevel::llStatistic, 5, Logger::LL_INFO }, - - // ERROR - { NDB_LE_TransporterError, LogLevel::llError, 2, Logger::LL_ERROR }, - { NDB_LE_TransporterWarning, LogLevel::llError, 8, Logger::LL_WARNING }, - { NDB_LE_MissedHeartbeat, LogLevel::llError, 8, Logger::LL_WARNING }, - { NDB_LE_DeadDueToHeartbeat, LogLevel::llError, 8, Logger::LL_ALERT }, - { NDB_LE_WarningEvent, LogLevel::llError, 2, Logger::LL_WARNING }, - // INFO - { NDB_LE_SentHeartbeat, LogLevel::llInfo, 12, Logger::LL_INFO }, - { NDB_LE_CreateLogBytes, LogLevel::llInfo, 11, Logger::LL_INFO }, - { NDB_LE_InfoEvent, LogLevel::llInfo, 2, Logger::LL_INFO }, - - // Backup - { NDB_LE_BackupStarted, LogLevel::llBackup, 7, Logger::LL_INFO }, - { NDB_LE_BackupCompleted, LogLevel::llBackup, 7, Logger::LL_INFO }, - { NDB_LE_BackupFailedToStart, LogLevel::llBackup, 7, Logger::LL_ALERT}, - { NDB_LE_BackupAborted, LogLevel::llBackup, 7, Logger::LL_ALERT } -}; - -const Uint32 EventLoggerBase::matrixSize = sizeof(EventLoggerBase::matrix)/ - sizeof(EventRepLogLevelMatrix); - -const char* -EventLogger::getText(char * m_text, size_t m_text_len, - int type, - const Uint32* theData, NodeId nodeId) -{ - // TODO: Change the switch implementation... 
- char theNodeId[32]; - if (nodeId != 0){ - BaseString::snprintf(theNodeId, 32, "Node %u: ", nodeId); - } else { - theNodeId[0] = 0; - } - - Ndb_logevent_type eventType = (Ndb_logevent_type)type; - switch (eventType){ - case NDB_LE_Connected: - BaseString::snprintf(m_text, m_text_len, - "%sNode %u Connected", - theNodeId, - theData[1]); - break; - case NDB_LE_ConnectedApiVersion: - BaseString::snprintf(m_text, m_text_len, - "%sNode %u: API version %d.%d.%d", - theNodeId, - theData[1], - getMajor(theData[2]), - getMinor(theData[2]), - getBuild(theData[2])); - break; - case NDB_LE_Disconnected: - BaseString::snprintf(m_text, m_text_len, - "%sNode %u Disconnected", - theNodeId, - theData[1]); - break; - case NDB_LE_CommunicationClosed: - //----------------------------------------------------------------------- - // REPORT communication to node closed. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "%sCommunication to Node %u closed", - theNodeId, - theData[1]); - break; - case NDB_LE_CommunicationOpened: - //----------------------------------------------------------------------- - // REPORT communication to node opened. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "%sCommunication to Node %u opened", - theNodeId, - theData[1]); - break; - case NDB_LE_NDBStartStarted: - //----------------------------------------------------------------------- - // Start of NDB has been initiated. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "%sStart initiated (version %d.%d.%d)", - theNodeId , - getMajor(theData[1]), - getMinor(theData[1]), - getBuild(theData[1])); - break; - case NDB_LE_NDBStopStarted: - BaseString::snprintf(m_text, m_text_len, - "%s%s shutdown initiated", - theNodeId, - (theData[1] == 1 ? 
"Cluster" : "Node")); - break; - case NDB_LE_NDBStopAborted: - BaseString::snprintf(m_text, m_text_len, - "%sNode shutdown aborted", - theNodeId); - break; - case NDB_LE_NDBStartCompleted: - //----------------------------------------------------------------------- - // Start of NDB has been completed. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "%sStarted (version %d.%d.%d)", - theNodeId , - getMajor(theData[1]), - getMinor(theData[1]), - getBuild(theData[1])); - - break; - case NDB_LE_STTORRYRecieved: - //----------------------------------------------------------------------- - // STTORRY recevied after restart finished. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "%sSTTORRY received after restart finished", - theNodeId); - break; - case NDB_LE_StartPhaseCompleted:{ - //----------------------------------------------------------------------- - // REPORT Start phase completed. 
- //----------------------------------------------------------------------- - const char * type = "<Unknown>"; - switch((NodeState::StartType)theData[2]){ - case NodeState::ST_INITIAL_START: - type = "(initial start)"; - break; - case NodeState::ST_SYSTEM_RESTART: - type = "(system restart)"; - break; - case NodeState::ST_NODE_RESTART: - type = "(node restart)"; - break; - case NodeState::ST_INITIAL_NODE_RESTART: - type = "(initial node restart)"; - break; - case NodeState::ST_ILLEGAL_TYPE: - type = ""; - break; - default:{ - BaseString::snprintf(m_text, m_text_len, - "%sStart phase %u completed (unknown = %d)", - theNodeId, - theData[1], - theData[2]); - return m_text; - } - } - BaseString::snprintf(m_text, m_text_len, - "%sStart phase %u completed %s", - theNodeId, - theData[1], - type); - return m_text; - break; - } - case NDB_LE_CM_REGCONF: - BaseString::snprintf(m_text, m_text_len, - "%sCM_REGCONF president = %u, own Node = %u, our dynamic id = %u" - , - theNodeId, - theData[2], - theData[1], - theData[3]); - break; - case NDB_LE_CM_REGREF: - { - const char* line = ""; - switch (theData[3]) { - case 0: - line = "Busy"; - break; - case 1: - line = "Election with wait = false"; - break; - case 2: - line = "Election with wait = false"; - break; - case 3: - line = "Not president"; - break; - case 4: - line = "Election without selecting new candidate"; - break; - default: - line = "No such cause"; - break; - }//switch - BaseString::snprintf(m_text, m_text_len, - "%sCM_REGREF from Node %u to our Node %u. Cause = %s", - theNodeId, - theData[2], - theData[1], - line); - } - break; - case NDB_LE_FIND_NEIGHBOURS: - //----------------------------------------------------------------------- - // REPORT Node Restart copied a fragment. 
- //----------------------------------------------------------------------- - BaseString::snprintf(m_text, - m_text_len, - "%sWe are Node %u with dynamic ID %u, our left neighbour " - "is Node %u, our right is Node %u", - theNodeId, - theData[1], - theData[4], - theData[2], - theData[3]); - break; - case NDB_LE_NodeFailCompleted: - //----------------------------------------------------------------------- - // REPORT Node failure phase completed. - //----------------------------------------------------------------------- - if (theData[1] == 0) - { - if (theData[3] != 0) { - BaseString::snprintf(m_text, m_text_len, - "%sNode %u completed failure of Node %u", - theNodeId, - theData[3], - theData[2]); - } else { - BaseString::snprintf(m_text, m_text_len, - "%sAll nodes completed failure of Node %u", - theNodeId, - theData[2]); - }//if - } else { - const char* line = ""; - if (theData[1] == DBTC){ - line = "DBTC"; - }else if (theData[1] == DBDICT){ - line = "DBDICT"; - }else if (theData[1] == DBDIH){ - line = "DBDIH"; - }else if (theData[1] == DBLQH){ - line = "DBLQH"; - } - - BaseString::snprintf(m_text, m_text_len, - "%sNode failure of %u %s completed", - theNodeId, - theData[2], - line); - } - break; - case NDB_LE_NODE_FAILREP: - BaseString::snprintf(m_text, - m_text_len, - "%sNode %u has failed. The Node state at failure " - "was %u", - theNodeId, - theData[1], - theData[2]); +#define QQQQ char *m_text, size_t m_text_len, const Uint32* theData - break; - case NDB_LE_ArbitState: - //----------------------------------------------------------------------- - // REPORT arbitrator found or lost. 
- //----------------------------------------------------------------------- - { const ArbitSignalData* sd = (ArbitSignalData*)theData; - char ticketText[ArbitTicket::TextLength + 1]; - char errText[ArbitCode::ErrTextLength + 1]; - const unsigned code = sd->code & 0xFFFF; - const unsigned state = sd->code >> 16; - switch (code) { - case ArbitCode::ThreadStart: - BaseString::snprintf(m_text, m_text_len, - "%sPresident restarts arbitration thread [state=%u]", - theNodeId, state); - break; - case ArbitCode::PrepPart2: - sd->ticket.getText(ticketText, sizeof(ticketText)); - BaseString::snprintf(m_text, m_text_len, - "%sPrepare arbitrator node %u [ticket=%s]", - theNodeId, sd->node, ticketText); - break; - case ArbitCode::PrepAtrun: - sd->ticket.getText(ticketText, sizeof(ticketText)); - BaseString::snprintf(m_text, m_text_len, - "%sReceive arbitrator node %u [ticket=%s]", - theNodeId, sd->node, ticketText); - break; - case ArbitCode::ApiStart: - sd->ticket.getText(ticketText, sizeof(ticketText)); - BaseString::snprintf(m_text, m_text_len, - "%sStarted arbitrator node %u [ticket=%s]", - theNodeId, sd->node, ticketText); - break; - case ArbitCode::ApiFail: - BaseString::snprintf(m_text, m_text_len, - "%sLost arbitrator node %u - process failure [state=%u]", - theNodeId, sd->node, state); - break; - case ArbitCode::ApiExit: - BaseString::snprintf(m_text, m_text_len, - "%sLost arbitrator node %u - process exit [state=%u]", - theNodeId, sd->node, state); - break; - default: - ArbitCode::getErrText(code, errText, sizeof(errText)); - BaseString::snprintf(m_text, m_text_len, - "%sLost arbitrator node %u - %s [state=%u]", - theNodeId, sd->node, errText, state); - break; - } - } - break; - case NDB_LE_ArbitResult: - //----------------------------------------------------------------------- - // REPORT arbitration result (the failures may not reach us). 
- //----------------------------------------------------------------------- - { const ArbitSignalData* sd = (ArbitSignalData*)theData; - char errText[ArbitCode::ErrTextLength + 1]; - const unsigned code = sd->code & 0xFFFF; - const unsigned state = sd->code >> 16; - switch (code) { - case ArbitCode::LoseNodes: - BaseString::snprintf(m_text, m_text_len, - "%sArbitration check lost - less than 1/2 nodes left", - theNodeId); - break; - case ArbitCode::WinNodes: - BaseString::snprintf(m_text, m_text_len, - "%sArbitration check won - all node groups and more than 1/2 nodes left", - theNodeId); - break; - case ArbitCode::WinGroups: - BaseString::snprintf(m_text, m_text_len, - "%sArbitration check won - node group majority", - theNodeId); - break; - case ArbitCode::LoseGroups: - BaseString::snprintf(m_text, m_text_len, - "%sArbitration check lost - missing node group", - theNodeId); - break; - case ArbitCode::Partitioning: - BaseString::snprintf(m_text, m_text_len, - "%sNetwork partitioning - arbitration required", - theNodeId); - break; - case ArbitCode::WinChoose: - BaseString::snprintf(m_text, m_text_len, - "%sArbitration won - positive reply from node %u", - theNodeId, sd->node); - break; - case ArbitCode::LoseChoose: - BaseString::snprintf(m_text, m_text_len, - "%sArbitration lost - negative reply from node %u", - theNodeId, sd->node); - break; - case ArbitCode::LoseNorun: - BaseString::snprintf(m_text, m_text_len, - "%sNetwork partitioning - no arbitrator available", - theNodeId); - break; - case ArbitCode::LoseNocfg: - BaseString::snprintf(m_text, m_text_len, - "%sNetwork partitioning - no arbitrator configured", - theNodeId); - break; - default: - ArbitCode::getErrText(code, errText, sizeof(errText)); - BaseString::snprintf(m_text, m_text_len, - "%sArbitration failure - %s [state=%u]", - theNodeId, errText, state); - break; - } - } +void getTextConnected(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Node %u Connected", + theData[1]); +} +void 
getTextConnectedApiVersion(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Node %u: API version %d.%d.%d", + theData[1], + getMajor(theData[2]), + getMinor(theData[2]), + getBuild(theData[2])); +} +void getTextDisconnected(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Node %u Disconnected", + theData[1]); +} +void getTextCommunicationClosed(QQQQ) { + //----------------------------------------------------------------------- + // REPORT communication to node closed. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Communication to Node %u closed", + theData[1]); +} +void getTextCommunicationOpened(QQQQ) { + //----------------------------------------------------------------------- + // REPORT communication to node opened. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Communication to Node %u opened", + theData[1]); +} +void getTextNDBStartStarted(QQQQ) { + //----------------------------------------------------------------------- + // Start of NDB has been initiated. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Start initiated (version %d.%d.%d)", + getMajor(theData[1]), + getMinor(theData[1]), + getBuild(theData[1])); +} +void getTextNDBStopStarted(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "%s shutdown initiated", + (theData[1] == 1 ? "Cluster" : "Node")); +} +void getTextNDBStopAborted(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Node shutdown aborted"); +} +void getTextNDBStartCompleted(QQQQ) { + //----------------------------------------------------------------------- + // Start of NDB has been completed. 
+ //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Started (version %d.%d.%d)", + getMajor(theData[1]), + getMinor(theData[1]), + getBuild(theData[1])); +} +void getTextSTTORRYRecieved(QQQQ) { + //----------------------------------------------------------------------- + // STTORRY recevied after restart finished. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "STTORRY received after restart finished"); +} +void getTextStartPhaseCompleted(QQQQ) { + //----------------------------------------------------------------------- + // REPORT Start phase completed. + //----------------------------------------------------------------------- + const char *type = "<Unknown>"; + switch((NodeState::StartType)theData[2]){ + case NodeState::ST_INITIAL_START: + type = "(initial start)"; break; - case NDB_LE_GlobalCheckpointStarted: - //----------------------------------------------------------------------- - // This event reports that a global checkpoint has been started and this - // node is the master of this global checkpoint. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, - m_text_len, - "%sGlobal checkpoint %u started", - theNodeId, - theData[1]); + case NodeState::ST_SYSTEM_RESTART: + type = "(system restart)"; break; - case NDB_LE_GlobalCheckpointCompleted: - //----------------------------------------------------------------------- - // This event reports that a global checkpoint has been completed on this - // node and the node is the master of this global checkpoint. 
- //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "%sGlobal checkpoint %u completed", - theNodeId, - theData[1]); + case NodeState::ST_NODE_RESTART: + type = "(node restart)"; break; - case NDB_LE_LocalCheckpointStarted: - //----------------------------------------------------------------------- - // This event reports that a local checkpoint has been started and this - // node is the master of this local checkpoint. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, - m_text_len, - "%sLocal checkpoint %u started. " - "Keep GCI = %u oldest restorable GCI = %u", - theNodeId, - theData[1], - theData[2], - theData[3]); + case NodeState::ST_INITIAL_NODE_RESTART: + type = "(initial node restart)"; break; - case NDB_LE_LocalCheckpointCompleted: - //----------------------------------------------------------------------- - // This event reports that a local checkpoint has been completed on this - // node and the node is the master of this local checkpoint. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, - m_text_len, - "%sLocal checkpoint %u completed", - theNodeId, - theData[1]); + case NodeState::ST_ILLEGAL_TYPE: + type = ""; break; - case NDB_LE_TableCreated: - //----------------------------------------------------------------------- - // This event reports that a table has been created. 
- //----------------------------------------------------------------------- + default: BaseString::snprintf(m_text, m_text_len, - "%sTable with ID = %u created", - theNodeId, - theData[1]); - break; - case NDB_LE_LCPStoppedInCalcKeepGci: - if (theData[1] == 0) - BaseString::snprintf(m_text, m_text_len, - "%sLocal Checkpoint stopped in CALCULATED_KEEP_GCI", - theNodeId); - break; - case NDB_LE_NR_CopyDict: - //----------------------------------------------------------------------- - // REPORT Node Restart completed copy of dictionary information. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, - m_text_len, - "%sNode restart completed copy of dictionary information", - theNodeId); - break; - case NDB_LE_NR_CopyDistr: - //----------------------------------------------------------------------- - // REPORT Node Restart completed copy of distribution information. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, - m_text_len, - "%sNode restart completed copy of distribution information", - theNodeId); - break; - case NDB_LE_NR_CopyFragsStarted: - //----------------------------------------------------------------------- - // REPORT Node Restart is starting to copy the fragments. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, - m_text_len, - "%sNode restart starting to copy the fragments " - "to Node %u", - theNodeId, - theData[1]); - break; - case NDB_LE_NR_CopyFragDone: - //----------------------------------------------------------------------- - // REPORT Node Restart copied a fragment. 
- //----------------------------------------------------------------------- - BaseString::snprintf(m_text, - m_text_len, - "%sTable ID = %u, fragment ID = %u have been copied " - "to Node %u", - theNodeId, - theData[2], - theData[3], - theData[1]); - break; - case NDB_LE_NR_CopyFragsCompleted: - BaseString::snprintf(m_text, - m_text_len, - "%sNode restart completed copying the fragments " - "to Node %u", - theNodeId, - theData[1]); - break; - case NDB_LE_LCPFragmentCompleted: - BaseString::snprintf(m_text, - m_text_len, - "%sTable ID = %u, fragment ID = %u has completed LCP " - "on Node %u", - theNodeId, - theData[2], - theData[3], - theData[1]); + "Start phase %u completed (unknown = %d)", + theData[1], + theData[2]); + return; + } + BaseString::snprintf(m_text, m_text_len, + "Start phase %u completed %s", + theData[1], + type); +} +void getTextCM_REGCONF(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "CM_REGCONF president = %u, own Node = %u, our dynamic id = %u", + theData[2], + theData[1], + theData[3]); +} +void getTextCM_REGREF(QQQQ) { + const char* line = ""; + switch (theData[3]) { + case 0: + line = "Busy"; break; - case NDB_LE_TransReportCounters: - // ------------------------------------------------------------------- - // Report information about transaction activity once per 10 seconds. - // ------------------------------------------------------------------- - BaseString::snprintf(m_text, - m_text_len, - "%sTrans. 
Count = %u, Commit Count = %u, " - "Read Count = %u, Simple Read Count = %u,\n" - "Write Count = %u, AttrInfo Count = %u, " - "Concurrent Operations = %u, Abort Count = %u\n" - " Scans: %u Range scans: %u", - theNodeId, - theData[1], - theData[2], - theData[3], - theData[4], - theData[5], - theData[6], - theData[7], - theData[8], - theData[9], - theData[10]); + case 1: + line = "Election with wait = false"; break; - case NDB_LE_OperationReportCounters: - BaseString::snprintf(m_text, m_text_len, - "%sOperations=%u", - theNodeId, - theData[1]); + case 2: + line = "Election with wait = false"; break; - case NDB_LE_UndoLogBlocked: - //----------------------------------------------------------------------- - // REPORT Undo Logging blocked due to buffer near to overflow. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, - m_text_len, - "%sACC Blocked %u and TUP Blocked %u times last second", - theNodeId, - theData[1], - theData[2]); - break; - case NDB_LE_TransporterError: - case NDB_LE_TransporterWarning: - BaseString::snprintf(m_text, - m_text_len, - "%sTransporter to node %d reported error 0x%x", - theNodeId, - theData[1], - theData[2]); - break; - case NDB_LE_MissedHeartbeat: - //----------------------------------------------------------------------- - // REPORT Undo Logging blocked due to buffer near to overflow. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, - m_text_len, - "%sNode %d missed heartbeat %d", - theNodeId, - theData[1], - theData[2]); - break; - case NDB_LE_DeadDueToHeartbeat: - //----------------------------------------------------------------------- - // REPORT Undo Logging blocked due to buffer near to overflow. 
- //----------------------------------------------------------------------- - BaseString::snprintf(m_text, - m_text_len, - "%sNode %d declared dead due to missed heartbeat", - theNodeId, - theData[1]); - break; - case NDB_LE_JobStatistic: - BaseString::snprintf(m_text, - m_text_len, - "%sMean loop Counter in doJob last 8192 times = %u", - theNodeId, - theData[1]); + case 3: + line = "Not president"; break; - case NDB_LE_SendBytesStatistic: - BaseString::snprintf(m_text, - m_text_len, - "%sMean send size to Node = %d last 4096 sends = %u bytes", - theNodeId, - theData[1], - theData[2]); + case 4: + line = "Election without selecting new candidate"; break; - case NDB_LE_ReceiveBytesStatistic: - BaseString::snprintf(m_text, - m_text_len, - "%sMean receive size to Node = %d last 4096 sends = %u bytes", - theNodeId, - theData[1], - theData[2]); - break; - case NDB_LE_SentHeartbeat: - BaseString::snprintf(m_text, - m_text_len, - "%sNode Sent Heartbeat to node = %d", - theNodeId, - theData[1]); - break; - case NDB_LE_CreateLogBytes: - BaseString::snprintf(m_text, - m_text_len, - "%sLog part %u, log file %u, MB %u", - theNodeId, - theData[1], - theData[2], - theData[3]); - break; - case NDB_LE_StartLog: - BaseString::snprintf(m_text, - m_text_len, - "%sLog part %u, start MB %u, stop MB %u, last GCI, log exec %u", - theNodeId, - theData[1], - theData[2], - theData[3], - theData[4]); - break; - case NDB_LE_StartREDOLog: - BaseString::snprintf(m_text, - m_text_len, - "%sNode: %d StartLog: [GCI Keep: %d LastCompleted: %d NewestRestorable: %d]", - theNodeId, - theData[1], - theData[2], - theData[3], - theData[4]); - break; - case NDB_LE_UNDORecordsExecuted:{ - const char* line = ""; - if (theData[1] == DBTUP){ - line = "DBTUP"; - }else if (theData[1] == DBACC){ - line = "DBACC"; - } - - BaseString::snprintf(m_text, - m_text_len, - "%s UNDO %s %d [%d %d %d %d %d %d %d %d %d]", - theNodeId, - line, - theData[2], - theData[3], - theData[4], - theData[5], - theData[6], - 
theData[7], - theData[8], - theData[9], - theData[10], - theData[11]); - } - break; - case NDB_LE_InfoEvent: - BaseString::snprintf(m_text, - m_text_len, - "%s%s", - theNodeId, - (char *)&theData[1]); - break; - case NDB_LE_WarningEvent: - BaseString::snprintf(m_text, - m_text_len, - "%s%s", - theNodeId, - (char *)&theData[1]); - break; - case NDB_LE_GCP_TakeoverStarted: - BaseString::snprintf(m_text, - m_text_len, - "%sGCP Take over started", theNodeId); - break; - case NDB_LE_GCP_TakeoverCompleted: - BaseString::snprintf(m_text, - m_text_len, - "%sGCP Take over completed", theNodeId); - break; - case NDB_LE_LCP_TakeoverStarted: - BaseString::snprintf(m_text, - m_text_len, - "%sLCP Take over started", theNodeId); - break; - case NDB_LE_LCP_TakeoverCompleted: - BaseString::snprintf(m_text, - m_text_len, - "%sLCP Take over completed (state = %d)", - theNodeId, theData[1]); - break; - case NDB_LE_MemoryUsage:{ - const int gth = theData[1]; - const int size = theData[2]; - const int used = theData[3]; - const int total = theData[4]; - const int block = theData[5]; - const int percent = (used*100)/total; - - BaseString::snprintf(m_text, m_text_len, - "%s%s usage %s %d%s(%d %dK pages of total %d)", - theNodeId, - (block==DBACC ? "Index" : (block == DBTUP ?"Data":"<unknown>")), - (gth == 0 ? "is" : (gth > 0 ? 
"increased to" : "decreased to")), - percent, "%", - used, size/1024, total - ); + default: + line = "No such cause"; break; - } - case NDB_LE_GrepSubscriptionInfo : - { - GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1]; - switch(event) { - case GrepEvent::GrepSS_CreateSubIdConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: Created subscription id" - " (subId=%d,SubKey=%d)" - " Return code: %d.", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_CreateSubIdConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: Created subscription id" - " (subId=%d,SubKey=%d)" - " Return code: %d.", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubCreateConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - const int nodegrp = theData[5]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: Created subscription using" - " (subId=%d,SubKey=%d)" - " in primary system. Primary system has %d nodegroup(s)." - " Return code: %d", - subId, - subKey, - nodegrp, - err); - break; - } - case GrepEvent::GrepPS_SubCreateConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: All participants have created " - "subscriptions" - " using (subId=%d,SubKey=%d)." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubStartMetaConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: Logging started on meta data changes." 
- " using (subId=%d,SubKey=%d)" - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_SubStartMetaConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: All participants have started " - "logging meta data" - " changes on the subscription subId=%d,SubKey=%d) " - "(N.I yet)." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubStartDataConf: { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: Logging started on table data changes " - " using (subId=%d,SubKey=%d)" - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_SubStartDataConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: All participants have started logging " - "table data changes on the subscription " - "subId=%d,SubKey=%d)." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_SubSyncMetaConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: All participants have started " - " synchronization on meta data (META SCAN) using " - "(subId=%d,SubKey=%d)." 
- " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubSyncMetaConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: Synchronization started (META SCAN) on " - " meta data using (subId=%d,SubKey=%d)" - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepPS_SubSyncDataConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: All participants have started " - "synchronization " - " on table data (DATA SCAN) using (subId=%d,SubKey=%d)." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubSyncDataConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - const int gci = theData[5]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: Synchronization started (DATA SCAN) on " - "table data using (subId=%d,SubKey=%d). GCI = %d" - " Return code: %d", - subId, - subKey, - gci, - err); - break; - } - case GrepEvent::GrepPS_SubRemoveConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; + }//switch + + BaseString::snprintf(m_text, m_text_len, + "CM_REGREF from Node %u to our Node %u. Cause = %s", + theData[2], + theData[1], + line); +} +void getTextFIND_NEIGHBOURS(QQQQ) { + //----------------------------------------------------------------------- + // REPORT Node Restart copied a fragment. 
+ //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "We are Node %u with dynamic ID %u, our left neighbour " + "is Node %u, our right is Node %u", + theData[1], + theData[4], + theData[2], + theData[3]); +} +void getTextNodeFailCompleted(QQQQ) { + //----------------------------------------------------------------------- + // REPORT Node failure phase completed. + //----------------------------------------------------------------------- + if (theData[1] == 0) + { + if (theData[3] != 0) { BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: All participants have removed " - "subscription (subId=%d,SubKey=%d). I have cleaned " - "up resources I've used." - " Return code: %d", - subId, - subKey, - err); - break; - } - case GrepEvent::GrepSS_SubRemoveConf: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; + "Node %u completed failure of Node %u", + theData[3], + theData[2]); + } else { BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: Removed subscription " - "(subId=%d,SubKey=%d)" - " Return code: %d", - subId, - subKey, - err); - break; - } - default: - BaseString::snprintf(m_text, - m_text_len, - "%sUnknown GrepSubscriptonInfo event: %d", - theNodeId, - theData[1]); + "All nodes completed failure of Node %u", + theData[2]); + }//if + } else { + const char* line = ""; + if (theData[1] == DBTC){ + line = "DBTC"; + }else if (theData[1] == DBDICT){ + line = "DBDICT"; + }else if (theData[1] == DBDIH){ + line = "DBDIH"; + }else if (theData[1] == DBLQH){ + line = "DBLQH"; } - break; + BaseString::snprintf(m_text, m_text_len, + "Node failure of %u %s completed", + theData[2], + line); } - - case NDB_LE_GrepSubscriptionAlert : +} +void getTextNODE_FAILREP(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Node %u has failed. 
The Node state at failure " + "was %u", + theData[1], + theData[2]); +} +void getTextArbitState(QQQQ) { + //----------------------------------------------------------------------- + // REPORT arbitrator found or lost. + //----------------------------------------------------------------------- { - GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1]; - switch(event) - { - case GrepEvent::GrepSS_CreateSubIdRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord:Error code: %d Error message: %s" - " (subId=%d,SubKey=%d)", - err, - GrepError::getErrorDesc((GrepError::GE_Code)err), - subId, - subKey); - break; - } - case GrepEvent::GrepSS_SubCreateRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: FAILED to Created subscription using" - " (subId=%d,SubKey=%d)in primary system." - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err)); - break; - } - case GrepEvent::GrepSS_SubStartMetaRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: Logging failed to start on meta " - "data changes." 
- " using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err)); - break; - } - case GrepEvent::GrepSS_SubStartDataRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: Logging FAILED to start on table data " - " changes using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err)); - break; - } - case GrepEvent::GrepSS_SubSyncMetaRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: Synchronization FAILED (META SCAN) on " - " meta data using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err)); - break; - } - case GrepEvent::GrepSS_SubSyncDataRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - const int gci = theData[5]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: Synchronization FAILED (DATA SCAN) on " - "table data using (subId=%d,SubKey=%d). GCI = %d" - " Error code: %d Error Message: %s", - subId, - subKey, - gci, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err)); - break; - } - case GrepEvent::GrepSS_SubRemoveRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::SSCoord: Failed to remove subscription " - "(subId=%d,SubKey=%d). 
" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err) - ); - break; - } - - case GrepEvent::GrepPS_CreateSubIdRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: Error code: %d Error Message: %s" - " (subId=%d,SubKey=%d)", - err, - GrepError::getErrorDesc((GrepError::GE_Code)err), - subId, - subKey); - break; - } - case GrepEvent::GrepPS_SubCreateRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: FAILED to Created subscription using" - " (subId=%d,SubKey=%d)in primary system." - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err)); + const ArbitSignalData* sd = (ArbitSignalData*)theData; + char ticketText[ArbitTicket::TextLength + 1]; + char errText[ArbitCode::ErrTextLength + 1]; + const unsigned code = sd->code & 0xFFFF; + const unsigned state = sd->code >> 16; + switch (code) { + case ArbitCode::ThreadStart: + BaseString::snprintf(m_text, m_text_len, + "President restarts arbitration thread [state=%u]", + state); + break; + case ArbitCode::PrepPart2: + sd->ticket.getText(ticketText, sizeof(ticketText)); + BaseString::snprintf(m_text, m_text_len, + "Prepare arbitrator node %u [ticket=%s]", + sd->node, ticketText); + break; + case ArbitCode::PrepAtrun: + sd->ticket.getText(ticketText, sizeof(ticketText)); + BaseString::snprintf(m_text, m_text_len, + "Receive arbitrator node %u [ticket=%s]", + sd->node, ticketText); + break; + case ArbitCode::ApiStart: + sd->ticket.getText(ticketText, sizeof(ticketText)); + BaseString::snprintf(m_text, m_text_len, + "Started arbitrator node %u [ticket=%s]", + sd->node, ticketText); + break; + case ArbitCode::ApiFail: + BaseString::snprintf(m_text, m_text_len, + "Lost arbitrator node %u 
- process failure [state=%u]", + sd->node, state); + break; + case ArbitCode::ApiExit: + BaseString::snprintf(m_text, m_text_len, + "Lost arbitrator node %u - process exit [state=%u]", + sd->node, state); break; - } - case GrepEvent::GrepPS_SubStartMetaRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: Logging failed to start on meta " - "data changes." - " using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err)); - break; - } - case GrepEvent::GrepPS_SubStartDataRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: Logging FAILED to start on table data " - " changes using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err)); - break; - } - case GrepEvent::GrepPS_SubSyncMetaRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: Synchronization FAILED (META SCAN) on " - " meta data using (subId=%d,SubKey=%d)" - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err)); - break; - } - case GrepEvent::GrepPS_SubSyncDataRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - const int gci = theData[5]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: Synchronization FAILED (DATA SCAN) on " - "table data using (subId=%d,SubKey=%d). GCI = %d. 
" - " Error code: %d Error Message: %s", - subId, - subKey, - gci, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err)); + default: + ArbitCode::getErrText(code, errText, sizeof(errText)); + BaseString::snprintf(m_text, m_text_len, + "Lost arbitrator node %u - %s [state=%u]", + sd->node, errText, state); break; } - case GrepEvent::GrepPS_SubRemoveRef: - { - const int subId = theData[2]; - const int subKey = theData[3]; - const int err = theData[4]; - BaseString::snprintf(m_text, m_text_len, - "Grep::PSCoord: Failed to remove subscription " - "(subId=%d,SubKey=%d)." - " Error code: %d Error Message: %s", - subId, - subKey, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err)); - break; - } - case GrepEvent::Rep_Disconnect: - { - const int err = theData[4]; - const int nodeId = theData[5]; - BaseString::snprintf(m_text, m_text_len, - "Rep: Node %d." - " Error code: %d Error Message: %s", - nodeId, - err, - GrepError::getErrorDesc((GrepError::GE_Code)err)); + } +} + +void getTextArbitResult(QQQQ) { + //----------------------------------------------------------------------- + // REPORT arbitration result (the failures may not reach us). 
+ //----------------------------------------------------------------------- + { + const ArbitSignalData* sd = (ArbitSignalData*)theData; + char errText[ArbitCode::ErrTextLength + 1]; + const unsigned code = sd->code & 0xFFFF; + const unsigned state = sd->code >> 16; + switch (code) { + case ArbitCode::LoseNodes: + BaseString::snprintf(m_text, m_text_len, + "Arbitration check lost - less than 1/2 nodes left"); + break; + case ArbitCode::WinNodes: + BaseString::snprintf(m_text, m_text_len, + "Arbitration check won - all node groups and more than 1/2 nodes left"); + break; + case ArbitCode::WinGroups: + BaseString::snprintf(m_text, m_text_len, + "Arbitration check won - node group majority"); + break; + case ArbitCode::LoseGroups: + BaseString::snprintf(m_text, m_text_len, + "Arbitration check lost - missing node group"); + break; + case ArbitCode::Partitioning: + BaseString::snprintf(m_text, m_text_len, + "Network partitioning - arbitration required"); + break; + case ArbitCode::WinChoose: + BaseString::snprintf(m_text, m_text_len, + "Arbitration won - positive reply from node %u", + sd->node); + break; + case ArbitCode::LoseChoose: + BaseString::snprintf(m_text, m_text_len, + "Arbitration lost - negative reply from node %u", + sd->node); + break; + case ArbitCode::LoseNorun: + BaseString::snprintf(m_text, m_text_len, + "Network partitioning - no arbitrator available"); + break; + case ArbitCode::LoseNocfg: + BaseString::snprintf(m_text, m_text_len, + "Network partitioning - no arbitrator configured"); break; - } - - default: - BaseString::snprintf(m_text, - m_text_len, - "%sUnknown GrepSubscriptionAlert event: %d", - theNodeId, - theData[1]); - break; + ArbitCode::getErrText(code, errText, sizeof(errText)); + BaseString::snprintf(m_text, m_text_len, + "Arbitration failure - %s [state=%u]", + errText, state); + break; } - break; } - - case NDB_LE_BackupStarted: - BaseString::snprintf(m_text, - m_text_len, - "%sBackup %d started from node %d", - theNodeId, theData[2], 
refToNode(theData[1])); - break; - case NDB_LE_BackupFailedToStart: - BaseString::snprintf(m_text, - m_text_len, - "%sBackup request from %d failed to start. Error: %d", - theNodeId, refToNode(theData[1]), theData[2]); - break; - case NDB_LE_BackupCompleted: - BaseString::snprintf(m_text, - m_text_len, - "%sBackup %u started from node %u completed\n" - " StartGCP: %u StopGCP: %u\n" - " #Records: %u #LogRecords: %u\n" - " Data: %u bytes Log: %u bytes", - theNodeId, theData[2], refToNode(theData[1]), - theData[3], theData[4], theData[6], theData[8], - theData[5], theData[7]); - break; - case NDB_LE_BackupAborted: - BaseString::snprintf(m_text, - m_text_len, - "%sBackup %d started from %d has been aborted. Error: %d", - theNodeId, - theData[2], - refToNode(theData[1]), - theData[3]); - break; - default: - BaseString::snprintf(m_text, - m_text_len, - "%sUnknown event: %d", - theNodeId, - theData[0]); - +} +void getTextGlobalCheckpointStarted(QQQQ) { + //----------------------------------------------------------------------- + // This event reports that a global checkpoint has been started and this + // node is the master of this global checkpoint. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Global checkpoint %u started", + theData[1]); +} +void getTextGlobalCheckpointCompleted(QQQQ) { + //----------------------------------------------------------------------- + // This event reports that a global checkpoint has been completed on this + // node and the node is the master of this global checkpoint. 
+ //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Global checkpoint %u completed", + theData[1]); +} +void getTextLocalCheckpointStarted(QQQQ) { + //----------------------------------------------------------------------- + // This event reports that a local checkpoint has been started and this + // node is the master of this local checkpoint. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Local checkpoint %u started. " + "Keep GCI = %u oldest restorable GCI = %u", + theData[1], + theData[2], + theData[3]); +} +void getTextLocalCheckpointCompleted(QQQQ) { + //----------------------------------------------------------------------- + // This event reports that a local checkpoint has been completed on this + // node and the node is the master of this local checkpoint. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Local checkpoint %u completed", + theData[1]); +} +void getTextTableCreated(QQQQ) { + //----------------------------------------------------------------------- + // This event reports that a table has been created. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Table with ID = %u created", + theData[1]); +} +/* STRANGE */ +void getTextLCPStoppedInCalcKeepGci(QQQQ) { + if (theData[1] == 0) + BaseString::snprintf(m_text, m_text_len, + "Local Checkpoint stopped in CALCULATED_KEEP_GCI"); +} +void getTextNR_CopyDict(QQQQ) { + //----------------------------------------------------------------------- + // REPORT Node Restart completed copy of dictionary information. 
+ //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Node restart completed copy of dictionary information"); +} +void getTextNR_CopyDistr(QQQQ) { + //----------------------------------------------------------------------- + // REPORT Node Restart completed copy of distribution information. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Node restart completed copy of distribution information"); +} +void getTextNR_CopyFragsStarted(QQQQ) { + //----------------------------------------------------------------------- + // REPORT Node Restart is starting to copy the fragments. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Node restart starting to copy the fragments " + "to Node %u", + theData[1]); +} +void getTextNR_CopyFragDone(QQQQ) { + //----------------------------------------------------------------------- + // REPORT Node Restart copied a fragment. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Table ID = %u, fragment ID = %u have been copied " + "to Node %u", + theData[2], + theData[3], + theData[1]); +} +void getTextNR_CopyFragsCompleted(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Node restart completed copying the fragments " + "to Node %u", + theData[1]); +} +void getTextLCPFragmentCompleted(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Table ID = %u, fragment ID = %u has completed LCP " + "on Node %u", + theData[2], + theData[3], + theData[1]); +} +void getTextTransReportCounters(QQQQ) { + // ------------------------------------------------------------------- + // Report information about transaction activity once per 10 seconds. + // ------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Trans. 
Count = %u, Commit Count = %u, " + "Read Count = %u, Simple Read Count = %u,\n" + "Write Count = %u, AttrInfo Count = %u, " + "Concurrent Operations = %u, Abort Count = %u\n" + " Scans: %u Range scans: %u", + theData[1], + theData[2], + theData[3], + theData[4], + theData[5], + theData[6], + theData[7], + theData[8], + theData[9], + theData[10]); +} +void getTextOperationReportCounters(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Operations=%u", + theData[1]); +} +void getTextUndoLogBlocked(QQQQ) { + //----------------------------------------------------------------------- + // REPORT Undo Logging blocked due to buffer near to overflow. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "ACC Blocked %u and TUP Blocked %u times last second", + theData[1], + theData[2]); +} +void getTextTransporterError(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Transporter to node %d reported error 0x%x", + theData[1], + theData[2]); +} +void getTextTransporterWarning(QQQQ) { + getTextTransporterError(m_text, m_text_len, theData); +} +void getTextMissedHeartbeat(QQQQ) { + //----------------------------------------------------------------------- + // REPORT Undo Logging blocked due to buffer near to overflow. + //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Node %d missed heartbeat %d", + theData[1], + theData[2]); +} +void getTextDeadDueToHeartbeat(QQQQ) { + //----------------------------------------------------------------------- + // REPORT Undo Logging blocked due to buffer near to overflow. 
+ //----------------------------------------------------------------------- + BaseString::snprintf(m_text, m_text_len, + "Node %d declared dead due to missed heartbeat", + theData[1]); +} +void getTextJobStatistic(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Mean loop Counter in doJob last 8192 times = %u", + theData[1]); +} +void getTextSendBytesStatistic(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Mean send size to Node = %d last 4096 sends = %u bytes", + theData[1], + theData[2]); +} +void getTextReceiveBytesStatistic(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Mean receive size to Node = %d last 4096 sends = %u bytes", + theData[1], + theData[2]); +} +void getTextSentHeartbeat(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Node Sent Heartbeat to node = %d", + theData[1]); +} +void getTextCreateLogBytes(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Log part %u, log file %u, MB %u", + theData[1], + theData[2], + theData[3]); +} +void getTextStartLog(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Log part %u, start MB %u, stop MB %u, last GCI, log exec %u", + theData[1], + theData[2], + theData[3], + theData[4]); +} +void getTextStartREDOLog(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Node: %d StartLog: [GCI Keep: %d LastCompleted: %d NewestRestorable: %d]", + theData[1], + theData[2], + theData[3], + theData[4]); +} +void getTextUNDORecordsExecuted(QQQQ) { + const char* line = ""; + if (theData[1] == DBTUP){ + line = "DBTUP"; + }else if (theData[1] == DBACC){ + line = "DBACC"; } - return m_text; + + BaseString::snprintf(m_text, m_text_len, + " UNDO %s %d [%d %d %d %d %d %d %d %d %d]", + line, + theData[2], + theData[3], + theData[4], + theData[5], + theData[6], + theData[7], + theData[8], + theData[9], + theData[10], + theData[11]); +} +void getTextInfoEvent(QQQQ) { + BaseString::snprintf(m_text, m_text_len, (char *)&theData[1]); +} +void getTextWarningEvent(QQQQ) { + BaseString::snprintf(m_text, 
m_text_len, (char *)&theData[1]); +} +void getTextGCP_TakeoverStarted(QQQQ) { + BaseString::snprintf(m_text, m_text_len, "GCP Take over started"); +} +void getTextGCP_TakeoverCompleted(QQQQ) { + BaseString::snprintf(m_text, m_text_len, "GCP Take over completed"); +} +void getTextLCP_TakeoverStarted(QQQQ) { + BaseString::snprintf(m_text, m_text_len, "LCP Take over started"); +} +void getTextLCP_TakeoverCompleted(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "LCP Take over completed (state = %d)", + theData[1]); +} +void getTextMemoryUsage(QQQQ) { + const int gth = theData[1]; + const int size = theData[2]; + const int used = theData[3]; + const int total = theData[4]; + const int block = theData[5]; + const int percent = (used*100)/total; + + BaseString::snprintf(m_text, m_text_len, + "%s usage %s %d%s(%d %dK pages of total %d)", + (block==DBACC ? "Index" : (block == DBTUP ?"Data":"<unknown>")), + (gth == 0 ? "is" : (gth > 0 ? "increased to" : "decreased to")), + percent, "%", + used, size/1024, total + ); +} + +void getTextBackupStarted(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Backup %d started from node %d", + theData[2], refToNode(theData[1])); +} +void getTextBackupFailedToStart(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Backup request from %d failed to start. Error: %d", + refToNode(theData[1]), theData[2]); +} +void getTextBackupCompleted(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Backup %u started from node %u completed\n" + " StartGCP: %u StopGCP: %u\n" + " #Records: %u #LogRecords: %u\n" + " Data: %u bytes Log: %u bytes", + theData[2], refToNode(theData[1]), + theData[3], theData[4], theData[6], theData[8], + theData[5], theData[7]); +} +void getTextBackupAborted(QQQQ) { + BaseString::snprintf(m_text, m_text_len, + "Backup %d started from %d has been aborted. 
Error: %d", + theData[2], + refToNode(theData[1]), + theData[3]); } +#if 0 +BaseString::snprintf(m_text, + m_text_len, + "Unknown event: %d", + theData[0]); +#endif + +/** + * This matrix defines which event should be printed when + * + * threshold - is in range [0-15] + * severity - DEBUG to ALERT (Type of log message) + */ + +#define ROW(a,b,c,d) \ +{ NDB_LE_ ## a, b, c, d, getText ## a} + +const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = { + // CONNECTION + ROW(Connected, LogLevel::llConnection, 8, Logger::LL_INFO ), + ROW(Disconnected, LogLevel::llConnection, 8, Logger::LL_ALERT ), + ROW(CommunicationClosed, LogLevel::llConnection, 8, Logger::LL_INFO ), + ROW(CommunicationOpened, LogLevel::llConnection, 8, Logger::LL_INFO ), + ROW(ConnectedApiVersion, LogLevel::llConnection, 8, Logger::LL_INFO ), + // CHECKPOINT + ROW(GlobalCheckpointStarted, LogLevel::llCheckpoint, 9, Logger::LL_INFO ), + ROW(GlobalCheckpointCompleted,LogLevel::llCheckpoint,10, Logger::LL_INFO ), + ROW(LocalCheckpointStarted, LogLevel::llCheckpoint, 7, Logger::LL_INFO ), + ROW(LocalCheckpointCompleted,LogLevel::llCheckpoint, 8, Logger::LL_INFO ), + ROW(LCPStoppedInCalcKeepGci, LogLevel::llCheckpoint, 0, Logger::LL_ALERT ), + ROW(LCPFragmentCompleted, LogLevel::llCheckpoint, 11, Logger::LL_INFO ), + ROW(UndoLogBlocked, LogLevel::llCheckpoint, 7, Logger::LL_INFO ), + + // STARTUP + ROW(NDBStartStarted, LogLevel::llStartUp, 1, Logger::LL_INFO ), + ROW(NDBStartCompleted, LogLevel::llStartUp, 1, Logger::LL_INFO ), + ROW(STTORRYRecieved, LogLevel::llStartUp, 15, Logger::LL_INFO ), + ROW(StartPhaseCompleted, LogLevel::llStartUp, 4, Logger::LL_INFO ), + ROW(CM_REGCONF, LogLevel::llStartUp, 3, Logger::LL_INFO ), + ROW(CM_REGREF, LogLevel::llStartUp, 8, Logger::LL_INFO ), + ROW(FIND_NEIGHBOURS, LogLevel::llStartUp, 8, Logger::LL_INFO ), + ROW(NDBStopStarted, LogLevel::llStartUp, 1, Logger::LL_INFO ), + ROW(NDBStopAborted, LogLevel::llStartUp, 1, Logger::LL_INFO ), + 
ROW(StartREDOLog, LogLevel::llStartUp, 10, Logger::LL_INFO ), + ROW(StartLog, LogLevel::llStartUp, 10, Logger::LL_INFO ), + ROW(UNDORecordsExecuted, LogLevel::llStartUp, 15, Logger::LL_INFO ), + + // NODERESTART + ROW(NR_CopyDict, LogLevel::llNodeRestart, 8, Logger::LL_INFO ), + ROW(NR_CopyDistr, LogLevel::llNodeRestart, 8, Logger::LL_INFO ), + ROW(NR_CopyFragsStarted, LogLevel::llNodeRestart, 8, Logger::LL_INFO ), + ROW(NR_CopyFragDone, LogLevel::llNodeRestart,10, Logger::LL_INFO ), + ROW(NR_CopyFragsCompleted, LogLevel::llNodeRestart, 8, Logger::LL_INFO ), + + ROW(NodeFailCompleted, LogLevel::llNodeRestart, 8, Logger::LL_ALERT), + ROW(NODE_FAILREP, LogLevel::llNodeRestart, 8, Logger::LL_ALERT), + ROW(ArbitState, LogLevel::llNodeRestart, 6, Logger::LL_INFO ), + ROW(ArbitResult, LogLevel::llNodeRestart, 2, Logger::LL_ALERT), + ROW(GCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ), + ROW(GCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ), + ROW(LCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ), + ROW(LCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ), + + // STATISTIC + ROW(TransReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO ), + ROW(OperationReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO ), + ROW(TableCreated, LogLevel::llStatistic, 7, Logger::LL_INFO ), + ROW(JobStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO ), + ROW(SendBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO ), + ROW(ReceiveBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO ), + ROW(MemoryUsage, LogLevel::llStatistic, 5, Logger::LL_INFO ), + + // ERROR + ROW(TransporterError, LogLevel::llError, 2, Logger::LL_ERROR ), + ROW(TransporterWarning, LogLevel::llError, 8, Logger::LL_WARNING ), + ROW(MissedHeartbeat, LogLevel::llError, 8, Logger::LL_WARNING ), + ROW(DeadDueToHeartbeat, LogLevel::llError, 8, Logger::LL_ALERT ), + ROW(WarningEvent, LogLevel::llError, 2, Logger::LL_WARNING ), + // INFO 
+ ROW(SentHeartbeat, LogLevel::llInfo, 12, Logger::LL_INFO ), + ROW(CreateLogBytes, LogLevel::llInfo, 11, Logger::LL_INFO ), + ROW(InfoEvent, LogLevel::llInfo, 2, Logger::LL_INFO ), + + // Backup + ROW(BackupStarted, LogLevel::llBackup, 7, Logger::LL_INFO ), + ROW(BackupCompleted, LogLevel::llBackup, 7, Logger::LL_INFO ), + ROW(BackupFailedToStart, LogLevel::llBackup, 7, Logger::LL_ALERT), + ROW(BackupAborted, LogLevel::llBackup, 7, Logger::LL_ALERT ) +}; + +const Uint32 EventLoggerBase::matrixSize= +sizeof(EventLoggerBase::matrix)/sizeof(EventRepLogLevelMatrix); + EventLogger::EventLogger() : m_filterLevel(15) { setCategory("EventLogger"); @@ -1342,19 +763,37 @@ int EventLoggerBase::event_lookup(int eventType, LogLevel::EventCategory &cat, Uint32 &threshold, - Logger::LoggerLevel &severity) + Logger::LoggerLevel &severity, + EventTextFunction &textF) { for(unsigned i = 0; i<EventLoggerBase::matrixSize; i++){ if(EventLoggerBase::matrix[i].eventType == eventType){ cat = EventLoggerBase::matrix[i].eventCategory; threshold = EventLoggerBase::matrix[i].threshold; severity = EventLoggerBase::matrix[i].severity; + textF= EventLoggerBase::matrix[i].textF; return 0; } } return 1; } +const char* +EventLogger::getText(char * dst, size_t dst_len, + EventTextFunction textF, + const Uint32* theData, NodeId nodeId ) +{ + int pos= 0; + if (nodeId != 0) + { + BaseString::snprintf(dst, dst_len, "Node %u: ", nodeId); + pos= strlen(dst); + } + if (dst_len-pos > 0) + textF(dst+pos,dst_len-pos,theData); + return dst; +} + void EventLogger::log(int eventType, const Uint32* theData, NodeId nodeId, const LogLevel* ll) @@ -1362,52 +801,43 @@ EventLogger::log(int eventType, const Uint32* theData, NodeId nodeId, Uint32 threshold = 0; Logger::LoggerLevel severity = Logger::LL_WARNING; LogLevel::EventCategory cat= LogLevel::llInvalid; + EventTextFunction textF; DBUG_ENTER("EventLogger::log"); DBUG_PRINT("enter",("eventType=%d, nodeid=%d", eventType, nodeId)); - if 
(EventLoggerBase::event_lookup(eventType,cat,threshold,severity)) + if (EventLoggerBase::event_lookup(eventType,cat,threshold,severity,textF)) DBUG_VOID_RETURN; Uint32 set = ll?ll->getLogLevel(cat) : m_logLevel.getLogLevel(cat); DBUG_PRINT("info",("threshold=%d, set=%d", threshold, set)); if (ll) DBUG_PRINT("info",("m_logLevel.getLogLevel=%d", m_logLevel.getLogLevel(cat))); + if (threshold <= set){ + getText(m_text,sizeof(m_text),textF,theData,nodeId); + switch (severity){ case Logger::LL_ALERT: - alert(EventLogger::getText(m_text, sizeof(m_text), - eventType, theData, nodeId)); + alert(m_text); break; - case Logger::LL_CRITICAL: - critical(EventLogger::getText(m_text, sizeof(m_text), - eventType, theData, nodeId)); + critical(m_text); break; - case Logger::LL_WARNING: - warning(EventLogger::getText(m_text, sizeof(m_text), - eventType, theData, nodeId)); + warning(m_text); break; - case Logger::LL_ERROR: - error(EventLogger::getText(m_text, sizeof(m_text), - eventType, theData, nodeId)); + error(m_text); break; - case Logger::LL_INFO: - info(EventLogger::getText(m_text, sizeof(m_text), - eventType, theData, nodeId)); + info(m_text); break; - case Logger::LL_DEBUG: - debug(EventLogger::getText(m_text, sizeof(m_text), - eventType, theData, nodeId)); + debug(m_text); break; - default: - info(EventLogger::getText(m_text, sizeof(m_text), - eventType, theData, nodeId)); + info(m_text); break; } } // if (.. 
diff --git a/ndb/src/common/mgmcommon/IPCConfig.cpp b/ndb/src/common/mgmcommon/IPCConfig.cpp index e25aeee8813..f188a433f1b 100644 --- a/ndb/src/common/mgmcommon/IPCConfig.cpp +++ b/ndb/src/common/mgmcommon/IPCConfig.cpp @@ -111,175 +111,6 @@ IPCConfig::addRemoteNodeId(NodeId nodeId){ } /** - * Returns no of transporters configured - */ -int -IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){ - int noOfTransportersCreated = 0; - - Uint32 noOfConnections; - if(!props->get("NoOfConnections", &noOfConnections)) return -1; - - for (Uint32 i = 0; i < noOfConnections; i++){ - const Properties * tmp; - Uint32 nodeId1, nodeId2; - const char * host1; - const char * host2; - - if(!props->get("Connection", i, &tmp)) continue; - if(!tmp->get("NodeId1", &nodeId1)) continue; - if(!tmp->get("NodeId2", &nodeId2)) continue; - if(nodeId1 != the_ownId && nodeId2 != the_ownId) continue; - - Uint32 sendSignalId; - Uint32 compression; - Uint32 checksum; - if(!tmp->get("SendSignalId", &sendSignalId)) continue; - if(!tmp->get("Checksum", &checksum)) continue; - - const char * type; - if(!tmp->get("Type", &type)) continue; - - if(strcmp("SHM", type) == 0){ - SHM_TransporterConfiguration conf; - conf.localNodeId = the_ownId; - conf.remoteNodeId = (nodeId1 != the_ownId ? nodeId1 : nodeId2); - conf.checksum = checksum; - conf.signalId = sendSignalId; - - if(!tmp->get("ShmKey", &conf.shmKey)) continue; - if(!tmp->get("ShmSize", &conf.shmSize)) continue; - - if(!theTransporterRegistry->createTransporter(&conf)){ - ndbout << "Failed to create SHM Transporter from: " - << conf.localNodeId << " to: " << conf.remoteNodeId << endl; - continue; - } else { - noOfTransportersCreated++; - continue; - } - - } else if(strcmp("SCI", type) == 0){ - SCI_TransporterConfiguration conf; - conf.localNodeId = the_ownId; - conf.remoteNodeId = (nodeId1 != the_ownId ? 
nodeId1 : nodeId2); - conf.checksum = checksum; - conf.signalId = sendSignalId; - - if(!tmp->get("SendLimit", &conf.sendLimit)) continue; - if(!tmp->get("SharedBufferSize", &conf.bufferSize)) continue; - - if(the_ownId == nodeId1){ - if(!tmp->get("Node1_NoOfAdapters", &conf.nLocalAdapters)) continue; - if(!tmp->get("Node2_Adapter", 0, &conf.remoteSciNodeId0)) continue; - - if(conf.nLocalAdapters > 1){ - if(!tmp->get("Node2_Adapter", 1, &conf.remoteSciNodeId1)) continue; - } - } else { - if(!tmp->get("Node2_NoOfAdapters", &conf.nLocalAdapters)) continue; - if(!tmp->get("Node1_Adapter", 0, &conf.remoteSciNodeId0)) continue; - - if(conf.nLocalAdapters > 1){ - if(!tmp->get("Node1_Adapter", 1, &conf.remoteSciNodeId1)) continue; - } - } - - if(!theTransporterRegistry->createTransporter(&conf)){ - ndbout << "Failed to create SCI Transporter from: " - << conf.localNodeId << " to: " << conf.remoteNodeId << endl; - continue; - } else { - noOfTransportersCreated++; - continue; - } - } - - if(!tmp->get("HostName1", &host1)) continue; - if(!tmp->get("HostName2", &host2)) continue; - - Uint32 ownNodeId; - Uint32 remoteNodeId; - const char * ownHostName; - const char * remoteHostName; - - if(nodeId1 == the_ownId){ - ownNodeId = nodeId1; - ownHostName = host1; - remoteNodeId = nodeId2; - remoteHostName = host2; - } else if(nodeId2 == the_ownId){ - ownNodeId = nodeId2; - ownHostName = host2; - remoteNodeId = nodeId1; - remoteHostName = host1; - } else { - continue; - } - - if(strcmp("TCP", type) == 0){ - TCP_TransporterConfiguration conf; - - if(!tmp->get("PortNumber", &conf.port)) continue; - if(!tmp->get("SendBufferSize", &conf.sendBufferSize)) continue; - if(!tmp->get("MaxReceiveSize", &conf.maxReceiveSize)) continue; - - const char * proxy; - if (tmp->get("Proxy", &proxy)) { - if (strlen(proxy) > 0 && nodeId2 == the_ownId) { - // TODO handle host:port - conf.port = atoi(proxy); - } - } - conf.sendBufferSize *= MAX_MESSAGE_SIZE; - conf.maxReceiveSize *= MAX_MESSAGE_SIZE; - - 
conf.remoteHostName = remoteHostName; - conf.localHostName = ownHostName; - conf.remoteNodeId = remoteNodeId; - conf.localNodeId = ownNodeId; - conf.checksum = checksum; - conf.signalId = sendSignalId; - - if(!theTransporterRegistry->createTransporter(&conf)){ - ndbout << "Failed to create TCP Transporter from: " - << ownNodeId << " to: " << remoteNodeId << endl; - } else { - noOfTransportersCreated++; - } - - } else if(strcmp("OSE", type) == 0){ - - OSE_TransporterConfiguration conf; - - if(!tmp->get("PrioASignalSize", &conf.prioASignalSize)) - continue; - if(!tmp->get("PrioBSignalSize", &conf.prioBSignalSize)) - continue; - if(!tmp->get("ReceiveArraySize", &conf.receiveBufferSize)) - continue; - - conf.remoteHostName = remoteHostName; - conf.localHostName = ownHostName; - conf.remoteNodeId = remoteNodeId; - conf.localNodeId = ownNodeId; - conf.checksum = checksum; - conf.signalId = sendSignalId; - - if(!theTransporterRegistry->createTransporter(&conf)){ - ndbout << "Failed to create OSE Transporter from: " - << ownNodeId << " to: " << remoteNodeId << endl; - } else { - noOfTransportersCreated++; - } - } else { - continue; - } - } - return noOfTransportersCreated; -} - -/** * Supply a nodeId, * and get next higher node id * Returns false if none found @@ -335,8 +166,39 @@ Uint32 IPCConfig::configureTransporters(Uint32 nodeId, const class ndb_mgm_configuration & config, class TransporterRegistry & tr){ + TransporterConfiguration conf; + DBUG_ENTER("IPCConfig::configureTransporters"); + /** + * Iterate over all MGM's an construct a connectstring + * create mgm_handle and give it to the Transporter Registry + */ + { + const char *separator= ""; + BaseString connect_string; + ndb_mgm_configuration_iterator iter(config, CFG_SECTION_NODE); + for(iter.first(); iter.valid(); iter.next()) + { + Uint32 type; + if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue; + if(type != NODE_TYPE_MGM) continue; + const char* hostname; + Uint32 port; + if(iter.get(CFG_NODE_HOST, 
&hostname)) continue; + if( strlen(hostname) == 0 ) continue; + if(iter.get(CFG_MGM_PORT, &port)) continue; + connect_string.appfmt("%s%s:port",separator,hostname,port); + separator= ","; + } + NdbMgmHandle h= ndb_mgm_create_handle(); + if ( h && connect_string.length() > 0 ) + { + ndb_mgm_set_connectstring(h,connect_string.c_str()); + tr.set_mgm_handle(h); + } + } + Uint32 noOfTransportersCreated= 0; ndb_mgm_configuration_iterator iter(config, CFG_SECTION_CONNECTION); @@ -368,32 +230,68 @@ IPCConfig::configureTransporters(Uint32 nodeId, Uint32 server_port= 0; if(iter.get(CFG_CONNECTION_SERVER_PORT, &server_port)) break; - if (nodeId <= nodeId1 && nodeId <= nodeId2) { + + /* + We check the node type. MGM node becomes server. + */ + Uint32 node1type, node2type; + ndb_mgm_configuration_iterator node1iter(config, CFG_SECTION_NODE); + ndb_mgm_configuration_iterator node2iter(config, CFG_SECTION_NODE); + node1iter.find(CFG_NODE_ID,nodeId1); + node2iter.find(CFG_NODE_ID,nodeId2); + node1iter.get(CFG_TYPE_OF_SECTION,&node1type); + node2iter.get(CFG_TYPE_OF_SECTION,&node2type); + + conf.serverNodeId= (nodeId1 < nodeId2)? nodeId1:nodeId2; + + conf.isMgmConnection= false; + if(node2type==NODE_TYPE_MGM) + { + conf.isMgmConnection= true; + conf.serverNodeId= nodeId2; + } + else if(node1type==NODE_TYPE_MGM) + { + conf.isMgmConnection= true; + conf.serverNodeId= nodeId1; + } + else if (nodeId == conf.serverNodeId) { tr.add_transporter_interface(remoteNodeId, localHostName, server_port); } + DBUG_PRINT("info", ("Transporter between this node %d and node %d using port %d, signalId %d, checksum %d", nodeId, remoteNodeId, server_port, sendSignalId, checksum)); + /* + This may be a dynamic port. It depends on when we're getting + our configuration. If we've been restarted, we'll be getting + a configuration with our old dynamic port in it, hence the number + here is negative (and we try the old port number first). 
+ + On a first-run, server_port will be zero (with dynamic ports) + + If we're not using dynamic ports, we don't do anything. + */ + if((int)server_port<0) + server_port= -server_port; + + conf.localNodeId = nodeId; + conf.remoteNodeId = remoteNodeId; + conf.checksum = checksum; + conf.signalId = sendSignalId; + conf.port = server_port; + conf.localHostName = localHostName; + conf.remoteHostName = remoteHostName; + switch(type){ - case CONNECTION_TYPE_SHM:{ - SHM_TransporterConfiguration conf; - conf.localNodeId = nodeId; - conf.remoteNodeId = remoteNodeId; - conf.checksum = checksum; - conf.signalId = sendSignalId; - - if(iter.get(CFG_SHM_KEY, &conf.shmKey)) break; - if(iter.get(CFG_SHM_BUFFER_MEM, &conf.shmSize)) break; - { - Uint32 tmp; - if(iter.get(CFG_SHM_SIGNUM, &tmp)) break; - conf.signum= tmp; - } + case CONNECTION_TYPE_SHM: + if(iter.get(CFG_SHM_KEY, &conf.shm.shmKey)) break; + if(iter.get(CFG_SHM_BUFFER_MEM, &conf.shm.shmSize)) break; - conf.port= server_port; - conf.localHostName = localHostName; - conf.remoteHostName = remoteHostName; + Uint32 tmp; + if(iter.get(CFG_SHM_SIGNUM, &tmp)) break; + conf.shm.signum= tmp; - if(!tr.createTransporter(&conf)){ + if(!tr.createSHMTransporter(&conf)){ DBUG_PRINT("error", ("Failed to create SHM Transporter from %d to %d", conf.localNodeId, conf.remoteNodeId)); ndbout << "Failed to create SHM Transporter from: " @@ -401,60 +299,53 @@ IPCConfig::configureTransporters(Uint32 nodeId, } else { noOfTransportersCreated++; } - DBUG_PRINT("info", ("Created SHM Transporter using shmkey %d, buf size = %d", - conf.shmKey, conf.shmSize)); + DBUG_PRINT("info", ("Created SHM Transporter using shmkey %d, " + "buf size = %d", conf.shm.shmKey, conf.shm.shmSize)); + break; - } - case CONNECTION_TYPE_SCI:{ - SCI_TransporterConfiguration conf; - conf.localNodeId = nodeId; - conf.remoteNodeId = remoteNodeId; - conf.checksum = checksum; - conf.signalId = sendSignalId; - conf.port= server_port; - - conf.localHostName = localHostName; - 
conf.remoteHostName = remoteHostName; - if(iter.get(CFG_SCI_SEND_LIMIT, &conf.sendLimit)) break; - if(iter.get(CFG_SCI_BUFFER_MEM, &conf.bufferSize)) break; + case CONNECTION_TYPE_SCI: + if(iter.get(CFG_SCI_SEND_LIMIT, &conf.sci.sendLimit)) break; + if(iter.get(CFG_SCI_BUFFER_MEM, &conf.sci.bufferSize)) break; if (nodeId == nodeId1) { - if(iter.get(CFG_SCI_HOST2_ID_0, &conf.remoteSciNodeId0)) break; - if(iter.get(CFG_SCI_HOST2_ID_1, &conf.remoteSciNodeId1)) break; + if(iter.get(CFG_SCI_HOST2_ID_0, &conf.sci.remoteSciNodeId0)) break; + if(iter.get(CFG_SCI_HOST2_ID_1, &conf.sci.remoteSciNodeId1)) break; } else { - if(iter.get(CFG_SCI_HOST1_ID_0, &conf.remoteSciNodeId0)) break; - if(iter.get(CFG_SCI_HOST1_ID_1, &conf.remoteSciNodeId1)) break; + if(iter.get(CFG_SCI_HOST1_ID_0, &conf.sci.remoteSciNodeId0)) break; + if(iter.get(CFG_SCI_HOST1_ID_1, &conf.sci.remoteSciNodeId1)) break; } - if (conf.remoteSciNodeId1 == 0) { - conf.nLocalAdapters = 1; + if (conf.sci.remoteSciNodeId1 == 0) { + conf.sci.nLocalAdapters = 1; } else { - conf.nLocalAdapters = 2; + conf.sci.nLocalAdapters = 2; } - if(!tr.createTransporter(&conf)){ + if(!tr.createSCITransporter(&conf)){ DBUG_PRINT("error", ("Failed to create SCI Transporter from %d to %d", conf.localNodeId, conf.remoteNodeId)); ndbout << "Failed to create SCI Transporter from: " << conf.localNodeId << " to: " << conf.remoteNodeId << endl; } else { - DBUG_PRINT("info", ("Created SCI Transporter: Adapters = %d, remote SCI node id %d", - conf.nLocalAdapters, conf.remoteSciNodeId0)); - DBUG_PRINT("info", ("Host 1 = %s, Host 2 = %s, sendLimit = %d, buf size = %d", - conf.localHostName, conf.remoteHostName, conf.sendLimit, conf.bufferSize)); - if (conf.nLocalAdapters > 1) { - DBUG_PRINT("info", ("Fault-tolerant with 2 Remote Adapters, second remote SCI node id = %d", - conf.remoteSciNodeId1)); + DBUG_PRINT("info", ("Created SCI Transporter: Adapters = %d, " + "remote SCI node id %d", + conf.sci.nLocalAdapters, conf.sci.remoteSciNodeId0)); 
+ DBUG_PRINT("info", ("Host 1 = %s, Host 2 = %s, sendLimit = %d, " + "buf size = %d", conf.localHostName, + conf.remoteHostName, conf.sci.sendLimit, + conf.sci.bufferSize)); + if (conf.sci.nLocalAdapters > 1) { + DBUG_PRINT("info", ("Fault-tolerant with 2 Remote Adapters, " + "second remote SCI node id = %d", + conf.sci.remoteSciNodeId1)); } noOfTransportersCreated++; continue; } - } - case CONNECTION_TYPE_TCP:{ - TCP_TransporterConfiguration conf; - - if(iter.get(CFG_TCP_SEND_BUFFER_SIZE, &conf.sendBufferSize)) break; - if(iter.get(CFG_TCP_RECEIVE_BUFFER_SIZE, &conf.maxReceiveSize)) break; + break; + + case CONNECTION_TYPE_TCP: + if(iter.get(CFG_TCP_SEND_BUFFER_SIZE, &conf.tcp.sendBufferSize)) break; + if(iter.get(CFG_TCP_RECEIVE_BUFFER_SIZE, &conf.tcp.maxReceiveSize)) break; - conf.port= server_port; const char * proxy; if (!iter.get(CFG_TCP_PROXY, &proxy)) { if (strlen(proxy) > 0 && nodeId2 == nodeId) { @@ -463,50 +354,35 @@ IPCConfig::configureTransporters(Uint32 nodeId, } } - conf.localNodeId = nodeId; - conf.remoteNodeId = remoteNodeId; - conf.localHostName = localHostName; - conf.remoteHostName = remoteHostName; - conf.checksum = checksum; - conf.signalId = sendSignalId; - - if(!tr.createTransporter(&conf)){ + if(!tr.createTCPTransporter(&conf)){ ndbout << "Failed to create TCP Transporter from: " << nodeId << " to: " << remoteNodeId << endl; } else { noOfTransportersCreated++; } - DBUG_PRINT("info", ("Created TCP Transporter: sendBufferSize = %d, maxReceiveSize = %d", - conf.sendBufferSize, conf.maxReceiveSize)); + DBUG_PRINT("info", ("Created TCP Transporter: sendBufferSize = %d, " + "maxReceiveSize = %d", conf.tcp.sendBufferSize, + conf.tcp.maxReceiveSize)); break; - case CONNECTION_TYPE_OSE:{ - OSE_TransporterConfiguration conf; - - if(iter.get(CFG_OSE_PRIO_A_SIZE, &conf.prioASignalSize)) break; - if(iter.get(CFG_OSE_PRIO_B_SIZE, &conf.prioBSignalSize)) break; - if(iter.get(CFG_OSE_RECEIVE_ARRAY_SIZE, &conf.receiveBufferSize)) break; - - conf.localNodeId 
= nodeId; - conf.remoteNodeId = remoteNodeId; - conf.localHostName = localHostName; - conf.remoteHostName = remoteHostName; - conf.checksum = checksum; - conf.signalId = sendSignalId; + case CONNECTION_TYPE_OSE: + if(iter.get(CFG_OSE_PRIO_A_SIZE, &conf.ose.prioASignalSize)) break; + if(iter.get(CFG_OSE_PRIO_B_SIZE, &conf.ose.prioBSignalSize)) break; - if(!tr.createTransporter(&conf)){ + if(!tr.createOSETransporter(&conf)){ ndbout << "Failed to create OSE Transporter from: " << nodeId << " to: " << remoteNodeId << endl; } else { noOfTransportersCreated++; } - } + break; + default: ndbout << "Unknown transporter type from: " << nodeId << " to: " << remoteNodeId << endl; break; - } - } - } + } // switch + } // for + DBUG_RETURN(noOfTransportersCreated); } diff --git a/ndb/src/common/transporter/OSE_Transporter.cpp b/ndb/src/common/transporter/OSE_Transporter.cpp index a52862a80e5..ad67791fc0c 100644 --- a/ndb/src/common/transporter/OSE_Transporter.cpp +++ b/ndb/src/common/transporter/OSE_Transporter.cpp @@ -32,6 +32,7 @@ OSE_Transporter::OSE_Transporter(int _prioASignalSize, NodeId localNodeId, const char * lHostName, NodeId remoteNodeId, + NodeId serverNodeId, const char * rHostName, int byteorder, bool compression, @@ -40,6 +41,7 @@ OSE_Transporter::OSE_Transporter(int _prioASignalSize, Uint32 reportFreq) : Transporter(localNodeId, remoteNodeId, + serverNodeId, byteorder, compression, checksum, diff --git a/ndb/src/common/transporter/OSE_Transporter.hpp b/ndb/src/common/transporter/OSE_Transporter.hpp index 4fd06130477..898352366ba 100644 --- a/ndb/src/common/transporter/OSE_Transporter.hpp +++ b/ndb/src/common/transporter/OSE_Transporter.hpp @@ -48,6 +48,7 @@ public: NodeId localNodeId, const char * lHostName, NodeId remoteNodeId, + NodeId serverNodeId, const char * rHostName, int byteorder, bool compression, diff --git a/ndb/src/common/transporter/SCI_Transporter.cpp b/ndb/src/common/transporter/SCI_Transporter.cpp index e7807c972b1..506140a887f 100644 --- 
a/ndb/src/common/transporter/SCI_Transporter.cpp +++ b/ndb/src/common/transporter/SCI_Transporter.cpp @@ -34,19 +34,21 @@ SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg, const char *lHostName, const char *rHostName, int r_port, + bool isMgmConnection, Uint32 packetSize, Uint32 bufferSize, Uint32 nAdapters, Uint16 remoteSciNodeId0, Uint16 remoteSciNodeId1, NodeId _localNodeId, - NodeId _remoteNodeId, + NodeId _remoteNodeId, + NodeId serverNodeId, bool chksm, bool signalId, Uint32 reportFreq) : Transporter(t_reg, tt_SCI_TRANSPORTER, - lHostName, rHostName, r_port, _localNodeId, - _remoteNodeId, 0, false, chksm, signalId) + lHostName, rHostName, r_port, isMgmConnection, _localNodeId, + _remoteNodeId, serverNodeID, 0, false, chksm, signalId) { DBUG_ENTER("SCI_Transporter::SCI_Transporter"); m_PacketSize = (packetSize + 3)/4 ; diff --git a/ndb/src/common/transporter/SCI_Transporter.hpp b/ndb/src/common/transporter/SCI_Transporter.hpp index e2f2dfcaf99..8d263f32a57 100644 --- a/ndb/src/common/transporter/SCI_Transporter.hpp +++ b/ndb/src/common/transporter/SCI_Transporter.hpp @@ -139,13 +139,15 @@ private: const char *local_host, const char *remote_host, int port, + bool isMgmConnection, Uint32 packetSize, Uint32 bufferSize, Uint32 nAdapters, Uint16 remoteSciNodeId0, Uint16 remoteSciNodeId1, NodeId localNodeID, - NodeId remoteNodeID, + NodeId remoteNodeID, + NodeId serverNodeId, bool checksum, bool signalId, Uint32 reportFreq = 4096); diff --git a/ndb/src/common/transporter/SHM_Transporter.cpp b/ndb/src/common/transporter/SHM_Transporter.cpp index eed3ad77be6..e2d23cf94e2 100644 --- a/ndb/src/common/transporter/SHM_Transporter.cpp +++ b/ndb/src/common/transporter/SHM_Transporter.cpp @@ -32,14 +32,17 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg, const char *lHostName, const char *rHostName, int r_port, + bool isMgmConnection, NodeId lNodeId, - NodeId rNodeId, + NodeId rNodeId, + NodeId serverNodeId, bool checksum, bool signalId, key_t 
_shmKey, Uint32 _shmSize) : Transporter(t_reg, tt_SHM_TRANSPORTER, - lHostName, rHostName, r_port, lNodeId, rNodeId, + lHostName, rHostName, r_port, isMgmConnection, + lNodeId, rNodeId, serverNodeId, 0, false, checksum, signalId), shmKey(_shmKey), shmSize(_shmSize) diff --git a/ndb/src/common/transporter/SHM_Transporter.hpp b/ndb/src/common/transporter/SHM_Transporter.hpp index b501f652168..677bd6efc37 100644 --- a/ndb/src/common/transporter/SHM_Transporter.hpp +++ b/ndb/src/common/transporter/SHM_Transporter.hpp @@ -36,8 +36,10 @@ public: const char *lHostName, const char *rHostName, int r_port, + bool isMgmConnection, NodeId lNodeId, - NodeId rNodeId, + NodeId rNodeId, + NodeId serverNodeId, bool checksum, bool signalId, key_t shmKey, diff --git a/ndb/src/common/transporter/TCP_Transporter.cpp b/ndb/src/common/transporter/TCP_Transporter.cpp index a629b620157..fd71cf71cd9 100644 --- a/ndb/src/common/transporter/TCP_Transporter.cpp +++ b/ndb/src/common/transporter/TCP_Transporter.cpp @@ -68,12 +68,15 @@ TCP_Transporter::TCP_Transporter(TransporterRegistry &t_reg, const char *lHostName, const char *rHostName, int r_port, + bool isMgmConnection, NodeId lNodeId, NodeId rNodeId, + NodeId serverNodeId, bool chksm, bool signalId, Uint32 _reportFreq) : Transporter(t_reg, tt_TCP_TRANSPORTER, - lHostName, rHostName, r_port, lNodeId, rNodeId, + lHostName, rHostName, r_port, isMgmConnection, + lNodeId, rNodeId, serverNodeId, 0, false, chksm, signalId), m_sendBuffer(sendBufSize) { diff --git a/ndb/src/common/transporter/TCP_Transporter.hpp b/ndb/src/common/transporter/TCP_Transporter.hpp index 48046310bf8..9cd174150c1 100644 --- a/ndb/src/common/transporter/TCP_Transporter.hpp +++ b/ndb/src/common/transporter/TCP_Transporter.hpp @@ -49,9 +49,11 @@ private: int sendBufferSize, int maxReceiveSize, const char *lHostName, const char *rHostName, - int r_port, + int r_port, + bool isMgmConnection, NodeId lHostId, NodeId rHostId, + NodeId serverNodeId, bool checksum, bool signalId, 
Uint32 reportFreq = 4096); diff --git a/ndb/src/common/transporter/Transporter.cpp b/ndb/src/common/transporter/Transporter.cpp index b84f8f6fb5e..a888d98b832 100644 --- a/ndb/src/common/transporter/Transporter.cpp +++ b/ndb/src/common/transporter/Transporter.cpp @@ -31,13 +31,15 @@ Transporter::Transporter(TransporterRegistry &t_reg, TransporterType _type, const char *lHostName, const char *rHostName, - int r_port, + int s_port, + bool _isMgmConnection, NodeId lNodeId, - NodeId rNodeId, + NodeId rNodeId, + NodeId serverNodeId, int _byteorder, bool _compression, bool _checksum, bool _signalId) - : m_r_port(r_port), remoteNodeId(rNodeId), localNodeId(lNodeId), - isServer(lNodeId < rNodeId), + : m_s_port(s_port), remoteNodeId(rNodeId), localNodeId(lNodeId), + isServer(lNodeId==serverNodeId), isMgmConnection(_isMgmConnection), m_packer(_signalId, _checksum), m_type(_type), m_transporter_registry(t_reg) @@ -61,10 +63,10 @@ Transporter::Transporter(TransporterRegistry &t_reg, if (strlen(lHostName) > 0) Ndb_getInAddr(&localHostAddress, lHostName); - DBUG_PRINT("info",("rId=%d lId=%d isServer=%d rHost=%s lHost=%s r_port=%d", + DBUG_PRINT("info",("rId=%d lId=%d isServer=%d rHost=%s lHost=%s s_port=%d", remoteNodeId, localNodeId, isServer, remoteHostName, localHostName, - r_port)); + s_port)); byteOrder = _byteorder; compressionUsed = _compression; @@ -74,10 +76,13 @@ Transporter::Transporter(TransporterRegistry &t_reg, m_connected = false; m_timeOutMillis = 1000; + if(s_port<0) + s_port= -s_port; // was dynamic + if (isServer) m_socket_client= 0; else - m_socket_client= new SocketClient(remoteHostName, r_port, + m_socket_client= new SocketClient(remoteHostName, s_port, new SocketAuthSimple("ndbd", "ndbd passwd")); DBUG_VOID_RETURN; @@ -109,22 +114,33 @@ Transporter::connect_server(NDB_SOCKET_TYPE sockfd) { bool Transporter::connect_client() { + NDB_SOCKET_TYPE sockfd; + if(m_connected) return true; - NDB_SOCKET_TYPE sockfd = m_socket_client->connect(); - + + 
if(isMgmConnection) + sockfd= m_transporter_registry.connect_ndb_mgmd(m_socket_client); + else + sockfd= m_socket_client->connect(); + if (sockfd == NDB_INVALID_SOCKET) return false; DBUG_ENTER("Transporter::connect_client"); + DBUG_PRINT("info",("port %d isMgmConnection=%d",m_s_port,isMgmConnection)); + + SocketOutputStream s_output(sockfd); + SocketInputStream s_input(sockfd); + // send info about own id // send info about own transporter type - SocketOutputStream s_output(sockfd); + s_output.println("%d %d", localNodeId, m_type); // get remote id int nodeId, remote_transporter_type= -1; - SocketInputStream s_input(sockfd); + char buf[256]; if (s_input.gets(buf, 256) == 0) { NDB_CLOSE_SOCKET(sockfd); diff --git a/ndb/src/common/transporter/Transporter.hpp b/ndb/src/common/transporter/Transporter.hpp index 31c915c9b5d..5b25afa0d89 100644 --- a/ndb/src/common/transporter/Transporter.hpp +++ b/ndb/src/common/transporter/Transporter.hpp @@ -70,23 +70,31 @@ public: NodeId getLocalNodeId() const; /** - * Get r_port we're connecting to + * Get port we're connecting to (signed) */ - unsigned int get_r_port() { return m_r_port; }; + int get_s_port() { return m_s_port; }; /** - * Set r_port to connect to + * Set port to connect to (signed) */ - void set_r_port(unsigned int port) { m_r_port = port; }; + void set_s_port(int port) { + m_s_port = port; + if(port<0) + port= -port; + if(m_socket_client) + m_socket_client->set_port(port); + }; protected: Transporter(TransporterRegistry &, TransporterType, const char *lHostName, const char *rHostName, - int r_port, + int s_port, + bool isMgmConnection, NodeId lNodeId, - NodeId rNodeId, + NodeId rNodeId, + NodeId serverNodeId, int byteorder, bool compression, bool checksum, @@ -112,7 +120,7 @@ protected: struct in_addr remoteHostAddress; struct in_addr localHostAddress; - unsigned int m_r_port; + int m_s_port; const NodeId remoteNodeId; const NodeId localNodeId; @@ -129,6 +137,12 @@ protected: private: + /** + * means that we 
transform an MGM connection into + * a transporter connection + */ + bool isMgmConnection; + SocketClient *m_socket_client; protected: diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp index e22b0c30c1e..b8dd2d1f561 100644 --- a/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/ndb/src/common/transporter/TransporterRegistry.cpp @@ -48,6 +48,7 @@ extern int g_ndb_shm_signum; #include <InputStream.hpp> #include <OutputStream.hpp> +#include <mgmapi/mgmapi.h> #include <mgmapi/mgmapi_debug.h> #include <EventLogger.hpp> @@ -70,15 +71,14 @@ SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd) DBUG_RETURN(0); } -TransporterRegistry::TransporterRegistry(NdbMgmHandle mgm_handle, - void * callback, +TransporterRegistry::TransporterRegistry(void * callback, unsigned _maxTransporters, unsigned sizeOfLongSignalMemory) { nodeIdSpecified = false; maxTransporters = _maxTransporters; sendCounter = 1; - m_mgm_handle = mgm_handle; + m_mgm_handle= 0; callbackObj=callback; @@ -113,6 +113,27 @@ TransporterRegistry::TransporterRegistry(NdbMgmHandle mgm_handle, theOSEJunkSocketRecv = 0; } +void TransporterRegistry::set_mgm_handle(NdbMgmHandle h) +{ + DBUG_ENTER("TransporterRegistry::set_mgm_handle"); + if (m_mgm_handle) + ndb_mgm_destroy_handle(&m_mgm_handle); + m_mgm_handle= h; +#ifndef DBUG_OFF + if (h) + { + char buf[256]; + DBUG_PRINT("info",("handle set with connectstring: %s", + ndb_mgm_get_connectstring(h,buf, sizeof(buf)))); + } + else + { + DBUG_PRINT("info",("handle set to NULL")); + } +#endif + DBUG_VOID_RETURN; +} + TransporterRegistry::~TransporterRegistry() { removeAll(); @@ -133,6 +154,8 @@ TransporterRegistry::~TransporterRegistry() { theOSEReceiver = 0; } #endif + if (m_mgm_handle) + ndb_mgm_destroy_handle(&m_mgm_handle); } void @@ -248,7 +271,7 @@ TransporterRegistry::connect_server(NDB_SOCKET_TYPE sockfd) } bool 
-TransporterRegistry::createTransporter(TCP_TransporterConfiguration *config) { +TransporterRegistry::createTCPTransporter(TransporterConfiguration *config) { #ifdef NDB_TCP_TRANSPORTER if(!nodeIdSpecified){ @@ -262,13 +285,15 @@ TransporterRegistry::createTransporter(TCP_TransporterConfiguration *config) { return false; TCP_Transporter * t = new TCP_Transporter(*this, - config->sendBufferSize, - config->maxReceiveSize, + config->tcp.sendBufferSize, + config->tcp.maxReceiveSize, config->localHostName, config->remoteHostName, config->port, + config->isMgmConnection, localNodeId, config->remoteNodeId, + config->serverNodeId, config->checksum, config->signalId); if (t == NULL) @@ -297,7 +322,7 @@ TransporterRegistry::createTransporter(TCP_TransporterConfiguration *config) { } bool -TransporterRegistry::createTransporter(OSE_TransporterConfiguration *conf) { +TransporterRegistry::createOSETransporter(TransporterConfiguration *conf) { #ifdef NDB_OSE_TRANSPORTER if(!nodeIdSpecified){ @@ -316,11 +341,12 @@ TransporterRegistry::createTransporter(OSE_TransporterConfiguration *conf) { localNodeId); } - OSE_Transporter * t = new OSE_Transporter(conf->prioASignalSize, - conf->prioBSignalSize, + OSE_Transporter * t = new OSE_Transporter(conf->ose.prioASignalSize, + conf->ose.prioBSignalSize, localNodeId, conf->localHostName, conf->remoteNodeId, + conf->serverNodeId, conf->remoteHostName, conf->checksum, conf->signalId); @@ -346,7 +372,7 @@ TransporterRegistry::createTransporter(OSE_TransporterConfiguration *conf) { } bool -TransporterRegistry::createTransporter(SCI_TransporterConfiguration *config) { +TransporterRegistry::createSCITransporter(TransporterConfiguration *config) { #ifdef NDB_SCI_TRANSPORTER if(!SCI_Transporter::initSCI()) @@ -366,13 +392,15 @@ TransporterRegistry::createTransporter(SCI_TransporterConfiguration *config) { config->localHostName, config->remoteHostName, config->port, - config->sendLimit, - config->bufferSize, - config->nLocalAdapters, - 
config->remoteSciNodeId0, - config->remoteSciNodeId1, + config->isMgmConnection, + config->sci.sendLimit, + config->sci.bufferSize, + config->sci.nLocalAdapters, + config->sci.remoteSciNodeId0, + config->sci.remoteSciNodeId1, localNodeId, config->remoteNodeId, + config->serverNodeId, config->checksum, config->signalId); @@ -397,7 +425,7 @@ TransporterRegistry::createTransporter(SCI_TransporterConfiguration *config) { } bool -TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) { +TransporterRegistry::createSHMTransporter(TransporterConfiguration *config) { DBUG_ENTER("TransporterRegistry::createTransporter SHM"); #ifdef NDB_SHM_TRANSPORTER if(!nodeIdSpecified){ @@ -408,7 +436,7 @@ TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) { return false; if (!g_ndb_shm_signum) { - g_ndb_shm_signum= config->signum; + g_ndb_shm_signum= config->shm.signum; DBUG_PRINT("info",("Block signum %d",g_ndb_shm_signum)); /** * Make sure to block g_ndb_shm_signum @@ -420,7 +448,7 @@ TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) { pthread_sigmask(SIG_BLOCK, &mask, 0); } - if(config->signum != g_ndb_shm_signum) + if(config->shm.signum != g_ndb_shm_signum) return false; if(theTransporters[config->remoteNodeId] != NULL) @@ -430,12 +458,14 @@ TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) { config->localHostName, config->remoteHostName, config->port, + config->isMgmConnection, localNodeId, config->remoteNodeId, + config->serverNodeId, config->checksum, config->signalId, - config->shmKey, - config->shmSize + config->shm.shmKey, + config->shm.shmSize ); if (t == NULL) return false; @@ -1203,25 +1233,59 @@ TransporterRegistry::start_clients_thread() switch(performStates[nodeId]){ case CONNECTING: if(!t->isConnected() && !t->isServer) { - if(t->get_r_port() <= 0) { // Port is dynamic - Uint32 server_port=0; + bool connected= false; + /** + * First, we try to connect (if we have a port 
number). + */ + if (t->get_s_port()) + connected= t->connect_client(); + + /** + * If dynamic, get the port for connecting from the management server + */ + if( !connected && t->get_s_port() <= 0) { // Port is dynamic + int server_port= 0; struct ndb_mgm_reply mgm_reply; - int res; - res=ndb_mgm_get_connection_int_parameter(m_mgm_handle, + + if(!ndb_mgm_is_connected(m_mgm_handle)) + ndb_mgm_connect(m_mgm_handle, 0, 0, 0); + + if(ndb_mgm_is_connected(m_mgm_handle)) + { + int res= + ndb_mgm_get_connection_int_parameter(m_mgm_handle, t->getRemoteNodeId(), t->getLocalNodeId(), CFG_CONNECTION_SERVER_PORT, &server_port, &mgm_reply); - DBUG_PRINT("info",("Got dynamic port %u for %d -> %d (ret: %d)", - server_port,t->getRemoteNodeId(), - t->getLocalNodeId())); - if(res>=0) - t->set_r_port(server_port); - else - ndbout_c("Failed to get dynamic port to connect to."); + DBUG_PRINT("info",("Got dynamic port %d for %d -> %d (ret: %d)", + server_port,t->getRemoteNodeId(), + t->getLocalNodeId(),res)); + if( res >= 0 ) + { + /** + * Server_port == 0 just means that that a mgmt server + * has not received a new port yet. Keep the old. + */ + if (server_port) + t->set_s_port(server_port); + } + else + { + ndbout_c("Failed to get dynamic port to connect to: %d", res); + ndb_mgm_disconnect(m_mgm_handle); + } + } + /** else + * We will not be able to get a new port unless + * the m_mgm_handle is connected. Note that not + * being connected is an ok state, just continue + * until it is able to connect. Continue using the + * old port until we can connect again and get a + * new port. 
+ */ } - t->connect_client(); } break; case DISCONNECTING: @@ -1258,7 +1322,7 @@ TransporterRegistry::stop_clients() if (m_start_clients_thread) { m_run_start_clients_thread= false; void* status; - int r= NdbThread_WaitFor(m_start_clients_thread, &status); + NdbThread_WaitFor(m_start_clients_thread, &status); NdbThread_Destroy(&m_start_clients_thread); } return true; @@ -1267,17 +1331,17 @@ TransporterRegistry::stop_clients() void TransporterRegistry::add_transporter_interface(NodeId remoteNodeId, const char *interf, - unsigned short port) + int s_port) { DBUG_ENTER("TransporterRegistry::add_transporter_interface"); - DBUG_PRINT("enter",("interface=%s, port= %d", interf, port)); + DBUG_PRINT("enter",("interface=%s, s_port= %d", interf, s_port)); if (interf && strlen(interf) == 0) interf= 0; for (unsigned i= 0; i < m_transporter_interface.size(); i++) { Transporter_interface &tmp= m_transporter_interface[i]; - if (port != tmp.m_service_port) + if (s_port != tmp.m_s_service_port || tmp.m_s_service_port==0) continue; if (interf != 0 && tmp.m_interface != 0 && strcmp(interf, tmp.m_interface) == 0) @@ -1291,7 +1355,7 @@ TransporterRegistry::add_transporter_interface(NodeId remoteNodeId, } Transporter_interface t; t.m_remote_nodeId= remoteNodeId; - t.m_service_port= port; + t.m_s_service_port= s_port; t.m_interface= interf; m_transporter_interface.push_back(t); DBUG_PRINT("exit",("interface and port added")); @@ -1301,34 +1365,48 @@ TransporterRegistry::add_transporter_interface(NodeId remoteNodeId, bool TransporterRegistry::start_service(SocketServer& socket_server) { + DBUG_ENTER("TransporterRegistry::start_service"); if (m_transporter_interface.size() > 0 && !nodeIdSpecified) { ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified"); - return false; + DBUG_RETURN(false); } for (unsigned i= 0; i < m_transporter_interface.size(); i++) { Transporter_interface &t= m_transporter_interface[i]; - if (t.m_service_port == 0) - { - continue; - } + + unsigned 
short port= (unsigned short)t.m_s_service_port; + if(t.m_s_service_port<0) + port= -t.m_s_service_port; // is a dynamic port TransporterService *transporter_service = new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd")); if(!socket_server.setup(transporter_service, - t.m_service_port, t.m_interface)) + &port, t.m_interface)) { - ndbout_c("Unable to setup transporter service port: %s:%d!\n" - "Please check if the port is already used,\n" - "(perhaps the node is already running)", - t.m_interface ? t.m_interface : "*", t.m_service_port); - delete transporter_service; - return false; + DBUG_PRINT("info", ("Trying new port")); + port= 0; + if(t.m_s_service_port>0 + || !socket_server.setup(transporter_service, + &port, t.m_interface)) + { + /* + * If it wasn't a dynamically allocated port, or + * our attempts at getting a new dynamic port failed + */ + ndbout_c("Unable to setup transporter service port: %s:%d!\n" + "Please check if the port is already used,\n" + "(perhaps the node is already running)", + t.m_interface ? 
t.m_interface : "*", t.m_s_service_port); + delete transporter_service; + DBUG_RETURN(false); + } } + t.m_s_service_port= (t.m_s_service_port<=0)?-port:port; // -`ve if dynamic + DBUG_PRINT("info", ("t.m_s_service_port = %d",t.m_s_service_port)); transporter_service->setTransporterRegistry(this); } - return true; + DBUG_RETURN(true); } #ifdef NDB_SHM_TRANSPORTER @@ -1380,12 +1458,7 @@ TransporterRegistry::startReceiving() DBUG_PRINT("error",("Install failed")); g_eventLogger.error("Failed to install signal handler for" " SHM transporter errno: %d (%s)", errno, -#ifdef HAVE_STRERROR - strerror(errno) -#else - "" -#endif - ); + strerror(errno)); } } #endif // NDB_SHM_TRANSPORTER @@ -1448,4 +1521,63 @@ TransporterRegistry::get_transporter(NodeId nodeId) { return theTransporters[nodeId]; } +NDB_SOCKET_TYPE TransporterRegistry::connect_ndb_mgmd(SocketClient *sc) +{ + NdbMgmHandle h= ndb_mgm_create_handle(); + struct ndb_mgm_reply mgm_reply; + + if ( h == NULL ) + { + return NDB_INVALID_SOCKET; + } + + /** + * Set connectstring + */ + { + char c[100]; + char *cs= &c[0]; + unsigned len= strlen(sc->get_server_name())+20; + if( len > sizeof(c) ) + { + /* + * server name is long. 
malloc enough for it and the port number + */ + cs= (char*)malloc(len*sizeof(char)); + if(!cs) + { + ndb_mgm_destroy_handle(&h); + return NDB_INVALID_SOCKET; + } + } + snprintf(cs,len,"%s:%u",sc->get_server_name(),sc->get_port()); + ndb_mgm_set_connectstring(h, cs); + if(cs != &c[0]) + free(cs); + } + + if(ndb_mgm_connect(h, 0, 0, 0)<0) + { + ndb_mgm_destroy_handle(&h); + return NDB_INVALID_SOCKET; + } + + for(unsigned int i=0;i < m_transporter_interface.size();i++) + if (ndb_mgm_set_connection_int_parameter(h, + get_localNodeId(), + m_transporter_interface[i].m_remote_nodeId, + CFG_CONNECTION_SERVER_PORT, + m_transporter_interface[i].m_s_service_port, + &mgm_reply) < 0) + { + ndb_mgm_destroy_handle(&h); + return NDB_INVALID_SOCKET; + } + + NDB_SOCKET_TYPE sockfd= ndb_mgm_convert_to_transporter(h); + if ( sockfd == NDB_INVALID_SOCKET) + ndb_mgm_destroy_handle(&h); + return sockfd; +} + template class Vector<TransporterRegistry::Transporter_interface>; diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp index 4f1ee423c75..34cfe2a6081 100644 --- a/ndb/src/common/util/NdbSqlUtil.cpp +++ b/ndb/src/common/util/NdbSqlUtil.cpp @@ -16,60 +16,7 @@ #include <NdbSqlUtil.hpp> #include <NdbOut.hpp> - -int -NdbSqlUtil::char_compare(const char* s1, unsigned n1, - const char* s2, unsigned n2, bool padded) -{ - int c1 = 0; - int c2 = 0; - unsigned i = 0; - while (i < n1 || i < n2) { - c1 = i < n1 ? s1[i] : padded ? 0x20 : 0; - c2 = i < n2 ? s2[i] : padded ? 0x20 : 0; - if (c1 != c2) - break; - i++; - } - return c1 - c2; -} - -bool -NdbSqlUtil::char_like(const char* s1, unsigned n1, - const char* s2, unsigned n2, bool padded) -{ - int c1 = 0; - int c2 = 0; - unsigned i1 = 0; - unsigned i2 = 0; - while (i1 < n1 || i2 < n2) { - c1 = i1 < n1 ? s1[i1] : padded ? 0x20 : 0; - c2 = i2 < n2 ? s2[i2] : padded ? 
0x20 : 0; - if (c2 == '%') { - while (i2 + 1 < n2 && s2[i2 + 1] == '%') { - i2++; - } - unsigned m = 0; - while (m <= n1 - i1) { - if (char_like(s1 + i1 + m, n1 -i1 - m, - s2 + i2 + 1, n2 - i2 - 1, padded)) - return true; - m++; - } - return false; - } - if (c2 == '_') { - if (c1 == 0) - return false; - } else { - if (c1 != c2) - return false; - } - i1++; - i2++; - } - return i1 == n2 && i2 == n2; -} +#include <my_sys.h> /* * Data types. The entries must be in the numerical order. @@ -79,127 +26,158 @@ const NdbSqlUtil::Type NdbSqlUtil::m_typeList[] = { { // 0 Type::Undefined, + NULL, NULL }, { // 1 Type::Tinyint, - cmpTinyint + cmpTinyint, + NULL }, { // 2 Type::Tinyunsigned, - cmpTinyunsigned + cmpTinyunsigned, + NULL }, { // 3 Type::Smallint, - cmpSmallint + cmpSmallint, + NULL }, { // 4 Type::Smallunsigned, - cmpSmallunsigned + cmpSmallunsigned, + NULL }, { // 5 Type::Mediumint, - cmpMediumint + cmpMediumint, + NULL }, { // 6 Type::Mediumunsigned, - cmpMediumunsigned + cmpMediumunsigned, + NULL }, { // 7 Type::Int, - cmpInt + cmpInt, + NULL }, { // 8 Type::Unsigned, - cmpUnsigned + cmpUnsigned, + NULL }, { // 9 Type::Bigint, - cmpBigint + cmpBigint, + NULL }, { // 10 Type::Bigunsigned, - cmpBigunsigned + cmpBigunsigned, + NULL }, { // 11 Type::Float, - cmpFloat + cmpFloat, + NULL }, { // 12 Type::Double, - cmpDouble + cmpDouble, + NULL }, { // 13 Type::Olddecimal, - cmpOlddecimal + cmpOlddecimal, + NULL }, { // 14 Type::Char, - cmpChar + cmpChar, + likeChar }, { // 15 Type::Varchar, - cmpVarchar + cmpVarchar, + likeVarchar }, { // 16 Type::Binary, - cmpBinary + cmpBinary, + likeBinary }, { // 17 Type::Varbinary, - cmpVarbinary + cmpVarbinary, + likeVarbinary }, { // 18 Type::Datetime, - cmpDatetime + cmpDatetime, + NULL }, { // 19 Type::Date, - cmpDate + cmpDate, + NULL }, { // 20 Type::Blob, - NULL // cmpBlob + NULL, + NULL }, { // 21 Type::Text, - NULL // cmpText + NULL, + NULL }, { // 22 Type::Bit, - NULL // cmpBit + NULL, + NULL }, { // 23 
Type::Longvarchar, - cmpLongvarchar + cmpLongvarchar, + likeLongvarchar }, { // 24 Type::Longvarbinary, - cmpLongvarbinary + cmpLongvarbinary, + likeLongvarbinary }, { // 25 Type::Time, - cmpTime + cmpTime, + NULL }, { // 26 Type::Year, - cmpYear + cmpYear, + NULL }, { // 27 Type::Timestamp, - cmpTimestamp + cmpTimestamp, + NULL }, { // 28 Type::Olddecimalunsigned, - cmpOlddecimalunsigned + cmpOlddecimalunsigned, + NULL }, { // 29 Type::Decimal, - cmpDecimal + cmpDecimal, + NULL }, { // 30 Type::Decimalunsigned, - cmpDecimalunsigned + cmpDecimalunsigned, + NULL } }; @@ -815,6 +793,83 @@ NdbSqlUtil::cmpTimestamp(const void* info, const void* p1, unsigned n1, const vo return CmpUnknown; } +// like + +static const int ndb_wild_prefix = '\\'; +static const int ndb_wild_one = '_'; +static const int ndb_wild_many = '%'; + +int +NdbSqlUtil::likeChar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2) +{ + const char* v1 = (const char*)p1; + const char* v2 = (const char*)p2; + CHARSET_INFO* cs = (CHARSET_INFO*)(info); + int k = (cs->coll->wildcmp)(cs, v1, v1 + n1, v2, v2 + n2, ndb_wild_prefix, ndb_wild_one, ndb_wild_many); + return k == 0 ? 0 : +1; +} + +int +NdbSqlUtil::likeBinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2) +{ + assert(info == 0); + return likeChar(&my_charset_bin, p1, n1, p2, n2); +} + +int +NdbSqlUtil::likeVarchar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2) +{ + const unsigned lb = 1; + if (n1 >= lb && n2 >= lb) { + const uchar* v1 = (const uchar*)p1; + const uchar* v2 = (const uchar*)p2; + unsigned m1 = *v1; + unsigned m2 = *v2; + if (lb + m1 <= n1 && lb + m2 <= n2) { + const char* w1 = (const char*)v1 + lb; + const char* w2 = (const char*)v2 + lb; + CHARSET_INFO* cs = (CHARSET_INFO*)(info); + int k = (cs->coll->wildcmp)(cs, w1, w1 + m1, w2, w2 + m2, ndb_wild_prefix, ndb_wild_one, ndb_wild_many); + return k == 0 ? 
0 : +1; + } + } + return -1; +} + +int +NdbSqlUtil::likeVarbinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2) +{ + assert(info == 0); + return likeVarchar(&my_charset_bin, p1, n1, p2, n2); +} + +int +NdbSqlUtil::likeLongvarchar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2) +{ + const unsigned lb = 2; + if (n1 >= lb && n2 >= lb) { + const uchar* v1 = (const uchar*)p1; + const uchar* v2 = (const uchar*)p2; + unsigned m1 = uint2korr(v1); + unsigned m2 = uint2korr(v2); + if (lb + m1 <= n1 && lb + m2 <= n2) { + const char* w1 = (const char*)v1 + lb; + const char* w2 = (const char*)v2 + lb; + CHARSET_INFO* cs = (CHARSET_INFO*)(info); + int k = (cs->coll->wildcmp)(cs, w1, w1 + m1, w2, w2 + m2, ndb_wild_prefix, ndb_wild_one, ndb_wild_many); + return k == 0 ? 0 : +1; + } + } + return -1; +} + +int +NdbSqlUtil::likeLongvarbinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2) +{ + assert(info == 0); + return likeLongvarchar(&my_charset_bin, p1, n1, p2, n2); +} + // check charset bool diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp index da06389b5df..6019d24d736 100644 --- a/ndb/src/common/util/SocketServer.cpp +++ b/ndb/src/common/util/SocketServer.cpp @@ -82,15 +82,15 @@ SocketServer::tryBind(unsigned short port, const char * intface) { bool SocketServer::setup(SocketServer::Service * service, - unsigned short port, + unsigned short * port, const char * intface){ DBUG_ENTER("SocketServer::setup"); - DBUG_PRINT("enter",("interface=%s, port=%d", intface, port)); + DBUG_PRINT("enter",("interface=%s, port=%u", intface, *port)); struct sockaddr_in servaddr; memset(&servaddr, 0, sizeof(servaddr)); servaddr.sin_family = AF_INET; servaddr.sin_addr.s_addr = htonl(INADDR_ANY); - servaddr.sin_port = htons(port); + servaddr.sin_port = htons(*port); if(intface != 0){ if(Ndb_getInAddr(&servaddr.sin_addr, intface)) @@ -119,7 +119,17 @@ 
SocketServer::setup(SocketServer::Service * service, NDB_CLOSE_SOCKET(sock); DBUG_RETURN(false); } - + + /* Get the port we bound to */ + SOCKET_SIZE_TYPE sock_len = sizeof(servaddr); + if(getsockname(sock,(struct sockaddr*)&servaddr,&sock_len)<0) { + ndbout_c("An error occurred while trying to find out what" + " port we bound to. Error: %s",strerror(errno)); + NDB_CLOSE_SOCKET(sock); + DBUG_RETURN(false); + } + + DBUG_PRINT("info",("bound to %u",ntohs(servaddr.sin_port))); if (listen(sock, m_maxSessions) == -1){ DBUG_PRINT("error",("listen() - %d - %s", errno, strerror(errno))); @@ -131,6 +141,9 @@ SocketServer::setup(SocketServer::Service * service, i.m_socket = sock; i.m_service = service; m_services.push_back(i); + + *port = ntohs(servaddr.sin_port); + DBUG_RETURN(true); } @@ -310,11 +323,18 @@ sessionThread_C(void* _sc){ return 0; } - if(!si->m_stop){ - si->m_stopped = false; - si->runSession(); - } else { - NDB_CLOSE_SOCKET(si->m_socket); + /** + * may have m_stopped set if we're transforming a mgm + * connection into a transporter connection. 
+ */ + if(!si->m_stopped) + { + if(!si->m_stop){ + si->m_stopped = false; + si->runSession(); + } else { + NDB_CLOSE_SOCKET(si->m_socket); + } } si->m_stopped = true; diff --git a/ndb/src/cw/cpcd/main.cpp b/ndb/src/cw/cpcd/main.cpp index 25632f132e9..9bbd5e484d4 100644 --- a/ndb/src/cw/cpcd/main.cpp +++ b/ndb/src/cw/cpcd/main.cpp @@ -32,7 +32,7 @@ #include "common.hpp" static const char *work_dir = CPCD_DEFAULT_WORK_DIR; -static int port; +static short unsigned int port; static int use_syslog; static const char *logfile = NULL; static const char *config_file = CPCD_DEFAULT_CONFIG_FILE; @@ -142,7 +142,7 @@ int main(int argc, char** argv){ SocketServer * ss = new SocketServer(); CPCDAPIService * serv = new CPCDAPIService(cpcd); - if(!ss->setup(serv, port)){ + if(!ss->setup(serv, &port)){ logger.critical("Cannot setup server: %s", strerror(errno)); sleep(1); delete ss; diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 6e3876fe807..9001491dd64 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -197,7 +197,8 @@ void Cmvmi::execEVENT_REP(Signal* signal) Uint32 threshold; LogLevel::EventCategory eventCategory; Logger::LoggerLevel severity; - if (EventLoggerBase::event_lookup(eventType,eventCategory,threshold,severity)) + EventLoggerBase::EventTextFunction textF; + if (EventLoggerBase::event_lookup(eventType,eventCategory,threshold,severity,textF)) return; SubscriberPtr ptr; diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index ddc2b926dc4..7ca7a9d72e0 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -6311,6 +6311,7 @@ Dbdict::createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr) void Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) { + Uint32 attrid_map[MAX_ATTRIBUTES_IN_INDEX]; Uint32 k; jam(); const CreateIndxReq* const req = 
&opPtr.p->m_request; @@ -6379,39 +6380,49 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) // tree node size in words (make configurable later) indexPtr.p->tupKeyLength = MAX_TTREE_NODE_SIZE; } - // hash index attributes must currently be in table order - Uint32 prevAttrId = RNIL; + + AttributeMask mask; + mask.clear(); for (k = 0; k < opPtr.p->m_attrList.sz; k++) { jam(); - bool found = false; - for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) { - AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr); - tAttr = aRec->nextAttrInTable; - if (aRec->attributeId != opPtr.p->m_attrList.id[k]) + unsigned current_id= opPtr.p->m_attrList.id[k]; + AttributeRecord* aRec= NULL; + Uint32 tAttr= tablePtr.p->firstAttribute; + for (; tAttr != RNIL; tAttr= aRec->nextAttrInTable) + { + aRec = c_attributeRecordPool.getPtr(tAttr); + if (aRec->attributeId != current_id) continue; jam(); - found = true; - const Uint32 a = aRec->attributeDescriptor; - if (indexPtr.p->isHashIndex()) { - const Uint32 s1 = AttributeDescriptor::getSize(a); - const Uint32 s2 = AttributeDescriptor::getArraySize(a); - indexPtr.p->tupKeyLength += ((1 << s1) * s2 + 31) >> 5; - } + break; } - if (! 
found) { + if (tAttr == RNIL) { jam(); opPtr.p->m_errorCode = CreateIndxRef::BadRequestType; opPtr.p->m_errorLine = __LINE__; return; } - if (indexPtr.p->isHashIndex() && - k > 0 && prevAttrId >= opPtr.p->m_attrList.id[k]) { + if (mask.get(current_id)) + { jam(); - opPtr.p->m_errorCode = CreateIndxRef::InvalidAttributeOrder; + opPtr.p->m_errorCode = CreateIndxRef::DuplicateAttributes; opPtr.p->m_errorLine = __LINE__; return; } - prevAttrId = opPtr.p->m_attrList.id[k]; + mask.set(current_id); + + const Uint32 a = aRec->attributeDescriptor; + unsigned kk= k; + if (indexPtr.p->isHashIndex()) { + const Uint32 s1 = AttributeDescriptor::getSize(a); + const Uint32 s2 = AttributeDescriptor::getArraySize(a); + indexPtr.p->tupKeyLength += ((1 << s1) * s2 + 31) >> 5; + // reorder the attributes according to the tableid order + // for unque indexes + for (; kk > 0 && current_id < attrid_map[kk-1]>>16; kk--) + attrid_map[kk]= attrid_map[kk-1]; + } + attrid_map[kk]= k | (current_id << 16); } indexPtr.p->noOfPrimkey = indexPtr.p->noOfAttributes; // plus concatenated primary table key attribute @@ -6431,12 +6442,17 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) // write index key attributes AttributeRecordPtr aRecPtr; c_attributeRecordPool.getPtr(aRecPtr, tablePtr.p->firstAttribute); - for (k = 0; k < opPtr.p->m_attrList.sz; k++) { + for (unsigned k = 0; k < opPtr.p->m_attrList.sz; k++) { + // insert the attributes in the order decided above in attrid_map + // k is new order, current_id is in previous order + // ToDo: make sure "current_id" is stored with the table and + // passed up to NdbDictionary + unsigned current_id= opPtr.p->m_attrList.id[attrid_map[k] & 0xffff]; jam(); for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) { AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr); tAttr = aRec->nextAttrInTable; - if (aRec->attributeId != opPtr.p->m_attrList.id[k]) + if (aRec->attributeId != current_id) continue; jam(); const 
Uint32 a = aRec->attributeDescriptor; diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp index bc1700c12ad..761f959acdc 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp @@ -1837,12 +1837,8 @@ int Dbtup::interpreterNextLab(Signal* signal, tmpHabitant = attrId; } + // get type attrId >>= 16; - AttributeHeader ah(tmpArea[0]); - - const char* s1 = (char*)&tmpArea[1]; - const char* s2 = (char*)&TcurrentProgram[TprogramCounter+1]; - Uint32 attrLen = (4 * ah.getDataSize()); Uint32 TattrDescrIndex = tabptr.p->tabDescriptor + (attrId << ZAD_LOG_SIZE); Uint32 TattrDesc1 = tableDescriptor[TattrDescrIndex].tabDescr; @@ -1855,56 +1851,67 @@ int Dbtup::interpreterNextLab(Signal* signal, cs = tabptr.p->charsetArray[pos]; } const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(typeId); - + + // get data + AttributeHeader ah(tmpArea[0]); + const char* s1 = (char*)&tmpArea[1]; + const char* s2 = (char*)&TcurrentProgram[TprogramCounter+1]; + // fixed length in 5.0 + Uint32 attrLen = AttributeDescriptor::getSizeInBytes(TattrDesc1); + bool r1_null = ah.isNULL(); bool r2_null = argLen == 0; - int res; - if(r1_null || r2_null) - { - res = r1_null && r2_null ? 0 : r1_null ? -1 : 1; - } - else - { - /* --------------------------------------------------------- */ - // If length of argument rounded to nearest word is - // the same as attribute size, use that as argument size - /* --------------------------------------------------------- */ - if ((((argLen + 3) >> 2) << 2) == attrLen) argLen= attrLen; - res = (*sqlType.m_cmp)(cs, s1, attrLen, s2, argLen, true); - } - + int res1; + if (cond != Interpreter::LIKE && + cond != Interpreter::NOT_LIKE) { + if (r1_null || r2_null) { + // NULL==NULL and NULL<not-NULL + res1 = r1_null && r2_null ? 0 : r1_null ? 
-1 : 1; + } else { + res1 = (*sqlType.m_cmp)(cs, s1, attrLen, s2, argLen, true); + } + } else { + if (r1_null || r2_null) { + // NULL like NULL is true (has no practical use) + res1 = r1_null && r2_null ? 0 : -1; + } else { + res1 = (*sqlType.m_like)(cs, s1, attrLen, s2, argLen); + } + } + + int res = 0; switch ((Interpreter::BinaryCondition)cond) { case Interpreter::EQ: - res = (res == 0); + res = (res1 == 0); break; case Interpreter::NE: - res = (res != 0); + res = (res1 != 0); break; // note the condition is backwards case Interpreter::LT: - res = (res > 0); + res = (res1 > 0); break; case Interpreter::LE: - res = (res >= 0); + res = (res1 >= 0); break; case Interpreter::GT: - res = (res < 0); + res = (res1 < 0); break; case Interpreter::GE: - res = (res <= 0); + res = (res1 <= 0); break; case Interpreter::LIKE: - res = NdbSqlUtil::char_like(s1, attrLen, s2, argLen, false); + res = (res1 == 0); break; case Interpreter::NOT_LIKE: - res = ! NdbSqlUtil::char_like(s1, attrLen, s2, argLen, false); + res = (res1 == 1); break; // XXX handle invalid value } #ifdef TRACE_INTERPRETER - ndbout_c("cond=%u diff=%d vc=%d nopad=%d attr(%d) = >%.*s<(%d) str=>%.*s<(%d) -> res = %d", - cond, diff, vchr, nopad, - attrId >> 16, attrLen, s1, attrLen, argLen, s2, argLen, res); + ndbout_c("cond=%u attr(%d)='%.*s'(%d) str='%.*s'(%d) res1=%d res=%d", + cond, attrId >> 16, + attrLen, s1, attrLen, argLen, s2, argLen, res1, res); #endif if (res) TprogramCounter = brancher(theInstruction, TprogramCounter); diff --git a/ndb/src/kernel/blocks/suma/Suma.cpp b/ndb/src/kernel/blocks/suma/Suma.cpp index 754832cd954..d982c2b96f8 100644 --- a/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/ndb/src/kernel/blocks/suma/Suma.cpp @@ -338,6 +338,7 @@ SumaParticipant::execCONTINUEB(Signal* signal) void Suma::execAPI_FAILREQ(Signal* signal) { jamEntry(); + DBUG_ENTER("Suma::execAPI_FAILREQ"); Uint32 failedApiNode = signal->theData[0]; //BlockReference retRef = signal->theData[1]; @@ -348,11 +349,13 @@ void 
Suma::execAPI_FAILREQ(Signal* signal) jam(); c_failedApiNodes.clear(failedApiNode); } + DBUG_VOID_RETURN; }//execAPI_FAILREQ() bool SumaParticipant::removeSubscribersOnNode(Signal *signal, Uint32 nodeId) { + DBUG_ENTER("SumaParticipant::removeSubscribersOnNode"); bool found = false; SubscriberPtr i_subbPtr; @@ -372,20 +375,15 @@ SumaParticipant::removeSubscribersOnNode(Signal *signal, Uint32 nodeId) jam(); sendSubStopReq(signal); } - return found; + DBUG_RETURN(found); } void SumaParticipant::sendSubStopReq(Signal *signal){ + DBUG_ENTER("SumaParticipant::sendSubStopReq"); static bool remove_lock = false; jam(); - if(remove_lock) { - jam(); - return; - } - remove_lock = true; - SubscriberPtr subbPtr; c_removeDataSubscribers.first(subbPtr); if (subbPtr.isNull()){ @@ -398,9 +396,15 @@ SumaParticipant::sendSubStopReq(Signal *signal){ c_failedApiNodes.clear(); remove_lock = false; - return; + DBUG_VOID_RETURN; } + if(remove_lock) { + jam(); + DBUG_VOID_RETURN; + } + remove_lock = true; + SubscriptionPtr subPtr; c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI); @@ -414,6 +418,7 @@ SumaParticipant::sendSubStopReq(Signal *signal){ req->part = SubscriptionData::TableData; sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB); + DBUG_VOID_RETURN; } void @@ -452,6 +457,8 @@ SumaParticipant::execSUB_STOP_REF(Signal* signal){ jamEntry(); SubStopRef * const ref = (SubStopRef*)signal->getDataPtr(); + DBUG_ENTER("SumaParticipant::execSUB_STOP_REF"); + Uint32 subscriptionId = ref->subscriptionId; Uint32 subscriptionKey = ref->subscriptionKey; Uint32 part = ref->part; @@ -471,11 +478,14 @@ SumaParticipant::execSUB_STOP_REF(Signal* signal){ req->part = part; sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB); + + DBUG_VOID_RETURN; } void Suma::execNODE_FAILREP(Signal* signal){ jamEntry(); + DBUG_ENTER("Suma::execNODE_FAILREP"); NodeFailRep * const rep = (NodeFailRep*)signal->getDataPtr(); @@ -541,6 +551,7 @@ 
Suma::execNODE_FAILREP(Signal* signal){ c_aliveNodes.clear(nodePtr.p->nodeId); // this has to be done after the loop above } } + DBUG_VOID_RETURN; } void @@ -610,6 +621,19 @@ Suma::execSIGNAL_DROPPED_REP(Signal* signal){ * */ +static unsigned +count_subscribers(const DLList<SumaParticipant::Subscriber> &subs) +{ + unsigned n= 0; + SumaParticipant::SubscriberPtr i_subbPtr; + subs.first(i_subbPtr); + while(!i_subbPtr.isNull()){ + n++; + subs.next(i_subbPtr); + } + return n; +} + void Suma::execDUMP_STATE_ORD(Signal* signal){ jamEntry(); @@ -664,6 +688,15 @@ Suma::execDUMP_STATE_ORD(Signal* signal){ infoEvent("Suma: c_dataBufferPool size: %d free: %d", c_dataBufferPool.getSize(), c_dataBufferPool.getNoOfFree()); + + infoEvent("Suma: c_metaSubscribers count: %d", + count_subscribers(c_metaSubscribers)); + infoEvent("Suma: c_dataSubscribers count: %d", + count_subscribers(c_dataSubscribers)); + infoEvent("Suma: c_prepDataSubscribers count: %d", + count_subscribers(c_prepDataSubscribers)); + infoEvent("Suma: c_removeDataSubscribers count: %d", + count_subscribers(c_removeDataSubscribers)); } } @@ -1429,7 +1462,7 @@ SumaParticipant::execDIGETPRIMCONF(Signal* signal){ void SumaParticipant::execCREATE_TRIG_CONF(Signal* signal){ jamEntry(); - + DBUG_ENTER("SumaParticipant::execCREATE_TRIG_CONF"); CRASH_INSERTION(13009); CreateTrigConf * const conf = (CreateTrigConf*)signal->getDataPtr(); @@ -1442,6 +1475,7 @@ SumaParticipant::execCREATE_TRIG_CONF(Signal* signal){ * dodido * @todo: I (Johan) dont know what to do here. Jonas, what do you mean? 
*/ + DBUG_VOID_RETURN; } void @@ -1453,7 +1487,7 @@ SumaParticipant::execCREATE_TRIG_REF(Signal* signal){ void SumaParticipant::execDROP_TRIG_CONF(Signal* signal){ jamEntry(); - + DBUG_ENTER("SumaParticipant::execDROP_TRIG_CONF"); CRASH_INSERTION(13010); DropTrigConf * const conf = (DropTrigConf*)signal->getDataPtr(); @@ -1461,17 +1495,19 @@ SumaParticipant::execDROP_TRIG_CONF(Signal* signal){ const Uint32 senderData = conf->getConnectionPtr(); SyncRecord* tmp = c_syncPool.getPtr(senderData); tmp->runDROP_TRIG_CONF(signal); + DBUG_VOID_RETURN; } void SumaParticipant::execDROP_TRIG_REF(Signal* signal){ jamEntry(); - + DBUG_ENTER("SumaParticipant::execDROP_TRIG_CONF"); DropTrigRef * const ref = (DropTrigRef*)signal->getDataPtr(); const Uint32 senderData = ref->getConnectionPtr(); SyncRecord* tmp = c_syncPool.getPtr(senderData); tmp->runDROP_TRIG_CONF(signal); + DBUG_VOID_RETURN; } /************************************************************************* @@ -2799,7 +2835,7 @@ SumaParticipant::decideWhoToSend(Uint32 nBucket, Uint32 gci){ void SumaParticipant::execFIRE_TRIG_ORD(Signal* signal){ jamEntry(); - + DBUG_ENTER("SumaParticipant::execFIRE_TRIG_ORD"); CRASH_INSERTION(13016); FireTrigOrd* const trg = (FireTrigOrd*)signal->getDataPtr(); const Uint32 trigId = trg->getTriggerId(); @@ -2960,6 +2996,8 @@ SumaParticipant::execFIRE_TRIG_ORD(Signal* signal){ */ f_bufferLock = 0; b_bufferLock = 0; + + DBUG_VOID_RETURN; } void diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp index ead9e68526c..c156a26500c 100644 --- a/ndb/src/kernel/main.cpp +++ b/ndb/src/kernel/main.cpp @@ -92,10 +92,6 @@ int main(int argc, char** argv) } } - globalTransporterRegistry.set_mgm_handle(theConfig - ->get_config_retriever() - ->get_mgmHandle()); - #ifndef NDB_WIN32 for(pid_t child = fork(); child != 0; child = fork()){ /** @@ -203,16 +199,6 @@ int main(int argc, char** argv) globalEmulatorData.m_socket_server->startServer(); - struct ndb_mgm_reply mgm_reply; - for(unsigned int 
i=0;i<globalTransporterRegistry.m_transporter_interface.size();i++) - ndb_mgm_set_connection_int_parameter(theConfig->get_config_retriever()->get_mgmHandle(), - globalTransporterRegistry.get_localNodeId(), - globalTransporterRegistry.m_transporter_interface[i].m_remote_nodeId, - CFG_CONNECTION_SERVER_PORT, - globalTransporterRegistry.m_transporter_interface[i].m_service_port, - &mgm_reply); - - // theConfig->closeConfiguration(); globalEmulatorData.theThreadConfig->ipControlLoop(); diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp index de78a4e927c..3170939f8d8 100644 --- a/ndb/src/kernel/vm/Configuration.cpp +++ b/ndb/src/kernel/vm/Configuration.cpp @@ -88,13 +88,6 @@ static void usage() my_print_help(my_long_options); my_print_variables(my_long_options); } -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) -{ - return ndb_std_get_one_option(optid, opt, - argument ? argument : "d:t:O,/tmp/ndbd.trace"); -} bool Configuration::init(int argc, char** argv) @@ -103,7 +96,11 @@ Configuration::init(int argc, char** argv) load_defaults("my",load_default_groups,&argc,&argv); int ho_error; - if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) +#ifndef DBUG_OFF + opt_debug= "d:t:O,/tmp/ndbd.trace"; +#endif + if ((ho_error=handle_options(&argc, &argv, my_long_options, + ndb_std_get_one_option))) exit(ho_error); if (_no_daemon) { @@ -156,6 +153,7 @@ Configuration::Configuration() _daemonMode = false; m_config_retriever= 0; m_clusterConfig= 0; + m_clusterConfigIter= 0; } Configuration::~Configuration(){ @@ -372,6 +370,8 @@ Configuration::setupConfiguration(){ ConfigValues* cf = ConfigValuesFactory::extractCurrentSection(iter.m_config); + if(m_clusterConfigIter) + ndb_mgm_destroy_iterator(m_clusterConfigIter); m_clusterConfigIter = ndb_mgm_create_configuration_iterator (p, CFG_SECTION_NODE); diff --git a/ndb/src/mgmapi/LocalConfig.cpp 
b/ndb/src/mgmapi/LocalConfig.cpp index 0265f982df3..75ad8b40a1f 100644 --- a/ndb/src/mgmapi/LocalConfig.cpp +++ b/ndb/src/mgmapi/LocalConfig.cpp @@ -27,7 +27,9 @@ LocalConfig::LocalConfig(){ bool LocalConfig::init(const char *connectString, - const char *fileName) { + const char *fileName) +{ + DBUG_ENTER("LocalConfig::init"); /** * Escalation: * 1. Check connectString @@ -38,21 +40,25 @@ LocalConfig::init(const char *connectString, * 6. Check defaultConnectString */ + _ownNodeId= 0; + //1. Check connectString if(connectString != 0 && connectString[0] != 0){ if(readConnectString(connectString, "connect string")){ - return true; - } - return false; + if (ids.size()) + DBUG_RETURN(true); + // only nodeid given, continue to find hosts + } else + DBUG_RETURN(false); } //2. Check given filename if (fileName && strlen(fileName) > 0) { bool fopenError; if(readFile(fileName, fopenError)){ - return true; + DBUG_RETURN(true); } - return false; + DBUG_RETURN(false); } //3. Check environment variable @@ -60,9 +66,9 @@ LocalConfig::init(const char *connectString, if(NdbEnv_GetEnv("NDB_CONNECTSTRING", buf, sizeof(buf)) && strlen(buf) != 0){ if(readConnectString(buf, "NDB_CONNECTSTRING")){ - return true; + DBUG_RETURN(true); } - return false; + DBUG_RETURN(false); } //4. Check Ndb.cfg in NDB_HOME @@ -71,9 +77,9 @@ LocalConfig::init(const char *connectString, char *buf= NdbConfig_NdbCfgName(1 /*true*/); NdbAutoPtr<char> tmp_aptr(buf); if(readFile(buf, fopenError)) - return true; + DBUG_RETURN(true); if (!fopenError) - return false; + DBUG_RETURN(false); } //5. Check Ndb.cfg in cwd @@ -82,9 +88,9 @@ LocalConfig::init(const char *connectString, char *buf= NdbConfig_NdbCfgName(0 /*false*/); NdbAutoPtr<char> tmp_aptr(buf); if(readFile(buf, fopenError)) - return true; + DBUG_RETURN(true); if (!fopenError) - return false; + DBUG_RETURN(false); } //7. 
Check @@ -92,12 +98,12 @@ LocalConfig::init(const char *connectString, char buf[256]; BaseString::snprintf(buf, sizeof(buf), "host=localhost:%s", NDB_PORT); if(readConnectString(buf, "default connect string")) - return true; + DBUG_RETURN(true); } setError(0, ""); - return false; + DBUG_RETURN(false); } LocalConfig::~LocalConfig(){ @@ -142,6 +148,7 @@ const char *nodeIdTokens[] = { const char *hostNameTokens[] = { "host://%[^:]:%i", "host=%[^:]:%i", + "mgmd=%[^:]:%i", "%[^:^=^ ]:%i", "%s %i", 0 @@ -207,36 +214,22 @@ LocalConfig::parseString(const char * connectString, BaseString &err){ char * copy = strdup(connectString); NdbAutoPtr<char> tmp_aptr(copy); - bool b_nodeId = false; - bool found_other = false; - for (char *tok = strtok_r(copy,";,",&for_strtok); tok != 0; tok = strtok_r(NULL, ";,", &for_strtok)) { if (tok[0] == '#') continue; - if (!b_nodeId) // only one nodeid definition allowed - if (b_nodeId = parseNodeId(tok)) + if (!_ownNodeId) // only one nodeid definition allowed + if (parseNodeId(tok)) continue; - if (found_other = parseHostName(tok)) + if (parseHostName(tok)) continue; - if (found_other = parseFileName(tok)) + if (parseFileName(tok)) continue; err.assfmt("Unexpected entry: \"%s\"", tok); return false; } - if (b_nodeId && !found_other) - { - BaseString tmp; - tmp.assfmt("host=localhost:%s", NDB_PORT); - if(parseHostName(tmp.c_str())) - return true; - - err.appfmt("Missing host/file name extry in \"%s\"", connectString); - return false; - } - return true; } diff --git a/ndb/src/mgmapi/Makefile.am b/ndb/src/mgmapi/Makefile.am index 2f2fb407e46..db730bf8c89 100644 --- a/ndb/src/mgmapi/Makefile.am +++ b/ndb/src/mgmapi/Makefile.am @@ -1,7 +1,7 @@ noinst_LTLIBRARIES = libmgmapi.la -libmgmapi_la_SOURCES = mgmapi.cpp mgmapi_configuration.cpp LocalConfig.cpp +libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp INCLUDES_LOC = -I$(top_srcdir)/ndb/include/mgmapi diff --git a/ndb/src/mgmapi/mgmapi.cpp 
b/ndb/src/mgmapi/mgmapi.cpp index 94ae55e0f51..bd4052f51d9 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -22,8 +22,8 @@ #include <NdbSleep.h> #include <NdbTCP.h> -#include "mgmapi.h" -#include "mgmapi_debug.h" +#include <mgmapi.h> +#include <mgmapi_debug.h> #include "mgmapi_configuration.hpp" #include <socket_io.h> @@ -151,7 +151,7 @@ ndb_mgm_create_handle() h->socket = NDB_INVALID_SOCKET; h->read_timeout = 50000; h->write_timeout = 100; - h->cfg_i = 0; + h->cfg_i = -1; strncpy(h->last_error_desc, "No error", NDB_MGM_MAX_ERR_DESC_SIZE); @@ -169,17 +169,18 @@ extern "C" int ndb_mgm_set_connectstring(NdbMgmHandle handle, const char * mgmsrv) { + DBUG_ENTER("ndb_mgm_set_connectstring"); new (&(handle->cfg)) LocalConfig; if (!handle->cfg.init(mgmsrv, 0) || handle->cfg.ids.size() == 0) { new (&(handle->cfg)) LocalConfig; - handle->cfg.init(0, 0); /* reset the LocalCongig */ + handle->cfg.init(0, 0); /* reset the LocalConfig */ SET_ERROR(handle, NDB_MGM_ILLEGAL_CONNECT_STRING, ""); - return -1; + DBUG_RETURN(-1); } - handle->cfg_i= 0; - return 0; + handle->cfg_i= -1; + DBUG_RETURN(0); } /** @@ -191,6 +192,10 @@ ndb_mgm_destroy_handle(NdbMgmHandle * handle) { if(!handle) return; + /** + * important! 
only disconnect if connected + * other code relies on this + */ if((* handle)->connected){ ndb_mgm_disconnect(* handle); } @@ -331,6 +336,17 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply, } /** + * Returns true if connected + */ +extern "C" +int ndb_mgm_is_connected(NdbMgmHandle handle) +{ + if(!handle) + return 0; + return handle->connected; +} + +/** * Connect to a management server */ extern "C" @@ -341,6 +357,7 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries, SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_connect"); CHECK_HANDLE(handle, -1); + DBUG_ENTER("ndb_mgm_connect"); #ifdef MGMAPI_LOG /** * Open the log file @@ -370,6 +387,13 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries, } if (sockfd != NDB_INVALID_SOCKET) break; +#ifndef DBUG_OFF + { + char buf[1024]; + DBUG_PRINT("info",("Unable to connect with connect string: %s", + cfg.makeConnectString(buf,sizeof(buf)))); + } +#endif if (verbose > 0) { char buf[1024]; ndbout_c("Unable to connect with connect string: %s", @@ -383,7 +407,7 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries, cfg.makeConnectString(buf,sizeof(buf))); if (verbose == -2) ndbout << ", failed." 
<< endl; - return -1; + DBUG_RETURN(-1); } if (verbose == -1) { ndbout << "Retrying every " << retry_delay_in_seconds << " seconds"; @@ -411,7 +435,7 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries, handle->socket = sockfd; handle->connected = 1; - return 0; + DBUG_RETURN(0); } /** @@ -1156,9 +1180,9 @@ ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId, return 0; } -extern "C" int -ndb_mgm_listen_event(NdbMgmHandle handle, const int filter[]) +ndb_mgm_listen_event_internal(NdbMgmHandle handle, const int filter[], + int parsable) { SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_listen_event"); const ParserRow<ParserDummy> stat_reply[] = { @@ -1180,6 +1204,9 @@ ndb_mgm_listen_event(NdbMgmHandle handle, const int filter[]) } Properties args; + + if (parsable) + args.put("parsable", parsable); { BaseString tmp; for(int i = 0; filter[i] != 0; i += 2){ @@ -1204,6 +1231,13 @@ ndb_mgm_listen_event(NdbMgmHandle handle, const int filter[]) } extern "C" +int +ndb_mgm_listen_event(NdbMgmHandle handle, const int filter[]) +{ + return ndb_mgm_listen_event_internal(handle,filter,0); +} + +extern "C" int ndb_mgm_get_stat_port(NdbMgmHandle handle, struct ndb_mgm_reply* /*reply*/) { @@ -1747,13 +1781,19 @@ ndb_mgm_get_configuration_nodeid(NdbMgmHandle handle) extern "C" int ndb_mgm_get_connected_port(NdbMgmHandle handle) { - return handle->cfg.ids[handle->cfg_i].port; + if (handle->cfg_i >= 0) + return handle->cfg.ids[handle->cfg_i].port; + else + return 0; } extern "C" const char *ndb_mgm_get_connected_host(NdbMgmHandle handle) { - return handle->cfg.ids[handle->cfg_i].name.c_str(); + if (handle->cfg_i >= 0) + return handle->cfg.ids[handle->cfg_i].name.c_str(); + else + return 0; } extern "C" @@ -2063,7 +2103,7 @@ ndb_mgm_set_connection_int_parameter(NdbMgmHandle handle, int node1, int node2, int param, - unsigned value, + int value, struct ndb_mgm_reply* mgmreply){ DBUG_ENTER("ndb_mgm_set_connection_int_parameter"); CHECK_HANDLE(handle, 0); @@ -2073,7 
+2113,7 @@ ndb_mgm_set_connection_int_parameter(NdbMgmHandle handle, args.put("node1", node1); args.put("node2", node2); args.put("param", param); - args.put("value", value); + args.put("value", (Uint32)value); const ParserRow<ParserDummy> reply[]= { MGM_CMD("set connection parameter reply", NULL, ""), @@ -2097,7 +2137,7 @@ ndb_mgm_set_connection_int_parameter(NdbMgmHandle handle, } while(0); delete prop; - return res; + DBUG_RETURN(res); } extern "C" @@ -2106,11 +2146,11 @@ ndb_mgm_get_connection_int_parameter(NdbMgmHandle handle, int node1, int node2, int param, - Uint32 *value, + int *value, struct ndb_mgm_reply* mgmreply){ DBUG_ENTER("ndb_mgm_get_connection_int_parameter"); CHECK_HANDLE(handle, -1); - CHECK_CONNECTED(handle, -1); + CHECK_CONNECTED(handle, -2); Properties args; args.put("node1", node1); @@ -2126,7 +2166,7 @@ ndb_mgm_get_connection_int_parameter(NdbMgmHandle handle, const Properties *prop; prop = ndb_mgm_call(handle, reply, "get connection parameter", &args); - CHECK_REPLY(prop, -2); + CHECK_REPLY(prop, -3); int res= -1; do { @@ -2138,14 +2178,34 @@ ndb_mgm_get_connection_int_parameter(NdbMgmHandle handle, res= 0; } while(0); - if(!prop->get("value",value)){ + if(!prop->get("value",(Uint32*)value)){ ndbout_c("Unable to get value"); - res = -3; + res = -4; } delete prop; DBUG_RETURN(res); } +extern "C" +NDB_SOCKET_TYPE +ndb_mgm_convert_to_transporter(NdbMgmHandle handle) +{ + NDB_SOCKET_TYPE s; + + CHECK_HANDLE(handle, NDB_INVALID_SOCKET); + CHECK_CONNECTED(handle, NDB_INVALID_SOCKET); + + handle->connected= 0; // we pretend we're disconnected + s= handle->socket; + + SocketOutputStream s_output(s); + s_output.println("transporter connect"); + s_output.println(""); + + ndb_mgm_destroy_handle(&handle); // set connected=0, so won't disconnect + + return s; +} template class Vector<const ParserRow<ParserDummy>*>; diff --git a/ndb/src/mgmapi/mgmapi_configuration.hpp b/ndb/src/mgmapi/mgmapi_configuration.hpp index 9e94b3311bf..7d60a4842a1 100644 --- 
a/ndb/src/mgmapi/mgmapi_configuration.hpp +++ b/ndb/src/mgmapi/mgmapi_configuration.hpp @@ -1,3 +1,19 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + #ifndef MGMAPI_CONFIGURATION_HPP #define MGMAPI_CONFIGURATION_HPP diff --git a/ndb/src/mgmapi/ndb_logevent.cpp b/ndb/src/mgmapi/ndb_logevent.cpp new file mode 100644 index 00000000000..2817abcfdbb --- /dev/null +++ b/ndb/src/mgmapi/ndb_logevent.cpp @@ -0,0 +1,483 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include <ndb_global.h> +#include <my_sys.h> +#include <mgmapi.h> + +#include <NdbOut.hpp> +#include <Properties.hpp> +#include <socket_io.h> +#include <InputStream.hpp> + +#include <debugger/EventLogger.hpp> + +#include "ndb_logevent.hpp" + +extern +int ndb_mgm_listen_event_internal(NdbMgmHandle, const int filter[], int); + +struct ndb_logevent_error_msg { + enum ndb_logevent_handle_error code; + const char *msg; +}; + +struct ndb_logevent_error_msg ndb_logevent_error_messages[]= { + { NDB_LEH_READ_ERROR, "Read error" }, + { NDB_LEH_MISSING_EVENT_SPECIFIER, "Missing event specifier" }, + { NDB_LEH_UNKNOWN_EVENT_VARIABLE, "Unknown event variable" }, + { NDB_LEH_UNKNOWN_EVENT_TYPE, "Unknown event type" }, + { NDB_LEH_INTERNAL_ERROR, "Unknown internal error" }, + { NDB_LEH_NO_ERROR,0} +}; + +struct ndb_logevent_handle { + NDB_SOCKET_TYPE socket; + enum ndb_logevent_handle_error m_error; +}; + +extern "C" +NdbLogEventHandle +ndb_mgm_create_logevent_handle(NdbMgmHandle mh, + const int filter[]) +{ + int fd= ndb_mgm_listen_event_internal(mh, filter, 1); + + if (fd == -1) + return 0; + + NdbLogEventHandle h= + (NdbLogEventHandle)my_malloc(sizeof(ndb_logevent_handle),MYF(MY_WME)); + + h->socket= fd; + + return h; +} + +extern "C" +void ndb_mgm_destroy_logevent_handle(NdbLogEventHandle * h) +{ + if( !h ) + return; + + if ( *h ) + close((*h)->socket); + + my_free((char*)* h,MYF(MY_ALLOW_ZERO_PTR)); + * h = 0; +} + +#define ROW(a,b,c,d) \ +{ NDB_LE_ ## a, b, c, 0, offsetof(struct ndb_logevent, a.d), \ + sizeof(((struct ndb_logevent *)0)->a.d) } + +#define ROW_FN(a,b,c,d,e) \ +{ NDB_LE_ ## a, b, c, e, offsetof(struct ndb_logevent, a.d), \ + sizeof(((struct ndb_logevent *)0)->a.d) } + +static int ref_to_node(int ref){ + return ref & 0xFFFF; +} + +struct 
Ndb_logevent_body_row ndb_logevent_body[]= { + + // Connection + ROW( Connected, "node", 1, node), + + ROW( Disconnected, "node", 1, node), + + ROW( CommunicationClosed, "node", 1, node), + + ROW( CommunicationOpened, "node", 1, node), + + ROW( ConnectedApiVersion, "node", 1, node), + ROW( ConnectedApiVersion, "version", 2, version), + + /* CHECKPOINT */ + + ROW( GlobalCheckpointStarted, "gci", 1, gci), + + ROW( GlobalCheckpointCompleted, "gci", 1, gci), + + ROW( LocalCheckpointStarted, "lci", 1, lci), + ROW( LocalCheckpointStarted, "keep_gci", 2, keep_gci), + ROW( LocalCheckpointStarted, "restore_gci", 3, restore_gci), + + ROW( LocalCheckpointCompleted, "lci", 1, lci), + + ROW( LCPStoppedInCalcKeepGci, "data", 1, data), + + ROW( LCPFragmentCompleted, "node", 1, node), + ROW( LCPFragmentCompleted, "table_id", 2, table_id), + ROW( LCPFragmentCompleted, "fragment_id", 3, fragment_id), + + ROW( UndoLogBlocked, "acc_count", 1, acc_count), + ROW( UndoLogBlocked, "tup_count", 2, tup_count), + + /* STARTUP */ + ROW( NDBStartStarted, "version", 1, version), + + ROW( NDBStartCompleted, "version", 1, version), + +// ROW( STTORRYRecieved), + + ROW( StartPhaseCompleted, "phase", 1, phase), + ROW( StartPhaseCompleted, "starttype", 2, starttype), + + ROW( CM_REGCONF, "own_id", 1, own_id), + ROW( CM_REGCONF, "president_id", 2, president_id), + ROW( CM_REGCONF, "dynamic_id", 3, dynamic_id), + + ROW( CM_REGREF, "own_id", 1, own_id), + ROW( CM_REGREF, "other_id", 2, other_id), + ROW( CM_REGREF, "cause", 3, cause), + + ROW( FIND_NEIGHBOURS, "own_id", 1, own_id), + ROW( FIND_NEIGHBOURS, "left_id", 3, left_id), + ROW( FIND_NEIGHBOURS, "right_id", 3, right_id), + ROW( FIND_NEIGHBOURS, "dynamic_id", 4, dynamic_id), + + ROW( NDBStopStarted, "stoptype", 1, stoptype), + +// ROW( NDBStopAborted), + + ROW( StartREDOLog, "node", 1, node), + ROW( StartREDOLog, "keep_gci", 2, keep_gci), + ROW( StartREDOLog, "completed_gci", 3, completed_gci), + ROW( StartREDOLog, "restorable_gci", 4, 
restorable_gci), + + ROW( StartLog, "log_part", 1, log_part), + ROW( StartLog, "start_mb", 2, start_mb), + ROW( StartLog, "stop_mb", 3, stop_mb), + ROW( StartLog, "gci", 4, gci), + + ROW( UNDORecordsExecuted, "block", 1, block), + ROW( UNDORecordsExecuted, "data1", 2, data1), + ROW( UNDORecordsExecuted, "data2", 3, data2), + ROW( UNDORecordsExecuted, "data3", 4, data3), + ROW( UNDORecordsExecuted, "data4", 5, data4), + ROW( UNDORecordsExecuted, "data5", 6, data5), + ROW( UNDORecordsExecuted, "data6", 7, data6), + ROW( UNDORecordsExecuted, "data7", 8, data7), + ROW( UNDORecordsExecuted, "data8", 9, data8), + ROW( UNDORecordsExecuted, "data9", 10, data9), + ROW( UNDORecordsExecuted, "data10", 11, data10), + + /* NODERESTART */ +// ROW( NR_CopyDict), + +// ROW( NR_CopyDistr), + + ROW( NR_CopyFragsStarted, "dest_node", 1, dest_node), + + ROW( NR_CopyFragDone, "dest_node", 1, dest_node), + ROW( NR_CopyFragDone, "table_id", 2, table_id), + ROW( NR_CopyFragDone, "fragment_id", 3, fragment_id), + + ROW( NR_CopyFragsCompleted, "dest_node", 1, dest_node), + + ROW( NodeFailCompleted, "block", 1, block), /* 0 = all */ + ROW( NodeFailCompleted, "failed_node", 2, failed_node), + ROW( NodeFailCompleted, "completing_node", 3, completing_node), /* 0 = all */ + + ROW( NODE_FAILREP, "failed_node", 1, failed_node), + ROW( NODE_FAILREP, "failure_state", 2, failure_state), + + /* TODO */ + ROW( ArbitState, "code", 1, code), + ROW( ArbitState, "arbit_node", 2, arbit_node), + ROW( ArbitState, "ticket_0", 3, ticket_0), + ROW( ArbitState, "ticket_1", 4, ticket_1), + + /* TODO */ + ROW( ArbitResult, "code", 1, code), + ROW( ArbitResult, "arbit_node", 2, arbit_node), + ROW( ArbitResult, "ticket_0", 3, ticket_0), + ROW( ArbitResult, "ticket_1", 4, ticket_1), + +// ROW( GCP_TakeoverStarted), + +// ROW( GCP_TakeoverCompleted), + +// ROW( LCP_TakeoverStarted), + + ROW( LCP_TakeoverCompleted, "state", 1, state), + + /* STATISTIC */ + ROW( TransReportCounters, "trans_count", 1, trans_count), + ROW( 
TransReportCounters, "commit_count", 2, commit_count), + ROW( TransReportCounters, "read_count", 3, read_count), + ROW( TransReportCounters, "simple_read_count", 4, simple_read_count), + ROW( TransReportCounters, "write_count", 5, write_count), + ROW( TransReportCounters, "attrinfo_count", 6, attrinfo_count), + ROW( TransReportCounters, "conc_op_count", 7, conc_op_count), + ROW( TransReportCounters, "abort_count", 8, abort_count), + ROW( TransReportCounters, "scan_count", 9, scan_count), + ROW( TransReportCounters, "range_scan_count", 10, range_scan_count), + + ROW( OperationReportCounters, "ops", 1, ops), + + ROW( TableCreated, "table_id", 1, table_id), + + ROW( JobStatistic, "mean_loop_count", 1, mean_loop_count), + + ROW( SendBytesStatistic, "to_node", 1, to_node), + ROW( SendBytesStatistic, "mean_sent_bytes", 2, mean_sent_bytes), + + ROW( ReceiveBytesStatistic, "from_node", 1, from_node), + ROW( ReceiveBytesStatistic, "mean_received_bytes", 2, mean_received_bytes), + + ROW( MemoryUsage, "gth", 1, gth), + ROW( MemoryUsage, "page_size_kb", 2, page_size_kb), + ROW( MemoryUsage, "pages_used", 3, pages_used), + ROW( MemoryUsage, "pages_total", 4, pages_total), + ROW( MemoryUsage, "block", 5, block), + + /* ERROR */ + ROW( TransporterError, "to_node", 1, to_node), + ROW( TransporterError, "code", 2, code), + + ROW( TransporterWarning, "to_node", 1, to_node), + ROW( TransporterWarning, "code", 2, code), + + ROW( MissedHeartbeat, "node", 1, node), + ROW( MissedHeartbeat, "count", 2, count), + + ROW( DeadDueToHeartbeat, "node", 1, node), + + /* TODO */ +// ROW( WarningEvent), + + /* INFO */ + ROW( SentHeartbeat, "node", 1, node), + + ROW( CreateLogBytes, "node", 1, node), + + /* TODO */ +// ROW( InfoEvent), + + // Backup + ROW_FN( BackupStarted, "starting_node", 1, starting_node, ref_to_node), + ROW( BackupStarted, "backup_id", 2, backup_id), + + ROW_FN(BackupFailedToStart,"starting_node",1, starting_node, ref_to_node), + ROW( BackupFailedToStart, "error", 2, error), + 
+ ROW_FN( BackupCompleted, "starting_node", 1, starting_node, ref_to_node), + ROW( BackupCompleted, "backup_id", 2, backup_id), + ROW( BackupCompleted, "start_gci", 3, start_gci), + ROW( BackupCompleted, "stop_gci", 4, stop_gci), + ROW( BackupCompleted, "n_bytes", 5, n_bytes), + ROW( BackupCompleted, "n_records", 6, n_records), + ROW( BackupCompleted, "n_log_bytes", 7, n_log_bytes), + ROW( BackupCompleted, "n_log_records", 8, n_log_records), + + ROW_FN( BackupAborted, "starting_node", 1, starting_node, ref_to_node), + ROW( BackupAborted, "backup_id", 2, backup_id), + ROW( BackupAborted, "error", 3, error), + + { NDB_LE_ILLEGAL_TYPE, 0, 0, 0, 0, 0} +}; + +struct Ndb_logevent_header_row { + const char *token; // token to use for text transfer + int offset; // offset into struct ndb_logevent + int size; +}; + +#define ROW2(a,b) \ +{ a, offsetof(struct ndb_logevent, b), \ + sizeof(((struct ndb_logevent *)0)->b) } + +struct Ndb_logevent_header_row ndb_logevent_header[]= { + ROW2( "type", type), + ROW2( "time", time), + ROW2( "source_nodeid", source_nodeid), + { 0, 0, 0 } +}; + +static int +insert_row(const char * pair, Properties & p){ + BaseString tmp(pair); + + tmp.trim(" \t\n\r"); + Vector<BaseString> split; + tmp.split(split, ":=", 2); + if(split.size() != 2) + return -1; + p.put(split[0].trim().c_str(), split[1].trim().c_str()); + + return 0; +} + +static +int memcpy_atoi(void *dst, const char *str, int sz) +{ + switch (sz) + { + case 1: + { + Int8 val= atoi(str); + memcpy(dst,&val,sz); + return 0; + } + case 2: + { + Int16 val= atoi(str); + memcpy(dst,&val,sz); + return 0; + } + case 4: + { + Int32 val= atoi(str); + memcpy(dst,&val,sz); + return 0; + } + case 8: + { + Int64 val= atoi(str); + memcpy(dst,&val,sz); + return 0; + } + default: + { + return -1; + } + } +} + +extern "C" +int ndb_logevent_get_next(const NdbLogEventHandle h, + struct ndb_logevent *dst, + unsigned timeout_in_milliseconds) +{ + SocketInputStream in(h->socket, timeout_in_milliseconds); + + 
Properties p; + char buf[256]; + + /* header */ + while (1) { + if (in.gets(buf,sizeof(buf)) == 0) + { + h->m_error= NDB_LEH_READ_ERROR; + return -1; + } + if ( buf[0] == 0 ) + { + // timed out + return 0; + } + if ( strcmp("log event reply\n", buf) == 0 ) + break; + ndbout_c("skipped: %s", buf); + } + + /* read name-value pairs into properties object */ + while (1) + { + if (in.gets(buf,sizeof(buf)) == 0) + { + h->m_error= NDB_LEH_READ_ERROR; + return -1; + } + if ( buf[0] == 0 ) + { + // timed out + return 0; + } + if ( buf[0] == '\n' ) + { + break; + } + if (insert_row(buf,p)) + { + h->m_error= NDB_LEH_READ_ERROR; + return -1; + } + } + + int i; + const char *val; + + dst->type= (enum Ndb_logevent_type)-1; + /* fill in header info from p*/ + for (i= 0; ndb_logevent_header[i].token; i++) + { + if ( p.get(ndb_logevent_header[i].token, &val) == 0 ) + { + ndbout_c("missing: %s\n", ndb_logevent_header[i].token); + h->m_error= NDB_LEH_MISSING_EVENT_SPECIFIER; + return -1; + } + if ( memcpy_atoi((char *)dst+ndb_logevent_header[i].offset, val, + ndb_logevent_header[i].size) ) + { + h->m_error= NDB_LEH_INTERNAL_ERROR; + return -1; + } + } + + Uint32 level; + LogLevel::EventCategory category; + Logger::LoggerLevel severity; + EventLoggerBase::EventTextFunction text_fn; + + /* fill in rest of header info event_lookup */ + if (EventLoggerBase::event_lookup(dst->type,category,level,severity,text_fn)) + { + ndbout_c("unknown type: %d\n", dst->type); + h->m_error= NDB_LEH_UNKNOWN_EVENT_TYPE; + return -1; + } + dst->category= (enum ndb_mgm_event_category)category; + dst->severity= (enum ndb_mgm_event_severity)severity; + dst->level= level; + + /* fill in header info from p */ + for (i= 0; ndb_logevent_body[i].token; i++) + { + if ( ndb_logevent_body[i].type != dst->type ) + continue; + if ( p.get(ndb_logevent_body[i].token, &val) == 0 ) + { + h->m_error= NDB_LEH_UNKNOWN_EVENT_VARIABLE; + return -1; + } + if ( memcpy_atoi((char *)dst+ndb_logevent_body[i].offset, val, + 
ndb_logevent_body[i].size) ) + { + h->m_error= NDB_LEH_INTERNAL_ERROR; + return -1; + } + } + return 1; +} + +extern "C" +int ndb_logevent_get_latest_error(const NdbLogEventHandle h) +{ + return h->m_error; +} + +extern "C" +const char *ndb_logevent_get_latest_error_msg(const NdbLogEventHandle h) +{ + for (int i= 0; ndb_logevent_error_messages[i].msg; i++) + if (ndb_logevent_error_messages[i].code == h->m_error) + return ndb_logevent_error_messages[i].msg; + return "<unknown error msg>"; +} diff --git a/ndb/src/mgmapi/ndb_logevent.hpp b/ndb/src/mgmapi/ndb_logevent.hpp new file mode 100644 index 00000000000..cb1a0e388e5 --- /dev/null +++ b/ndb/src/mgmapi/ndb_logevent.hpp @@ -0,0 +1,34 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef NDB_LOGEVENT_HPP +#define NDB_LOGEVENT_HPP + +#include <ndb_logevent.h> + +struct Ndb_logevent_body_row { + enum Ndb_logevent_type type; // type + const char *token; // token to use for text transfer + int index; // index into theData array + int (*index_fn)(int); // conversion function on the data array[index] + int offset; // offset into struct ndb_logevent + int size; // offset into struct ndb_logevent +}; + +extern +struct Ndb_logevent_body_row ndb_logevent_body[]; + +#endif diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp index f49f6ad3e6e..389fc08b97c 100644 --- a/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/ndb/src/mgmclient/CommandInterpreter.cpp @@ -389,7 +389,7 @@ CommandInterpreter::CommandInterpreter(const char *_host,int verbose) } m_mgmsrv2 = ndb_mgm_create_handle(); if(m_mgmsrv2 == NULL) { - ndbout_c("Cannot create handle to management server."); + ndbout_c("Cannot create 2:nd handle to management server."); exit(-1); } if (ndb_mgm_set_connectstring(m_mgmsrv, _host)) @@ -482,17 +482,20 @@ event_thread_run(void* m) bool CommandInterpreter::connect() { + DBUG_ENTER("CommandInterpreter::connect"); if(!m_connected) { if(!ndb_mgm_connect(m_mgmsrv, try_reconnect-1, 5, 1)) { const char *host= ndb_mgm_get_connected_host(m_mgmsrv); unsigned port= ndb_mgm_get_connected_port(m_mgmsrv); - if(!ndb_mgm_set_connectstring(m_mgmsrv2, - BaseString(host).appfmt(":%d",port).c_str()) - && + BaseString constr; + constr.assfmt("%s:%d",host,port); + if(!ndb_mgm_set_connectstring(m_mgmsrv2, constr.c_str()) && !ndb_mgm_connect(m_mgmsrv2, try_reconnect-1, 5, 1)) { + DBUG_PRINT("info",("2:ndb connected to Management Server ok at: %s:%d", + host, port)); assert(m_event_thread == 0); assert(do_event_thread == 0); 
do_event_thread= 0; @@ -503,6 +506,7 @@ CommandInterpreter::connect() NDB_THREAD_PRIO_LOW); if (m_event_thread != 0) { + DBUG_PRINT("info",("Thread created ok, waiting for started...")); int iter= 1000; // try for 30 seconds while(do_event_thread == 0 && iter-- > 0) @@ -512,15 +516,25 @@ CommandInterpreter::connect() do_event_thread == 0 || do_event_thread == -1) { + DBUG_PRINT("warning",("thread not started")); printf("Warning, event thread startup failed, degraded printouts as result\n"); do_event_thread= 0; } } else { + DBUG_PRINT("warning", + ("Could not do 2:nd connect to mgmtserver for event listening")); + DBUG_PRINT("info", ("code: %d, msg: %s", + ndb_mgm_get_latest_error(m_mgmsrv2), + ndb_mgm_get_latest_error_msg(m_mgmsrv2))); printf("Warning, event connect failed, degraded printouts as result\n"); + printf("code: %d, msg: %s\n", + ndb_mgm_get_latest_error(m_mgmsrv2), + ndb_mgm_get_latest_error_msg(m_mgmsrv2)); } m_connected= true; + DBUG_PRINT("info",("Connected to Management Server at: %s:%d", host, port)); if (m_verbose) { printf("Connected to Management Server at: %s:%d\n", @@ -528,7 +542,7 @@ CommandInterpreter::connect() } } } - return m_connected; + DBUG_RETURN(m_connected); } bool diff --git a/ndb/src/mgmclient/main.cpp b/ndb/src/mgmclient/main.cpp index 73f0bad86c0..19c84f6ec8d 100644 --- a/ndb/src/mgmclient/main.cpp +++ b/ndb/src/mgmclient/main.cpp @@ -87,13 +87,6 @@ static void usage() my_print_help(my_long_options); my_print_variables(my_long_options); } -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) -{ - return ndb_std_get_one_option(optid, opt, argument ? 
argument : - "d:t:O,/tmp/ndb_mgm.trace"); -} static int read_and_execute(int _try_reconnect) @@ -136,7 +129,11 @@ int main(int argc, char** argv){ load_defaults("my",load_default_groups,&argc,&argv); int ho_error; - if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) +#ifndef DBUG_OFF + opt_debug= "d:t:O,/tmp/ndb_mgm.trace"; +#endif + if ((ho_error=handle_options(&argc, &argv, my_long_options, + ndb_std_get_one_option))) exit(ho_error); char buf[MAXHOSTNAMELEN+10]; diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp index d2682d7dd58..5556855b9f7 100644 --- a/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/ndb/src/mgmsrv/ConfigInfo.cpp @@ -3132,8 +3132,8 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){ const Properties * node; require(ctx.m_config->get("Node", id1, &node)); + BaseString hostname(hostName1); - // require(node->get("HostName", hostname)); if (hostname.c_str()[0] == 0) { ctx.reportError("Hostname required on nodeid %d since it will " @@ -3142,29 +3142,51 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){ } Uint32 port= 0; - if (!node->get("ServerPort", &port) && - !ctx.m_userProperties.get("ServerPort_", id1, &port)) { - Uint32 adder= 0; - { - BaseString server_port_adder(hostname); - server_port_adder.append("_ServerPortAdder"); - ctx.m_userProperties.get(server_port_adder.c_str(), &adder); - ctx.m_userProperties.put(server_port_adder.c_str(), adder+1, true); - } + const char * type1; + const char * type2; + const Properties * node2; + + node->get("Type", &type1); + ctx.m_config->get("Node", id2, &node2); + node2->get("Type", &type2); + + if(strcmp(type1, MGM_TOKEN)==0) + node->get("PortNumber",&port); + else if(strcmp(type2, MGM_TOKEN)==0) + node2->get("PortNumber",&port); + if (!port && + !node->get("ServerPort", &port) && + !ctx.m_userProperties.get("ServerPort_", id1, &port)) + { Uint32 base= 0; - if (!ctx.m_userProperties.get("ServerPortBase", &base)){ - 
if(!(ctx.m_userDefaults && + /* + * If the connection doesn't involve an mgm server, + * and a default port number has been set, behave the old + * way of allocating port numbers for transporters. + */ + if(ctx.m_userDefaults && ctx.m_userDefaults->get("PortNumber", &base)) + { + Uint32 adder= 0; + { + BaseString server_port_adder(hostname); + server_port_adder.append("_ServerPortAdder"); + ctx.m_userProperties.get(server_port_adder.c_str(), &adder); + ctx.m_userProperties.put(server_port_adder.c_str(), adder+1, true); + } + + if (!ctx.m_userProperties.get("ServerPortBase", &base)){ + if(!(ctx.m_userDefaults && ctx.m_userDefaults->get("PortNumber", &base)) && - !ctx.m_systemDefaults->get("PortNumber", &base)) { - base= strtoll(NDB_TCP_BASE_PORT,0,0); - // ctx.reportError("Cannot retrieve base port number"); - // return false; + !ctx.m_systemDefaults->get("PortNumber", &base)) { + base= strtoll(NDB_TCP_BASE_PORT,0,0); + } + ctx.m_userProperties.put("ServerPortBase", base); } - ctx.m_userProperties.put("ServerPortBase", base); + + port= base + adder; + ctx.m_userProperties.put("ServerPort_", id1, port); } - port= base + adder; - ctx.m_userProperties.put("ServerPort_", id1, port); } if(ctx.m_currentSection->contains("PortNumber")) { @@ -3177,6 +3199,7 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){ { ctx.m_currentSection->put("PortNumber", port); } + DBUG_PRINT("info", ("connection %d-%d port %d host %s", id1, id2, port, hostname.c_str())); diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 5eb7fe33e48..e915216c793 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -189,16 +189,16 @@ MgmtSrvr::logLevelThreadRun() void MgmtSrvr::startEventLog() { + NdbMutex_Lock(m_configMutex); + g_eventLogger.setCategory("MgmSrvr"); - ndb_mgm_configuration_iterator * iter = ndb_mgm_create_configuration_iterator - ((ndb_mgm_configuration*)_config->m_configValues, CFG_SECTION_NODE); - if(iter == 0) - return ; 
- - if(ndb_mgm_find(iter, CFG_NODE_ID, _ownNodeId) != 0){ - ndb_mgm_destroy_iterator(iter); - return ; + ndb_mgm_configuration_iterator + iter(* _config->m_configValues, CFG_SECTION_NODE); + + if(iter.find(CFG_NODE_ID, _ownNodeId) != 0){ + NdbMutex_Unlock(m_configMutex); + return; } const char * tmp; @@ -206,10 +206,10 @@ MgmtSrvr::startEventLog() char *clusterLog= NdbConfig_ClusterLogFileName(_ownNodeId); NdbAutoPtr<char> tmp_aptr(clusterLog); - if(ndb_mgm_get_string_parameter(iter, CFG_LOG_DESTINATION, &tmp) == 0){ + if(iter.get(CFG_LOG_DESTINATION, &tmp) == 0){ logdest.assign(tmp); } - ndb_mgm_destroy_iterator(iter); + NdbMutex_Unlock(m_configMutex); if(logdest.length() == 0 || logdest == "") { logdest.assfmt("FILE:filename=%s,maxsize=1000000,maxfiles=6", @@ -343,42 +343,41 @@ MgmtSrvr::getNodeCount(enum ndb_mgm_node_type type) const } int -MgmtSrvr::getPort() const { - const Properties *mgmProps; - - ndb_mgm_configuration_iterator * iter = - ndb_mgm_create_configuration_iterator(_config->m_configValues, - CFG_SECTION_NODE); - if(iter == 0) +MgmtSrvr::getPort() const +{ + if(NdbMutex_Lock(m_configMutex)) return 0; - if(ndb_mgm_find(iter, CFG_NODE_ID, getOwnNodeId()) != 0){ + ndb_mgm_configuration_iterator + iter(* _config->m_configValues, CFG_SECTION_NODE); + + if(iter.find(CFG_NODE_ID, getOwnNodeId()) != 0){ ndbout << "Could not retrieve configuration for Node " << getOwnNodeId() << " in config file." << endl << "Have you set correct NodeId for this node?" << endl; - ndb_mgm_destroy_iterator(iter); + NdbMutex_Unlock(m_configMutex); return 0; } unsigned type; - if(ndb_mgm_get_int_parameter(iter, CFG_TYPE_OF_SECTION, &type) != 0 || + if(iter.get(CFG_TYPE_OF_SECTION, &type) != 0 || type != NODE_TYPE_MGM){ ndbout << "Local node id " << getOwnNodeId() << " is not defined as management server" << endl << "Have you set correct NodeId for this node?" 
<< endl; - ndb_mgm_destroy_iterator(iter); + NdbMutex_Unlock(m_configMutex); return 0; } Uint32 port = 0; - if(ndb_mgm_get_int_parameter(iter, CFG_MGM_PORT, &port) != 0){ + if(iter.get(CFG_MGM_PORT, &port) != 0){ ndbout << "Could not find PortNumber in the configuration file." << endl; - ndb_mgm_destroy_iterator(iter); + NdbMutex_Unlock(m_configMutex); return 0; } - ndb_mgm_destroy_iterator(iter); - + NdbMutex_Unlock(m_configMutex); + return port; } @@ -472,14 +471,14 @@ MgmtSrvr::MgmtSrvr(SocketServer *socket_server, { ndb_mgm_configuration_iterator - *iter = ndb_mgm_create_configuration_iterator(_config->m_configValues, - CFG_SECTION_NODE); - for(ndb_mgm_first(iter); ndb_mgm_valid(iter); ndb_mgm_next(iter)){ + iter(* _config->m_configValues, CFG_SECTION_NODE); + + for(iter.first(); iter.valid(); iter.next()){ unsigned type, id; - if(ndb_mgm_get_int_parameter(iter, CFG_TYPE_OF_SECTION, &type) != 0) + if(iter.get(CFG_TYPE_OF_SECTION, &type) != 0) continue; - if(ndb_mgm_get_int_parameter(iter, CFG_NODE_ID, &id) != 0) + if(iter.get(CFG_NODE_ID, &id) != 0) continue; MGM_REQUIRE(id < MAX_NODES); @@ -502,7 +501,6 @@ MgmtSrvr::MgmtSrvr(SocketServer *socket_server, break; } } - ndb_mgm_destroy_iterator(iter); } _props = NULL; @@ -578,7 +576,7 @@ MgmtSrvr::start(BaseString &error_string) } } theFacade= TransporterFacade::theFacadeInstance - = new TransporterFacade(m_config_retriever->get_mgmHandle()); + = new TransporterFacade(); if(theFacade == 0) { DEBUG("MgmtSrvr.cpp: theFacade is NULL."); @@ -606,6 +604,26 @@ MgmtSrvr::start(BaseString &error_string) theFacade = 0; return false; } + + TransporterRegistry *reg = theFacade->get_registry(); + for(unsigned int i=0;i<reg->m_transporter_interface.size();i++) { + BaseString msg; + DBUG_PRINT("info",("Setting dynamic port %d->%d : %d", + reg->get_localNodeId(), + reg->m_transporter_interface[i].m_remote_nodeId, + reg->m_transporter_interface[i].m_s_service_port + ) + ); + int res = 
setConnectionDbParameter((int)reg->get_localNodeId(), + (int)reg->m_transporter_interface[i] + .m_remote_nodeId, + (int)CFG_CONNECTION_SERVER_PORT, + reg->m_transporter_interface[i] + .m_s_service_port, + msg); + DBUG_PRINT("info",("Set result: %d: %s",res,msg.c_str())); + } + _ownReference = numberToRef(_blockNumber, _ownNodeId); @@ -1864,8 +1882,6 @@ void MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) { // The way of handling a received signal is taken from the Ndb class. - int returnCode; - int gsn = signal->readSignalNumber(); switch (gsn) { @@ -2167,8 +2183,13 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, int r_config_addr= -1; unsigned type_c= 0; + if(NdbMutex_Lock(m_configMutex)) + { + error_string.appfmt("unable to lock configuration mutex"); + return false; + } ndb_mgm_configuration_iterator - iter(*(ndb_mgm_configuration *)_config->m_configValues, CFG_SECTION_NODE); + iter(* _config->m_configValues, CFG_SECTION_NODE); for(iter.first(); iter.valid(); iter.next()) { unsigned tmp= 0; if(iter.get(CFG_NODE_ID, &tmp)) abort(); @@ -2235,6 +2256,7 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, "Suggest specifying node id in connectstring,\n" "or specifying unique host names in config file.", id_found, tmp); + NdbMutex_Unlock(m_configMutex); DBUG_RETURN(false); } if (config_hostname == 0) { @@ -2247,6 +2269,7 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, } id_found= tmp; // mgmt server matched, check for more matches } + NdbMutex_Unlock(m_configMutex); if (id_found) { @@ -2659,13 +2682,18 @@ MgmtSrvr::Allocated_resources::get_nodeid() const int MgmtSrvr::setDbParameter(int node, int param, const char * value, BaseString& msg){ + + if(NdbMutex_Lock(m_configMutex)) + return -1; + /** * Check parameter */ - ndb_mgm_configuration_iterator iter(* _config->m_configValues, - CFG_SECTION_NODE); + ndb_mgm_configuration_iterator + iter(* _config->m_configValues, CFG_SECTION_NODE); if(iter.first() != 0){ msg.assign("Unable to find node section (iter.first())"); + 
NdbMutex_Unlock(m_configMutex); return -1; } @@ -2673,16 +2701,19 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value, if(node != 0){ if(iter.find(CFG_NODE_ID, node) != 0){ msg.assign("Unable to find node (iter.find())"); + NdbMutex_Unlock(m_configMutex); return -1; } if(iter.get(CFG_TYPE_OF_SECTION, &type) != 0){ msg.assign("Unable to get node type(iter.get(CFG_TYPE_OF_SECTION))"); + NdbMutex_Unlock(m_configMutex); return -1; } } else { do { if(iter.get(CFG_TYPE_OF_SECTION, &type) != 0){ msg.assign("Unable to get node type(iter.get(CFG_TYPE_OF_SECTION))"); + NdbMutex_Unlock(m_configMutex); return -1; } if(type == NODE_TYPE_DB) @@ -2693,6 +2724,7 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value, if(type != NODE_TYPE_DB){ msg.assfmt("Invalid node type or no such node (%d %d)", type, NODE_TYPE_DB); + NdbMutex_Unlock(m_configMutex); return -1; } @@ -2718,6 +2750,7 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value, break; } msg.assign("Could not get parameter"); + NdbMutex_Unlock(m_configMutex); return -1; } while(0); @@ -2755,6 +2788,7 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value, } while(node == 0 && iter.next() == 0); msg.assign("Success"); + NdbMutex_Unlock(m_configMutex); return 0; } @@ -2768,12 +2802,18 @@ MgmtSrvr::setConnectionDbParameter(int node1, DBUG_ENTER("MgmtSrvr::setConnectionDbParameter"); - ndb_mgm_configuration_iterator iter(* _config->m_configValues, - CFG_SECTION_CONNECTION); + if(NdbMutex_Lock(m_configMutex)) + { + DBUG_RETURN(-1); + } + + ndb_mgm_configuration_iterator + iter(* _config->m_configValues, CFG_SECTION_CONNECTION); if(iter.first() != 0){ msg.assign("Unable to find connection section (iter.first())"); - return -1; + NdbMutex_Unlock(m_configMutex); + DBUG_RETURN(-1); } for(;iter.valid();iter.next()) { @@ -2786,12 +2826,14 @@ MgmtSrvr::setConnectionDbParameter(int node1, } if(!iter.valid()) { msg.assign("Unable to find connection between nodes"); - return -1; + 
NdbMutex_Unlock(m_configMutex); + DBUG_RETURN(-2); } - if(iter.get(param, ¤t_value) < 0) { + if(iter.get(param, ¤t_value) != 0) { msg.assign("Unable to get current value of parameter"); - return -1; + NdbMutex_Unlock(m_configMutex); + DBUG_RETURN(-3); } ConfigValues::Iterator i2(_config->m_configValues->m_config, @@ -2799,16 +2841,19 @@ MgmtSrvr::setConnectionDbParameter(int node1, if(i2.set(param, (unsigned)value) == false) { msg.assign("Unable to set new value of parameter"); - return -1; + NdbMutex_Unlock(m_configMutex); + DBUG_RETURN(-4); } - if(iter.get(param, &new_value) < 0) { + if(iter.get(param, &new_value) != 0) { msg.assign("Unable to get parameter after setting it."); - return -1; + NdbMutex_Unlock(m_configMutex); + DBUG_RETURN(-5); } msg.assfmt("%u -> %u",current_value,new_value); - return 1; + NdbMutex_Unlock(m_configMutex); + DBUG_RETURN(1); } @@ -2816,20 +2861,26 @@ int MgmtSrvr::getConnectionDbParameter(int node1, int node2, int param, - unsigned *value, + int *value, BaseString& msg){ DBUG_ENTER("MgmtSrvr::getConnectionDbParameter"); - ndb_mgm_configuration_iterator iter(* _config->m_configValues, - CFG_SECTION_CONNECTION); + if(NdbMutex_Lock(m_configMutex)) + { + DBUG_RETURN(-1); + } + + ndb_mgm_configuration_iterator + iter(* _config->m_configValues, CFG_SECTION_CONNECTION); if(iter.first() != 0){ msg.assign("Unable to find connection section (iter.first())"); - return -1; + NdbMutex_Unlock(m_configMutex); + DBUG_RETURN(-1); } for(;iter.valid();iter.next()) { - Uint32 n1,n2; + Uint32 n1=0,n2=0; iter.get(CFG_CONNECTION_NODE_1, &n1); iter.get(CFG_CONNECTION_NODE_2, &n2); if((n1 == (unsigned)node1 && n2 == (unsigned)node2) @@ -2838,18 +2889,43 @@ MgmtSrvr::getConnectionDbParameter(int node1, } if(!iter.valid()) { msg.assign("Unable to find connection between nodes"); - return -1; + NdbMutex_Unlock(m_configMutex); + DBUG_RETURN(-1); } - if(iter.get(param, value) < 0) { + if(iter.get(param, (Uint32*)value) != 0) { msg.assign("Unable to get current 
value of parameter"); - return -1; + NdbMutex_Unlock(m_configMutex); + DBUG_RETURN(-1); } - msg.assfmt("%u",*value); + msg.assfmt("%d",*value); + NdbMutex_Unlock(m_configMutex); DBUG_RETURN(1); } +void MgmtSrvr::transporter_connect(NDB_SOCKET_TYPE sockfd) +{ + if (theFacade->get_registry()->connect_server(sockfd)) + { + /** + * Force an update_connections() so that the + * ClusterMgr and TransporterFacade is up to date + * with the new connection. + * Important for correct node id reservation handling + */ + NdbMutex_Lock(theFacade->theMutexPtr); + theFacade->get_registry()->update_connections(); + NdbMutex_Unlock(theFacade->theMutexPtr); + } +} + +int MgmtSrvr::set_connect_string(const char *str) +{ + return ndb_mgm_set_connectstring(m_config_retriever->get_mgmHandle(),str); +} + + template class Vector<SigMatch>; #if __SUNPRO_CC != 0x560 template bool SignalQueue::waitFor<SigMatch>(Vector<SigMatch>&, SigMatch**, NdbApiSignal**, unsigned); diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index 4742d5f6426..95298630230 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -49,6 +49,7 @@ class Ndb_mgmd_event_service : public EventLoggerBase public: struct Event_listener : public EventLoggerBase { NDB_SOCKET_TYPE m_socket; + Uint32 m_parsable; }; private: @@ -510,10 +511,14 @@ public: int setConnectionDbParameter(int node1, int node2, int param, int value, BaseString& msg); int getConnectionDbParameter(int node1, int node2, int param, - unsigned *value, BaseString& msg); + int *value, BaseString& msg); + int set_connect_string(const char *str); + + void transporter_connect(NDB_SOCKET_TYPE sockfd); + + ConfigRetriever *get_config_retriever() { return m_config_retriever; }; - const char *get_connect_address(Uint32 node_id) { return inet_ntoa(m_connect_address[node_id]); } void get_connected_nodes(NodeBitmask &connected_nodes) const; SocketServer *get_socket_server() { return m_socket_server; } diff --git 
a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index f1456157ac5..fdfe2f92aca 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -31,6 +31,7 @@ #include <mgmapi_configuration.hpp> #include <Vector.hpp> #include "Services.hpp" +#include "../mgmapi/ndb_logevent.hpp" extern bool g_StopServer; @@ -256,12 +257,15 @@ ParserRow<MgmApiSession> commands[] = { MGM_CMD("listen event", &MgmApiSession::listen_event, ""), MGM_ARG("node", Int, Optional, "Node"), + MGM_ARG("parsable", Int, Optional, "Parsable"), MGM_ARG("filter", String, Mandatory, "Event category"), MGM_CMD("purge stale sessions", &MgmApiSession::purge_stale_sessions, ""), MGM_CMD("check connection", &MgmApiSession::check_connection, ""), + MGM_CMD("transporter connect", &MgmApiSession::transporter_connect, ""), + MGM_END() }; @@ -314,7 +318,7 @@ MgmApiSession::runSession() { break; } } - if(m_socket >= 0) + if(m_socket != NDB_INVALID_SOCKET) NDB_CLOSE_SOCKET(m_socket); } @@ -568,11 +572,13 @@ MgmApiSession::getConfig_common(Parser_t::Context &, } } + NdbMutex_Lock(m_mgmsrv.m_configMutex); const ConfigValues * cfg = &conf->m_configValues->m_config; const Uint32 size = cfg->getPackedSize(); UtilBuffer src; cfg->pack(src); + NdbMutex_Unlock(m_mgmsrv.m_configMutex); BaseString str; int res = base64_encode(src, str); @@ -1249,25 +1255,49 @@ Ndb_mgmd_event_service::log(int eventType, const Uint32* theData, NodeId nodeId) Uint32 threshold; LogLevel::EventCategory cat; Logger::LoggerLevel severity; + EventLoggerBase::EventTextFunction textF; int i; DBUG_ENTER("Ndb_mgmd_event_service::log"); DBUG_PRINT("enter",("eventType=%d, nodeid=%d", eventType, nodeId)); - if (EventLoggerBase::event_lookup(eventType,cat,threshold,severity)) + if (EventLoggerBase::event_lookup(eventType,cat,threshold,severity,textF)) DBUG_VOID_RETURN; char m_text[256]; - EventLogger::getText(m_text, sizeof(m_text), eventType, theData, nodeId); + EventLogger::getText(m_text, sizeof(m_text), + textF, theData, 
nodeId); + + BaseString str("log event reply\n"); + str.appfmt("type=%d\n", eventType); + str.appfmt("time=%d\n", 0); + str.appfmt("source_nodeid=%d\n", nodeId); + for (i= 0; ndb_logevent_body[i].token; i++) + { + if ( ndb_logevent_body[i].type != eventType) + continue; + int val= theData[ndb_logevent_body[i].index]; + if (ndb_logevent_body[i].index_fn) + val= (*(ndb_logevent_body[i].index_fn))(val); + str.appfmt("%s=%d\n",ndb_logevent_body[i].token, val); + } - Vector<NDB_SOCKET_TYPE> copy; + Vector<NDB_SOCKET_TYPE> copy; m_clients.lock(); for(i = m_clients.size() - 1; i >= 0; i--){ if(threshold <= m_clients[i].m_logLevel.getLogLevel(cat)){ - if(m_clients[i].m_socket != NDB_INVALID_SOCKET && - println_socket(m_clients[i].m_socket, - MAX_WRITE_TIMEOUT, m_text) == -1){ - copy.push_back(m_clients[i].m_socket); - m_clients.erase(i, false); + if(m_clients[i].m_socket != NDB_INVALID_SOCKET) + { + int r; + if (m_clients[i].m_parsable) + r= println_socket(m_clients[i].m_socket, + MAX_WRITE_TIMEOUT, str.c_str()); + else + r= println_socket(m_clients[i].m_socket, + MAX_WRITE_TIMEOUT, m_text); + if (r == -1) { + copy.push_back(m_clients[i].m_socket); + m_clients.erase(i, false); + } } } } @@ -1373,7 +1403,7 @@ void MgmApiSession::getConnectionParameter(Parser_t::Context &ctx, Properties const &args) { BaseString node1, node2, param; - unsigned value = 0; + int value = 0; args.get("node1", node1); args.get("node2", node2); @@ -1387,7 +1417,7 @@ MgmApiSession::getConnectionParameter(Parser_t::Context &ctx, result); m_output->println("get connection parameter reply"); - m_output->println("value: %u", value); + m_output->println("value: %d", value); m_output->println("result: %s", (ret>0)?"Ok":result.c_str()); m_output->println(""); } @@ -1395,15 +1425,17 @@ MgmApiSession::getConnectionParameter(Parser_t::Context &ctx, void MgmApiSession::listen_event(Parser<MgmApiSession>::Context & ctx, Properties const & args) { - + Uint32 parsable= 0; BaseString node, param, value; 
args.get("node", node); args.get("filter", param); + args.get("parsable", &parsable); int result = 0; BaseString msg; Ndb_mgmd_event_service::Event_listener le; + le.m_parsable = parsable; le.m_socket = m_socket; Vector<BaseString> list; @@ -1510,5 +1542,17 @@ MgmApiSession::check_connection(Parser_t::Context &ctx, m_output->println(""); } +void +MgmApiSession::transporter_connect(Parser_t::Context &ctx, + Properties const &args) { + NDB_SOCKET_TYPE s= m_socket; + + m_stop= true; + m_stopped= true; // force a stop (no closing socket) + m_socket= NDB_INVALID_SOCKET; // so nobody closes it + + m_mgmsrv.transporter_connect(s); +} + template class MutexVector<int>; template class Vector<ParserRow<MgmApiSession> const*>; diff --git a/ndb/src/mgmsrv/Services.hpp b/ndb/src/mgmsrv/Services.hpp index d7334ee1c5f..e4fddea7d04 100644 --- a/ndb/src/mgmsrv/Services.hpp +++ b/ndb/src/mgmsrv/Services.hpp @@ -97,6 +97,8 @@ public: void purge_stale_sessions(Parser_t::Context &ctx, const class Properties &args); void check_connection(Parser_t::Context &ctx, const class Properties &args); + + void transporter_connect(Parser_t::Context &ctx, Properties const &args); void repCommand(Parser_t::Context &ctx, const class Properties &args); }; diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp index 67975f8539d..3335fdc827c 100644 --- a/ndb/src/mgmsrv/main.cpp +++ b/ndb/src/mgmsrv/main.cpp @@ -110,7 +110,7 @@ struct MgmGlobals { NodeId localNodeId; bool use_specific_ip; char * interface_name; - int port; + short unsigned int port; /** The Mgmt Server */ MgmtSrvr * mgmObject; @@ -175,14 +175,6 @@ static void usage() my_print_help(my_long_options); my_print_variables(my_long_options); } -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) -{ - ndb_std_get_one_option(optid, opt, argument ? 
argument : - "d:t:O,/tmp/ndb_mgmd.trace"); - return 0; -} /* * MAIN @@ -206,7 +198,11 @@ int main(int argc, char** argv) load_defaults("my",load_default_groups,&argc,&argv); int ho_error; - if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) +#ifndef DBUG_OFF + opt_debug= "d:t:O,/tmp/ndb_mgmd.trace"; +#endif + if ((ho_error=handle_options(&argc, &argv, my_long_options, + ndb_std_get_one_option))) exit(ho_error); if (glob.interactive || @@ -258,7 +254,7 @@ int main(int argc, char** argv) glob.interface_name = 0; } - if(!glob.socketServer->setup(mapi, glob.port, glob.interface_name)){ + if(!glob.socketServer->setup(mapi, &glob.port, glob.interface_name)){ ndbout_c("Unable to setup management port: %d!\n" "Please check if the port is already used,\n" "(perhaps a ndb_mgmd is already running),\n" @@ -267,13 +263,36 @@ int main(int argc, char** argv) delete mapi; goto error_end; } - + + /* Construct a fake connectstring to connect back to ourselves */ + char connect_str[20]; + if(!opt_connect_str) { + snprintf(connect_str,20,"localhost:%u",glob.mgmObject->getPort()); + opt_connect_str= connect_str; + } + glob.mgmObject->set_connect_string(opt_connect_str); + if(!glob.mgmObject->check_start()){ ndbout_c("Unable to check start management server."); ndbout_c("Probably caused by illegal initial configuration file."); goto error_end; } + /* + * Connect back to ourselves so we can use mgmapi to fetch + * config info + */ + int mgm_connect_result; + mgm_connect_result = glob.mgmObject->get_config_retriever()-> + do_connect(0,0,0); + + if(mgm_connect_result<0) { + ndbout_c("Unable to connect to our own ndb_mgmd (Error %d)", + mgm_connect_result); + ndbout_c("This is probably a bug."); + } + + if (glob.daemon) { // Become a daemon char *lockfile= NdbConfig_PidFileName(glob.localNodeId); diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp index 41ae532164b..183cb6488f8 100644 --- a/ndb/src/ndbapi/ClusterMgr.cpp +++ 
b/ndb/src/ndbapi/ClusterMgr.cpp @@ -410,7 +410,13 @@ ClusterMgr::reportConnected(NodeId nodeId){ theNode.connected = true; theNode.hbSent = 0; theNode.hbCounter = 0; - + + /** + * make sure the node itself is marked connected even + * if first API_REGCONF has not arrived + */ + theNode.m_state.m_connected_nodes.set(nodeId); + if (theNode.m_info.m_type != NodeInfo::REP) { theNode.hbFrequency = 0; } diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp index e7b36d6ee02..b047ae1bd1a 100644 --- a/ndb/src/ndbapi/Ndb.cpp +++ b/ndb/src/ndbapi/Ndb.cpp @@ -49,13 +49,15 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode) Uint32 tAnyAlive = 0; int TretCode= 0; + DBUG_ENTER("Ndb::doConnect"); + if (tConNode != 0) { TretCode = NDB_connect(tConNode); if ((TretCode == 1) || (TretCode == 2)) { //**************************************************************************** // We have connections now to the desired node. Return //**************************************************************************** - return getConnectedNdbTransaction(tConNode); + DBUG_RETURN(getConnectedNdbTransaction(tConNode)); } else if (TretCode != 0) { tAnyAlive = 1; }//if @@ -78,10 +80,13 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode) //**************************************************************************** // We have connections now to the desired node. Return //**************************************************************************** - return getConnectedNdbTransaction(tNode); + DBUG_RETURN(getConnectedNdbTransaction(tNode)); } else if (TretCode != 0) { tAnyAlive= 1; }//if + DBUG_PRINT("info",("tried node %d, TretCode %d, error code %d, %s", + tNode, TretCode, getNdbError().code, + getNdbError().message)); } } else // just do a regular round robin @@ -103,10 +108,11 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode) //**************************************************************************** // We have connections now to the desired node. 
Return //**************************************************************************** - return getConnectedNdbTransaction(tNode); + DBUG_RETURN(getConnectedNdbTransaction(tNode)); } else if (TretCode != 0) { tAnyAlive= 1; }//if + DBUG_PRINT("info",("tried node %d TretCode %d", tNode, TretCode)); } while (Tcount < tNoOfDbNodes); } //**************************************************************************** @@ -121,7 +127,7 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode) } else { theError.code = 4009; }//if - return NULL; + DBUG_RETURN(NULL); } int @@ -134,29 +140,31 @@ Ndb::NDB_connect(Uint32 tNode) int tReturnCode; TransporterFacade *tp = TransporterFacade::instance(); + DBUG_ENTER("Ndb::NDB_connect"); + bool nodeAvail = tp->get_node_alive(tNode); if(nodeAvail == false){ - return 0; + DBUG_RETURN(0); } NdbTransaction * tConArray = theConnectionArray[tNode]; if (tConArray != NULL) { - return 2; + DBUG_RETURN(2); } NdbTransaction * tNdbCon = getNdbCon(); // Get free connection object. if (tNdbCon == NULL) { - return 4; + DBUG_RETURN(4); }//if NdbApiSignal* tSignal = getSignal(); // Get signal object if (tSignal == NULL) { releaseNdbCon(tNdbCon); - return 4; + DBUG_RETURN(4); }//if if (tSignal->setSignal(GSN_TCSEIZEREQ) == -1) { releaseNdbCon(tNdbCon); releaseSignal(tSignal); - return 4; + DBUG_RETURN(4); }//if tSignal->setData(tNdbCon->ptr2int(), 1); //************************************************ @@ -192,13 +200,16 @@ Ndb::NDB_connect(Uint32 tNode) tNdbCon->setMyBlockReference(theMyRef); theConnectionArray[tNode] = tNdbCon; tNdbCon->theNext = tPrevFirst; - return 1; + DBUG_RETURN(1); } else { releaseNdbCon(tNdbCon); //**************************************************************************** // Unsuccessful connect is indicated by 3. 
//**************************************************************************** - return 3; + DBUG_PRINT("info", + ("unsuccessful connect tReturnCode %d, tNdbCon->Status() %d", + tReturnCode, tNdbCon->Status())); + DBUG_RETURN(3); }//if }//Ndb::NDB_connect() diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index 664d568aee0..4cc47543cec 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -1011,6 +1011,24 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col) out << "Type" << (Uint32)col.getType(); break; } + // show unusual (non-MySQL) array size + if (col.getLength() != 1) { + switch (col.getType()) { + case NdbDictionary::Column::Char: + case NdbDictionary::Column::Varchar: + case NdbDictionary::Column::Binary: + case NdbDictionary::Column::Varbinary: + case NdbDictionary::Column::Blob: + case NdbDictionary::Column::Text: + case NdbDictionary::Column::Bit: + case NdbDictionary::Column::Longvarchar: + case NdbDictionary::Column::Longvarbinary: + break; + default: + out << " [" << col.getLength() << "]"; + break; + } + } if (col.getPrimaryKey()) out << " PRIMARY KEY"; else if (! 
col.getNullable()) diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 06d526c81b9..dac70de788b 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -195,37 +195,38 @@ NdbColumnImpl::~NdbColumnImpl() bool NdbColumnImpl::equal(const NdbColumnImpl& col) const { + DBUG_ENTER("NdbColumnImpl::equal"); if(strcmp(m_name.c_str(), col.m_name.c_str()) != 0){ - return false; + DBUG_RETURN(false); } if(m_type != col.m_type){ - return false; + DBUG_RETURN(false); } if(m_pk != col.m_pk){ - return false; + DBUG_RETURN(false); } if(m_nullable != col.m_nullable){ - return false; + DBUG_RETURN(false); } if(m_pk){ if(m_distributionKey != col.m_distributionKey){ - return false; + DBUG_RETURN(false); } } if (m_precision != col.m_precision || m_scale != col.m_scale || m_length != col.m_length || m_cs != col.m_cs) { - return false; + DBUG_RETURN(false); } if (m_autoIncrement != col.m_autoIncrement){ - return false; + DBUG_RETURN(false); } if(strcmp(m_defaultValue.c_str(), col.m_defaultValue.c_str()) != 0){ - return false; + DBUG_RETURN(false); } - return true; + DBUG_RETURN(true); } NdbDictionary::Column * @@ -317,49 +318,62 @@ NdbTableImpl::init(){ bool NdbTableImpl::equal(const NdbTableImpl& obj) const { + DBUG_ENTER("NdbTableImpl::equal"); if ((m_internalName.c_str() == NULL) || (strcmp(m_internalName.c_str(), "") == 0) || (obj.m_internalName.c_str() == NULL) || (strcmp(obj.m_internalName.c_str(), "") == 0)) { // Shallow equal if(strcmp(getName(), obj.getName()) != 0){ - return false; + DBUG_PRINT("info",("name %s != %s",getName(),obj.getName())); + DBUG_RETURN(false); } } else // Deep equal if(strcmp(m_internalName.c_str(), obj.m_internalName.c_str()) != 0){ - return false; + { + DBUG_PRINT("info",("m_internalName %s != %s", + m_internalName.c_str(),obj.m_internalName.c_str())); + DBUG_RETURN(false); + } } if(m_fragmentType != obj.m_fragmentType){ - return false; + DBUG_PRINT("info",("m_fragmentType 
%d != %d",m_fragmentType,obj.m_fragmentType)); + DBUG_RETURN(false); } if(m_columns.size() != obj.m_columns.size()){ - return false; + DBUG_PRINT("info",("m_columns.size %d != %d",m_columns.size(),obj.m_columns.size())); + DBUG_RETURN(false); } for(unsigned i = 0; i<obj.m_columns.size(); i++){ if(!m_columns[i]->equal(* obj.m_columns[i])){ - return false; + DBUG_PRINT("info",("m_columns [%d] != [%d]",i,i)); + DBUG_RETURN(false); } } if(m_logging != obj.m_logging){ - return false; + DBUG_PRINT("info",("m_logging %d != %d",m_logging,obj.m_logging)); + DBUG_RETURN(false); } if(m_kvalue != obj.m_kvalue){ - return false; + DBUG_PRINT("info",("m_kvalue %d != %d",m_kvalue,obj.m_kvalue)); + DBUG_RETURN(false); } if(m_minLoadFactor != obj.m_minLoadFactor){ - return false; + DBUG_PRINT("info",("m_minLoadFactor %d != %d",m_minLoadFactor,obj.m_minLoadFactor)); + DBUG_RETURN(false); } if(m_maxLoadFactor != obj.m_maxLoadFactor){ - return false; + DBUG_PRINT("info",("m_maxLoadFactor %d != %d",m_maxLoadFactor,obj.m_maxLoadFactor)); + DBUG_RETURN(false); } - return true; + DBUG_RETURN(true); } void @@ -2159,26 +2173,6 @@ NdbDictInterface::createIndex(Ndb & ndb, } attributeList.id[i] = col->m_attrId; } - if (it == DictTabInfo::UniqueHashIndex) { - // Sort index attributes according to primary table (using insertion sort) - for(i = 1; i < attributeList.sz; i++) { - unsigned int temp = attributeList.id[i]; - unsigned int j = i; - while((j > 0) && (attributeList.id[j - 1] > temp)) { - attributeList.id[j] = attributeList.id[j - 1]; - j--; - } - attributeList.id[j] = temp; - } - // Check for illegal duplicate attributes - for(i = 0; i<attributeList.sz; i++) { - if ((i != (attributeList.sz - 1)) && - (attributeList.id[i] == attributeList.id[i+1])) { - m_error.code = 4258; - return -1; - } - } - } LinearSectionPtr ptr[3]; ptr[0].p = (Uint32*)&attributeList; ptr[0].sz = 1 + attributeList.sz; @@ -2549,6 +2543,7 @@ int NdbDictInterface::executeSubscribeEvent(class Ndb & ndb, NdbEventImpl & 
evnt) { + DBUG_ENTER("NdbDictInterface::executeSubscribeEvent"); NdbApiSignal tSignal(m_reference); // tSignal.theReceiversBlockNumber = SUMA; tSignal.theReceiversBlockNumber = DBDICT; @@ -2563,7 +2558,7 @@ NdbDictInterface::executeSubscribeEvent(class Ndb & ndb, sumaStart->subscriberData = evnt.m_bufferId & 0xFF; sumaStart->subscriberRef = m_reference; - return executeSubscribeEvent(&tSignal, NULL); + DBUG_RETURN(executeSubscribeEvent(&tSignal, NULL)); } int diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index 2d20a0a52db..59a5956715a 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -76,8 +76,10 @@ public: Uint32 m_attrSize; // element size (size when arraySize==1) Uint32 m_arraySize; // length or length+2 for Var* types Uint32 m_keyInfoPos; + // TODO: use bits in attr desc 2 bool getInterpretableType() const ; bool getCharType() const; + bool getStringType() const; bool getBlobType() const; /** @@ -468,6 +470,17 @@ NdbColumnImpl::getCharType() const { m_type == NdbDictionary::Column::Text || m_type == NdbDictionary::Column::Longvarchar); } + +inline +bool +NdbColumnImpl::getStringType() const { + return (m_type == NdbDictionary::Column::Char || + m_type == NdbDictionary::Column::Varchar || + m_type == NdbDictionary::Column::Longvarchar || + m_type == NdbDictionary::Column::Binary || + m_type == NdbDictionary::Column::Varbinary || + m_type == NdbDictionary::Column::Longvarbinary); +} inline bool @@ -547,7 +560,7 @@ NdbTableImpl::getColumn(const char * name){ do { if(hashValue == (tmp & 0xFFFE)){ NdbColumnImpl* col = cols[tmp >> 16]; - if(strcmp(name, col->m_name.c_str()) == 0){ + if(strncmp(name, col->m_name.c_str(), NDB_MAX_ATTR_NAME_SIZE-1) == 0){ return col; } } @@ -565,7 +578,7 @@ NdbTableImpl::getColumn(const char * name){ } else { for(Uint32 i = 0; i<sz; i++){ NdbColumnImpl* col = * cols++; - if(col != 0 && strcmp(name, col->m_name.c_str()) == 0) + if(col != 0 && 
strncmp(name, col->m_name.c_str(), NDB_MAX_ATTR_NAME_SIZE-1) == 0) return col; } } diff --git a/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 9cea3ec83cd..bafb8f7ca38 100644 --- a/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -92,10 +92,7 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N, NdbEventOperationImpl::~NdbEventOperationImpl() { int i; - if (sdata) NdbMem_Free(sdata); - for (i=0 ; i<3; i++) { - if (ptr[i].p) NdbMem_Free(ptr[i].p); - } + if (sdata) NdbMem_Free((char*)sdata); for (i=0 ; i<2; i++) { NdbRecAttr *p = theFirstRecAttrs[i]; while (p) { @@ -853,42 +850,49 @@ NdbGlobalEventBuffer::~NdbGlobalEventBuffer() // NdbMem_Deallocate(m_eventBufferIdToEventId); } void -NdbGlobalEventBuffer::real_init (NdbGlobalEventBufferHandle *h, +NdbGlobalEventBuffer::real_init (NdbGlobalEventBufferHandle *h, int MAX_NUMBER_ACTIVE_EVENTS) { - if (m_handlers.size() == 0) { // First init + DBUG_ENTER("NdbGlobalEventBuffer::real_init"); + DBUG_PRINT("enter",("m_handles.size()=%u %u", m_handlers.size(), h)); + if (m_handlers.size() == 0) + { // First init + DBUG_PRINT("info",("first to come")); m_max = MAX_NUMBER_ACTIVE_EVENTS; m_buf = new BufItem[m_max]; - // (BufItem *)NdbMem_Allocate(m_max*sizeof(BufItem)); - for (int i=0; i<m_max; i++) { m_buf[i].gId= 0; } } + assert(m_max == MAX_NUMBER_ACTIVE_EVENTS); // TODO make sure we don't hit roof - // m_handlers[m_nhandlers] = h; m_handlers.push_back(h); - // ndbout_c("NdbGlobalEventBuffer::real_init(), m_handles=%u %u", m_nhandlers, h); + DBUG_VOID_RETURN; } void NdbGlobalEventBuffer::real_remove(NdbGlobalEventBufferHandle *h) { - // ndbout_c("NdbGlobalEventBuffer::real_init_remove(), m_handles=%u %u", m_nhandlers, h); - for (Uint32 i=0 ; i < m_handlers.size(); i++) { - // ndbout_c("%u %u %u", i, m_handlers[i], h); - if (m_handlers[i] == h) { + DBUG_ENTER("NdbGlobalEventBuffer::real_remove"); + 
DBUG_PRINT("enter",("m_handles.size()=%u %u", m_handlers.size(), h)); + for (Uint32 i=0 ; i < m_handlers.size(); i++) + { + DBUG_PRINT("info",("m_handlers[%u] %u", i, m_handlers[i])); + if (m_handlers[i] == h) + { m_handlers.erase(i); - if (m_handlers.size() == 0) { - // ndbout_c("last to go"); + if (m_handlers.size() == 0) + { + DBUG_PRINT("info",("last to go")); delete[] m_buf; m_buf = NULL; - // NdbMem_Free((char*)m_buf); } - return; + DBUG_VOID_RETURN; } } - ndbout_c("NdbGlobalEventBuffer::real_init_remove() non-existing handle"); - exit(-1); + ndbout_c("NdbGlobalEventBuffer::real_remove() non-existing handle"); + DBUG_PRINT("error",("non-existing handle")); + abort(); + DBUG_VOID_RETURN; } int @@ -1231,6 +1235,9 @@ int NdbGlobalEventBuffer::real_getDataL(const int bufferId, DBUG_RETURN(0); // nothing to get } + DBUG_PRINT("info",("ID(bufferId) %d NO(bufferId) %d e.b %d", + ID(bufferId), NO(bufferId), e.b)); + if (copy_data_alloc(b.data[e.b].sdata, b.data[e.b].ptr, sdata, ptr)) { @@ -1255,26 +1262,29 @@ NdbGlobalEventBuffer::copy_data_alloc(const SubTableData * const f_sdata, LinearSectionPtr t_ptr[3]) { DBUG_ENTER("NdbGlobalEventBuffer::copy_data_alloc"); - if (t_sdata == NULL) { - t_sdata = (SubTableData *)NdbMem_Allocate(sizeof(SubTableData)); - } + unsigned sz4= (sizeof(SubTableData)+3)>>2; + Uint32 *ptr= (Uint32*)NdbMem_Allocate((sz4 + + f_ptr[0].sz + + f_ptr[1].sz + + f_ptr[2].sz) * sizeof(Uint32)); + if (t_sdata) + NdbMem_Free((char*)t_sdata); + t_sdata= (SubTableData *)ptr; memcpy(t_sdata,f_sdata,sizeof(SubTableData)); + ptr+= sz4; + for (int i = 0; i < 3; i++) { LinearSectionPtr & f_p = f_ptr[i]; LinearSectionPtr & t_p = t_ptr[i]; if (f_p.sz > 0) { - if (t_p.p == NULL) { - t_p.p = (Uint32 *)NdbMem_Allocate(sizeof(Uint32)*f_p.sz); - } else if (t_p.sz != f_p.sz) { - NdbMem_Free(t_p.p); - t_p.p = (Uint32 *)NdbMem_Allocate(sizeof(Uint32)*f_p.sz); - } + t_p.p= (Uint32 *)ptr; memcpy(t_p.p, f_p.p, sizeof(Uint32)*f_p.sz); - } else if (t_p.p != NULL) { - 
NdbMem_Free(t_p.p); - t_p.p = NULL; + ptr+= f_p.sz; + t_p.sz= f_p.sz; + } else { + t_p.p= NULL; + t_p.sz= 0; } - t_p.sz = f_p.sz; } DBUG_RETURN(0); } diff --git a/ndb/src/ndbapi/NdbOperationInt.cpp b/ndb/src/ndbapi/NdbOperationInt.cpp index 087a8f1fc92..acfc80b3b69 100644 --- a/ndb/src/ndbapi/NdbOperationInt.cpp +++ b/ndb/src/ndbapi/NdbOperationInt.cpp @@ -1014,8 +1014,14 @@ NdbOperation::branch_col(Uint32 type, Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ + DBUG_ENTER("NdbOperation::branch_col"); + DBUG_PRINT("enter", ("type=%u col=%u val=0x%x len=%u label=%u", + type, ColId, val, len, Label)); + if (val != NULL) + DBUG_DUMP("value", (char*)val, len); + if (initial_interpreterCheck() == -1) - return -1; + DBUG_RETURN(-1); Interpreter::BinaryCondition c = (Interpreter::BinaryCondition)type; @@ -1026,23 +1032,29 @@ NdbOperation::branch_col(Uint32 type, abort(); } - Uint32 sizeInBytes = col->m_attrSize * col->m_arraySize; - if(len != 0 && len != sizeInBytes) - { - setErrorCodeAbort(4209); - return -1; + if (val == NULL) + len = 0; + else { + if (! 
col->getStringType()) { + // prevent assert in NdbSqlUtil on length error + Uint32 sizeInBytes = col->m_attrSize * col->m_arraySize; + if (len != 0 && len != sizeInBytes) + { + setErrorCodeAbort(4209); + DBUG_RETURN(-1); + } + len = sizeInBytes; + } } - - len = sizeInBytes; if (insertATTRINFO(Interpreter::BranchCol(c, 0, 0, false)) == -1) - return -1; + DBUG_RETURN(-1); if (insertBranch(Label) == -1) - return -1; + DBUG_RETURN(-1); if (insertATTRINFO(Interpreter::BranchCol_2(ColId, len))) - return -1; + DBUG_RETURN(-1); Uint32 len2 = Interpreter::mod4(len); if(len2 == len){ @@ -1059,7 +1071,7 @@ NdbOperation::branch_col(Uint32 type, } theErrorLine++; - return 0; + DBUG_RETURN(0); } int diff --git a/ndb/src/ndbapi/NdbRecAttr.cpp b/ndb/src/ndbapi/NdbRecAttr.cpp index 86a777e79d2..5e5306fc33a 100644 --- a/ndb/src/ndbapi/NdbRecAttr.cpp +++ b/ndb/src/ndbapi/NdbRecAttr.cpp @@ -139,8 +139,9 @@ NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz){ static void ndbrecattr_print_string(NdbOut& out, const char *type, - const char *ref, unsigned sz) + const char *aref, unsigned sz) { + const unsigned char* ref = (const unsigned char*)aref; int i, len, printable= 1; // trailing zeroes are not printed for (i=sz-1; i >= 0; i--) @@ -166,7 +167,7 @@ ndbrecattr_print_string(NdbOut& out, const char *type, for (i= len+1; ref[i] != 0; i++) out.print("%u]",len-i); assert((int)sz > i); - ndbrecattr_print_string(out,type,ref+i,sz-i); + ndbrecattr_print_string(out,type,aref+i,sz-i); } } diff --git a/ndb/src/ndbapi/NdbScanFilter.cpp b/ndb/src/ndbapi/NdbScanFilter.cpp index f3f73c75ac0..b39fd10fe95 100644 --- a/ndb/src/ndbapi/NdbScanFilter.cpp +++ b/ndb/src/ndbapi/NdbScanFilter.cpp @@ -405,8 +405,8 @@ NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op, return -1; } - (m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel); - return 0; + int ret = (m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel); + return ret; } int @@ -426,6 +426,10 
@@ NdbScanFilter::cmp(BinaryCondition cond, int ColId, return m_impl.cond_col_const(Interpreter::EQ, ColId, val, len); case COND_NE: return m_impl.cond_col_const(Interpreter::NE, ColId, val, len); + case COND_LIKE: + return m_impl.cond_col_const(Interpreter::LIKE, ColId, val, len); + case COND_NOT_LIKE: + return m_impl.cond_col_const(Interpreter::NOT_LIKE, ColId, val, len); } return -1; } diff --git a/ndb/src/ndbapi/NdbTransaction.cpp b/ndb/src/ndbapi/NdbTransaction.cpp index ab6c0ca3e38..67581e4a0f8 100644 --- a/ndb/src/ndbapi/NdbTransaction.cpp +++ b/ndb/src/ndbapi/NdbTransaction.cpp @@ -1420,14 +1420,18 @@ Remark: Sets TC Connect pointer. int NdbTransaction::receiveTCSEIZEREF(NdbApiSignal* aSignal) { + DBUG_ENTER("NdbTransaction::receiveTCSEIZEREF"); if (theStatus != Connecting) { - return -1; + DBUG_RETURN(-1); } else { theStatus = ConnectFailure; theNdb->theError.code = aSignal->readData(2); - return 0; + DBUG_PRINT("info",("error code %d, %s", + theNdb->getNdbError().code, + theNdb->getNdbError().message)); + DBUG_RETURN(0); } }//NdbTransaction::receiveTCSEIZEREF() diff --git a/ndb/src/ndbapi/Ndbif.cpp b/ndb/src/ndbapi/Ndbif.cpp index 40eb46ea397..40aaa1e3daa 100644 --- a/ndb/src/ndbapi/Ndbif.cpp +++ b/ndb/src/ndbapi/Ndbif.cpp @@ -388,24 +388,24 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3]) com = tRec->execTRANSID_AI(tDataPtr + TransIdAI::HeaderLength, tLen - TransIdAI::HeaderLength); } + + if(com == 0) + return; - if(com == 1){ - switch(tRec->getType()){ - case NdbReceiver::NDB_OPERATION: - case NdbReceiver::NDB_INDEX_OPERATION: - if(tCon->OpCompleteSuccess() != -1){ - completedTransaction(tCon); - return; - } - break; - case NdbReceiver::NDB_SCANRECEIVER: - tCon->theScanningOp->receiver_delivered(tRec); - theImpl->theWaiter.m_state = (((WaitSignalType) tWaitState) == WAIT_SCAN ? 
- (Uint32) NO_WAIT : tWaitState); - break; - default: - goto InvalidSignal; + switch(tRec->getType()){ + case NdbReceiver::NDB_OPERATION: + case NdbReceiver::NDB_INDEX_OPERATION: + if(tCon->OpCompleteSuccess() != -1){ + completedTransaction(tCon); } + return; + case NdbReceiver::NDB_SCANRECEIVER: + tCon->theScanningOp->receiver_delivered(tRec); + theImpl->theWaiter.m_state = (((WaitSignalType) tWaitState) == WAIT_SCAN ? + (Uint32) NO_WAIT : tWaitState); + break; + default: + goto InvalidSignal; } break; } else { diff --git a/ndb/src/ndbapi/TransporterFacade.cpp b/ndb/src/ndbapi/TransporterFacade.cpp index 5b5394f0965..7f1e68a42d3 100644 --- a/ndb/src/ndbapi/TransporterFacade.cpp +++ b/ndb/src/ndbapi/TransporterFacade.cpp @@ -466,8 +466,7 @@ void TransporterFacade::threadMainReceive(void) theTransporterRegistry->stopReceiving(); } -TransporterFacade::TransporterFacade(NdbMgmHandle mgm_handle) : - m_mgm_handle(mgm_handle), +TransporterFacade::TransporterFacade() : theTransporterRegistry(0), theStopReceive(0), theSendThread(NULL), @@ -496,7 +495,7 @@ bool TransporterFacade::init(Uint32 nodeId, const ndb_mgm_configuration* props) { theOwnId = nodeId; - theTransporterRegistry = new TransporterRegistry(m_mgm_handle,this); + theTransporterRegistry = new TransporterRegistry(this); const int res = IPCConfig::configureTransporters(nodeId, * props, diff --git a/ndb/src/ndbapi/TransporterFacade.hpp b/ndb/src/ndbapi/TransporterFacade.hpp index bd3fc67a8d6..e74f4b51e00 100644 --- a/ndb/src/ndbapi/TransporterFacade.hpp +++ b/ndb/src/ndbapi/TransporterFacade.hpp @@ -47,7 +47,7 @@ extern "C" { class TransporterFacade { public: - TransporterFacade(NdbMgmHandle mgm_handle); + TransporterFacade(); virtual ~TransporterFacade(); bool init(Uint32, const ndb_mgm_configuration *); @@ -133,7 +133,6 @@ private: bool isConnected(NodeId aNodeId); void doStop(); - NdbMgmHandle m_mgm_handle; TransporterRegistry* theTransporterRegistry; SocketServer m_socket_server; int sendPerformedLastInterval; 
diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp index 5244d251cad..1990d2b6d52 100644 --- a/ndb/src/ndbapi/ndb_cluster_connection.cpp +++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp @@ -285,7 +285,7 @@ Ndb_cluster_connection_impl::Ndb_cluster_connection_impl(const char * m_transporter_facade= TransporterFacade::theFacadeInstance= - new TransporterFacade(m_config_retriever->get_mgmHandle()); + new TransporterFacade(); DBUG_VOID_RETURN; } @@ -506,7 +506,7 @@ int Ndb_cluster_connection::connect(int no_retries, int retry_delay_in_seconds, CFG_CONNECTION_SERVER_PORT, m_impl.m_transporter_facade->get_registry() ->m_transporter_interface[i] - .m_service_port, + .m_s_service_port, &mgm_reply); ndb_mgm_destroy_configuration(props); diff --git a/ndb/test/ndbapi/testBitfield.cpp b/ndb/test/ndbapi/testBitfield.cpp index 3c8f7d8de4b..e26f495f5a4 100644 --- a/ndb/test/ndbapi/testBitfield.cpp +++ b/ndb/test/ndbapi/testBitfield.cpp @@ -5,40 +5,22 @@ #include <NdbApi.hpp> #include <HugoTransactions.hpp> -static const char* opt_connect_str= 0; static const char* _dbname = "TEST_DB"; static int g_loops = 7; -static void print_version() -{ - printf("MySQL distrib %s, for %s (%s)\n", - MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE); -} static void usage() { - char desc[] = - "tabname\n"\ - "This program list all properties of table(s) in NDB Cluster.\n"\ - " ex: desc T1 T2 T4\n"; - print_version(); + ndb_std_print_version(); } +#if 0 static my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) + const char *argument) { - switch (optid) { - case '#': - DBUG_PUSH(argument ? argument : "d:t:O,/tmp/ndb_desc.trace"); - break; - case 'V': - print_version(); - exit(0); - case '?': - usage(); - exit(0); - } - return 0; + return ndb_std_get_one_option(optid, opt, argument ? 
argument : + "d:t:O,/tmp/testBitfield.trace"); } +#endif static const NdbDictionary::Table* create_random_table(Ndb*); static int transactions(Ndb*, const NdbDictionary::Table* tab); diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp index 460c3e77745..5240735dcc6 100644 --- a/ndb/test/ndbapi/testDict.cpp +++ b/ndb/test/ndbapi/testDict.cpp @@ -534,13 +534,6 @@ int runTestFragmentTypes(NDBT_Context* ctx, NDBT_Step* step){ int result = NDBT_OK; NdbRestarter restarter; - // enum FragmentType { - // Unknown = 0, - // Single = 1, ///< Only one fragment - // All = 2, ///< Default value. One fragment per node group - // AllLarge = 3 ///< Sixten fragments per node group. - // }; - if (pNdb->waitUntilReady(30) != 0){ // Db is not ready, return with failure return NDBT_FAILED; @@ -575,13 +568,17 @@ int runTestFragmentTypes(NDBT_Context* ctx, NDBT_Step* step){ result = NDBT_FAILED; goto drop_the_tab; } - +/** + This test does not work since fragmentation is + decided by the kernel, hence the fragementation + attribute on the column will differ + if (newTab.equal(*pTab3) == false){ ndbout << "It was not equal" << endl; result = NDBT_FAILED; goto drop_the_tab; } - +*/ do { HugoTransactions hugoTrans(*pTab3); @@ -1598,17 +1595,22 @@ TESTCASE("CreateTableWhenDbIsFull", } TESTCASE("FragmentTypeSingle", "Create the table with fragment type Single\n"){ - TC_PROPERTY("FragmentType", 1); + TC_PROPERTY("FragmentType", NdbDictionary::Table::FragSingle); + INITIALIZER(runTestFragmentTypes); +} +TESTCASE("FragmentTypeAllSmall", + "Create the table with fragment type AllSmall\n"){ + TC_PROPERTY("FragmentType", NdbDictionary::Table::FragAllSmall); INITIALIZER(runTestFragmentTypes); } -TESTCASE("FragmentTypeAll", - "Create the table with fragment type All\n"){ - TC_PROPERTY("FragmentType", 2); +TESTCASE("FragmentTypeAllMedium", + "Create the table with fragment type AllMedium\n"){ + TC_PROPERTY("FragmentType", NdbDictionary::Table::FragAllMedium); 
INITIALIZER(runTestFragmentTypes); } TESTCASE("FragmentTypeAllLarge", "Create the table with fragment type AllLarge\n"){ - TC_PROPERTY("FragmentType", 3); + TC_PROPERTY("FragmentType", NdbDictionary::Table::FragAllLarge); INITIALIZER(runTestFragmentTypes); } TESTCASE("TemporaryTables", diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index ce1851083b1..96926a421fb 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -612,7 +612,10 @@ struct Col { bool m_pk; Type m_type; unsigned m_length; - unsigned m_bytelength; + unsigned m_bytelength; // multiplied by char width + unsigned m_attrsize; // base type size + unsigned m_headsize; // length bytes + unsigned m_bytesize; // full value size bool m_nullable; const Chs* m_chs; Col(const class Tab& tab, unsigned num, const char* name, bool pk, Type type, unsigned length, bool nullable, const Chs* chs); @@ -629,12 +632,26 @@ Col::Col(const class Tab& tab, unsigned num, const char* name, bool pk, Type typ m_type(type), m_length(length), m_bytelength(length * (chs == 0 ? 1 : chs->m_cs->mbmaxlen)), + m_attrsize( + type == Unsigned ? sizeof(Uint32) : + type == Char ? sizeof(char) : + type == Varchar ? sizeof(char) : + type == Longvarchar ? sizeof(char) : ~0), + m_headsize( + type == Unsigned ? 0 : + type == Char ? 0 : + type == Varchar ? 1 : + type == Longvarchar ? 
2 : ~0), + m_bytesize(m_headsize + m_attrsize * m_bytelength), m_nullable(nullable), m_chs(chs) { // fix long varchar - if (type == Varchar && m_bytelength > 255) + if (type == Varchar && m_bytelength > 255) { m_type = Longvarchar; + m_headsize += 1; + m_bytesize += 1; + } } Col::~Col() @@ -1119,13 +1136,15 @@ struct Con { NdbIndexOperation* m_indexop; NdbScanOperation* m_scanop; NdbIndexScanOperation* m_indexscanop; + NdbScanFilter* m_scanfilter; enum ScanMode { ScanNo = 0, Committed, Latest, Exclusive }; ScanMode m_scanmode; enum ErrType { ErrNone = 0, ErrDeadlock, ErrOther }; ErrType m_errtype; Con() : m_ndb(0), m_dic(0), m_tx(0), m_op(0), m_indexop(0), - m_scanop(0), m_indexscanop(0), m_scanmode(ScanNo), m_errtype(ErrNone) {} + m_scanop(0), m_indexscanop(0), m_scanfilter(0), + m_scanmode(ScanNo), m_errtype(ErrNone) {} ~Con() { if (m_tx != 0) closeTransaction(); @@ -1140,10 +1159,14 @@ struct Con { int getNdbScanOperation(const Tab& tab); int getNdbIndexScanOperation1(const ITab& itab, const Tab& tab); int getNdbIndexScanOperation(const ITab& itab, const Tab& tab); + int getNdbScanFilter(); int equal(int num, const char* addr); int getValue(int num, NdbRecAttr*& rec); int setValue(int num, const char* addr); int setBound(int num, int type, const void* value); + int beginFilter(int group); + int endFilter(); + int setFilter(int num, int cond, const void* value, unsigned len); int execute(ExecType t); int execute(ExecType t, bool& deadlock); int readTuples(Par par); @@ -1254,6 +1277,15 @@ Con::getNdbIndexScanOperation(const ITab& itab, const Tab& tab) } int +Con::getNdbScanFilter() +{ + assert(m_tx != 0 && m_scanop != 0); + delete m_scanfilter; + m_scanfilter = new NdbScanFilter(m_scanop); + return 0; +} + +int Con::equal(int num, const char* addr) { assert(m_tx != 0 && m_op != 0); @@ -1280,12 +1312,36 @@ Con::setValue(int num, const char* addr) int Con::setBound(int num, int type, const void* value) { - assert(m_tx != 0 && m_op != 0); + assert(m_tx != 0 && 
m_indexscanop != 0); CHKCON(m_indexscanop->setBound(num, type, value) == 0, *this); return 0; } int +Con::beginFilter(int group) +{ + assert(m_tx != 0 && m_scanfilter != 0); + CHKCON(m_scanfilter->begin((NdbScanFilter::Group)group) == 0, *this); + return 0; +} + +int +Con::endFilter() +{ + assert(m_tx != 0 && m_scanfilter != 0); + CHKCON(m_scanfilter->end() == 0, *this); + return 0; +} + +int +Con::setFilter(int num, int cond, const void* value, unsigned len) +{ + assert(m_tx != 0 && m_scanfilter != 0); + CHKCON(m_scanfilter->cmp((NdbScanFilter::BinaryCondition)cond, num, value, len) == 0, *this); + return 0; +} + +int Con::execute(ExecType t) { assert(m_tx != 0); @@ -1502,7 +1558,7 @@ createtable(Par par) const Col& col = *tab.m_col[k]; NdbDictionary::Column c(col.m_name); c.setType((NdbDictionary::Column::Type)col.m_type); - c.setLength(col.m_bytelength); // NDB API uses length in bytes + c.setLength(col.m_bytelength); // for char NDB API uses length in bytes c.setPrimaryKey(col.m_pk); c.setNullable(col.m_nullable); if (col.m_chs != 0) @@ -2836,6 +2892,7 @@ struct BVal : public Val { int m_type; BVal(const ICol& icol); int setbnd(Par par) const; + int setflt(Par par) const; }; BVal::BVal(const ICol& icol) : @@ -2855,6 +2912,27 @@ BVal::setbnd(Par par) const return 0; } +int +BVal::setflt(Par par) const +{ + static unsigned index_bound_to_filter_bound[5] = { + NdbScanFilter::COND_GE, + NdbScanFilter::COND_GT, + NdbScanFilter::COND_LE, + NdbScanFilter::COND_LT, + NdbScanFilter::COND_EQ + }; + Con& con = par.con(); + assert(g_compare_null || ! m_null); + const char* addr = ! m_null ? 
(const char*)dataaddr() : 0; + const ICol& icol = m_icol; + const Col& col = icol.m_col; + unsigned length = col.m_bytesize; + unsigned cond = index_bound_to_filter_bound[m_type]; + CHK(con.setFilter(col.m_num, cond, addr, length) == 0); + return 0; +} + static NdbOut& operator<<(NdbOut& out, const BVal& bval) { @@ -2882,6 +2960,7 @@ struct BSet { void calc(Par par); void calcpk(Par par, unsigned i); int setbnd(Par par) const; + int setflt(Par par) const; void filter(Par par, const Set& set, Set& set2) const; }; @@ -3005,6 +3084,33 @@ BSet::setbnd(Par par) const return 0; } +int +BSet::setflt(Par par) const +{ + Con& con = par.con(); + CHK(con.getNdbScanFilter() == 0); + CHK(con.beginFilter(NdbScanFilter::AND) == 0); + if (m_bvals != 0) { + unsigned p1 = urandom(m_bvals); + unsigned p2 = 10009; // prime + const unsigned extras = 5; + // random order + for (unsigned j = 0; j < m_bvals + extras; j++) { + unsigned k = p1 + p2 * j; + const BVal& bval = *m_bval[k % m_bvals]; + CHK(bval.setflt(par) == 0); + } + // duplicate + if (urandom(5) == 0) { + unsigned k = urandom(m_bvals); + const BVal& bval = *m_bval[k]; + CHK(bval.setflt(par) == 0); + } + } + CHK(con.endFilter() == 0); + return 0; +} + void BSet::filter(Par par, const Set& set, Set& set2) const { @@ -3594,12 +3700,69 @@ scanreadindexfast(Par par, const ITab& itab, const BSet& bset, unsigned countche } static int +scanreadfilter(Par par, const ITab& itab, BSet& bset, bool calc) +{ + Con& con = par.con(); + const Tab& tab = par.tab(); + const Set& set = par.set(); + Set set1(tab, set.m_rows); + if (calc) { + while (true) { + bset.calc(par); + bset.filter(par, set, set1); + unsigned n = set1.count(); + // prefer proper subset + if (0 < n && n < set.m_rows) + break; + if (urandom(3) == 0) + break; + set1.reset(); + } + } else { + bset.filter(par, set, set1); + } + LL3("scanfilter " << itab.m_name << " " << bset << " lockmode=" << par.m_lockmode << " expect=" << set1.count() << " verify=" << par.m_verify); + Set 
set2(tab, set.m_rows); + CHK(con.startTransaction() == 0); + CHK(con.getNdbScanOperation(tab) == 0); + CHK(con.readTuples(par) == 0); + CHK(bset.setflt(par) == 0); + set2.getval(par); + CHK(con.executeScan() == 0); + unsigned n = 0; + bool deadlock = false; + while (1) { + int ret; + deadlock = par.m_deadlock; + CHK((ret = con.nextScanResult(true, deadlock)) == 0 || ret == 1); + if (ret == 1) + break; + if (deadlock) { + LL1("scanfilter: stop on deadlock"); + break; + } + unsigned i = (unsigned)-1; + CHK(set2.getkey(par, &i) == 0); + CHK(set2.putval(i, par.m_dups, n) == 0); + LL4("key " << i << " row " << n << ": " << *set2.m_row[i]); + n++; + } + con.closeTransaction(); + if (par.m_verify) { + CHK(set1.verify(par, set2) == 0); + } + LL3("scanfilter " << itab.m_name << " done rows=" << n); + return 0; +} + +static int scanreadindex(Par par, const ITab& itab) { const Tab& tab = par.tab(); for (unsigned i = 0; i < par.m_subsubloop; i++) { if (itab.m_type == ITab::OrderedIndex) { BSet bset(tab, itab, par.m_rows); + CHK(scanreadfilter(par, itab, bset, true) == 0); CHK(scanreadindex(par, itab, bset, true) == 0); } } @@ -3626,8 +3789,7 @@ scanreadindex(Par par) static int scanreadall(Par par) { - if (par.m_no < 11) - CHK(scanreadtable(par) == 0); + CHK(scanreadtable(par) == 0); CHK(scanreadindex(par) == 0); return 0; } diff --git a/ndb/test/ndbapi/test_event.cpp b/ndb/test/ndbapi/test_event.cpp index 567cd1581f5..3619fe195cf 100644 --- a/ndb/test/ndbapi/test_event.cpp +++ b/ndb/test/ndbapi/test_event.cpp @@ -32,6 +32,29 @@ int runCreateEvent(NDBT_Context* ctx, NDBT_Step* step) return NDBT_OK; } +int runCreateShadowTable(NDBT_Context* ctx, NDBT_Step* step) +{ + const NdbDictionary::Table *table= ctx->getTab(); + char buf[1024]; + sprintf(buf, "%s_SHADOW", table->getName()); + + GETNDB(step)->getDictionary()->dropTable(buf); + if (GETNDB(step)->getDictionary()->getTable(buf)) + { + g_err << "unsucessful drop of " << buf << endl; + return NDBT_FAILED; + } + + 
NdbDictionary::Table table_shadow(*table); + table_shadow.setName(buf); + GETNDB(step)->getDictionary()->createTable(table_shadow); + if (GETNDB(step)->getDictionary()->getTable(buf)) + return NDBT_OK; + + g_err << "unsucessful create of " << buf << endl; + return NDBT_FAILED; +} + int runCreateDropEventOperation(NDBT_Context* ctx, NDBT_Step* step) { int loops = ctx->getNumLoops(); @@ -83,7 +106,7 @@ int runEventOperation(NDBT_Context* ctx, NDBT_Step* step) EventOperationStats stats; - g_info << "***** Id " << tId << endl; + g_info << "***** start Id " << tId << endl; // sleep(tId); @@ -102,12 +125,13 @@ int runEventOperation(NDBT_Context* ctx, NDBT_Step* step) ret = NDBT_FAILED; if (ret == NDBT_FAILED) { - ndbout << "n_inserts = " << stats.n_inserts << endl; - ndbout << "n_deletes = " << stats.n_deletes << endl; - ndbout << "n_updates = " << stats.n_updates << endl; - ndbout << "n_consecutive = " << stats.n_consecutive << endl; - ndbout << "n_duplicates = " << stats.n_duplicates << endl; - ndbout << "n_inconsistent_gcis = " << stats.n_inconsistent_gcis << endl; + g_info << "***** end Id " << tId << endl; + ndbout_c("n_inserts = %d (%d)", stats.n_inserts, records); + ndbout_c("n_deletes = %d (%d)", stats.n_deletes, records); + ndbout_c("n_updates = %d (%d)", stats.n_updates, records); + ndbout_c("n_consecutive = %d (%d)", stats.n_consecutive, 3); + ndbout_c("n_duplicates = %d (%d)", stats.n_duplicates, 0); + ndbout_c("n_inconsistent_gcis = %d (%d)", stats.n_inconsistent_gcis, 0); } return ret; @@ -134,6 +158,36 @@ int runEventLoad(NDBT_Context* ctx, NDBT_Step* step) return NDBT_OK; } +int runEventMixedLoad(NDBT_Context* ctx, NDBT_Step* step) +{ + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + HugoTransactions hugoTrans(*ctx->getTab()); + + sleep(5); + + if (hugoTrans.loadTable(GETNDB(step), 3*records, 1, true, 1) != 0){ + return NDBT_FAILED; + } + if (hugoTrans.pkDelRecords(GETNDB(step), 3*records, 1, true, 1) != 0){ + return NDBT_FAILED; + 
} + if (hugoTrans.loadTable(GETNDB(step), records, 1, true, 1) != 0){ + return NDBT_FAILED; + } + if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){ + return NDBT_FAILED; + } + if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){ + return NDBT_FAILED; + } + if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){ + return NDBT_FAILED; + } + + return NDBT_OK; +} + int runDropEvent(NDBT_Context* ctx, NDBT_Step* step) { HugoTransactions hugoTrans(*ctx->getTab()); @@ -145,6 +199,224 @@ int runDropEvent(NDBT_Context* ctx, NDBT_Step* step) return NDBT_OK; } +int runVerify(NDBT_Context* ctx, NDBT_Step* step) +{ + int records = ctx->getNumRecords(); + const NdbDictionary::Table * table= ctx->getTab(); + char buf[1024]; + + sprintf(buf, "%s_SHADOW", table->getName()); + const NdbDictionary::Table * table_shadow; + if ((table_shadow = GETNDB(step)->getDictionary()->getTable(buf)) == 0) + { + g_err << "Unable to get table " << buf << endl; + return NDBT_FAILED; + } + + HugoTransactions hugoTrans(*table_shadow); + if (hugoTrans.pkReadRecords(GETNDB(step), records) != 0){ + return NDBT_FAILED; + } + + return NDBT_OK; +} + +int runEventApplier(NDBT_Context* ctx, NDBT_Step* step) +{ + DBUG_ENTER("runEventApplier"); + + int records = ctx->getNumRecords(); + int loops = ctx->getNumLoops(); + const NdbDictionary::Table * table= ctx->getTab(); + char buf[1024]; + + sprintf(buf, "%s_SHADOW", table->getName()); + const NdbDictionary::Table * table_shadow; + if ((table_shadow = GETNDB(step)->getDictionary()->getTable(buf)) == 0) + { + g_err << "Unable to get table " << buf << endl; + DBUG_RETURN(NDBT_FAILED); + } + + sprintf(buf, "%s_EVENT", table->getName()); + NdbEventOperation *pOp; + pOp = GETNDB(step)->createEventOperation(buf, 10*records); + if ( pOp == NULL ) { + g_err << "Event operation creation failed on %s" << buf << endl; + DBUG_RETURN(NDBT_FAILED); + } + + int i; + int n_columns= table->getNoOfColumns(); + NdbRecAttr* recAttr[1024]; + 
NdbRecAttr* recAttrPre[1024]; + for (i = 0; i < n_columns; i++) { + recAttr[i] = pOp->getValue(table->getColumn(i)->getName()); + recAttrPre[i] = pOp->getPreValue(table->getColumn(i)->getName()); + } + + if (pOp->execute()) { // This starts changes to "start flowing" + g_err << "execute operation execution failed: \n"; + g_err << pOp->getNdbError().code << " " + << pOp->getNdbError().message << endl; + DBUG_RETURN(NDBT_FAILED); + } + + int r= 0; + int res; + while (r < 10*records){ + //printf("now waiting for event...\n"); + res= GETNDB(step)->pollEvents(1000); // wait for event or 1000 ms + if (res <= 0) + continue; + + //printf("got data! %d\n", r); + int overrun= 0; + while (pOp->next(&overrun) > 0) + { + if (overrun) + { + g_err << "buffer overrun\n"; + DBUG_RETURN(NDBT_FAILED); + } + r++; + + Uint32 gci= pOp->getGCI(); + + if (!pOp->isConsistent()) { + g_err << "A node failure has occured and events might be missing\n"; + DBUG_RETURN(NDBT_FAILED); + } + + int noRetries= 0; + do + { + NdbTransaction *trans= GETNDB(step)->startTransaction(); + if (trans == 0) + { + g_err << "startTransaction failed " + << GETNDB(step)->getNdbError().code << " " + << GETNDB(step)->getNdbError().message << endl; + DBUG_RETURN(NDBT_FAILED); + } + + NdbOperation *op= trans->getNdbOperation(table_shadow); + if (op == 0) + { + g_err << "getNdbOperation failed " + << trans->getNdbError().code << " " + << trans->getNdbError().message << endl; + DBUG_RETURN(NDBT_FAILED); + } + + switch (pOp->getEventType()) { + case NdbDictionary::Event::TE_INSERT: + if (op->insertTuple()) + { + g_err << "insertTuple " + << op->getNdbError().code << " " + << op->getNdbError().message << endl; + DBUG_RETURN(NDBT_FAILED); + } + break; + case NdbDictionary::Event::TE_DELETE: + if (op->deleteTuple()) + { + g_err << "deleteTuple " + << op->getNdbError().code << " " + << op->getNdbError().message << endl; + DBUG_RETURN(NDBT_FAILED); + } + break; + case NdbDictionary::Event::TE_UPDATE: + if (op->updateTuple()) 
+ { + g_err << "updateTuple " + << op->getNdbError().code << " " + << op->getNdbError().message << endl; + DBUG_RETURN(NDBT_FAILED); + } + break; + case NdbDictionary::Event::TE_ALL: + abort(); + } + + for (i= 0; i < n_columns; i++) + { + if (table->getColumn(i)->getPrimaryKey() && + op->equal(i,recAttr[i]->aRef())) + { + g_err << "equal " << i << " " + << op->getNdbError().code << " " + << op->getNdbError().message << endl; + DBUG_RETURN(NDBT_FAILED); + } + } + + switch (pOp->getEventType()) { + case NdbDictionary::Event::TE_INSERT: + for (i= 0; i < n_columns; i++) + { + if (!table->getColumn(i)->getPrimaryKey() && + op->setValue(i,recAttr[i]->aRef())) + { + g_err << "setValue(insert) " << i << " " + << op->getNdbError().code << " " + << op->getNdbError().message << endl; + DBUG_RETURN(NDBT_FAILED); + } + } + break; + case NdbDictionary::Event::TE_DELETE: + break; + case NdbDictionary::Event::TE_UPDATE: + for (i= 0; i < n_columns; i++) + { + if (!table->getColumn(i)->getPrimaryKey() && + recAttr[i]->isNULL() >= 0 && + op->setValue(i,recAttr[i]->aRef())) + { + g_err << "setValue(update) " << i << " " + << op->getNdbError().code << " " + << op->getNdbError().message << endl; + DBUG_RETURN(NDBT_FAILED); + } + } + break; + case NdbDictionary::Event::TE_ALL: + abort(); + } + if (trans->execute(Commit) == 0) + { + trans->close(); + // everything ok + break; + } + if (noRetries++ == 10 || + trans->getNdbError().status != NdbError::TemporaryError) + { + g_err << "execute " << r << " failed " + << trans->getNdbError().code << " " + << trans->getNdbError().message << endl; + trans->close(); + DBUG_RETURN(NDBT_FAILED); + } + trans->close(); + NdbSleep_MilliSleep(100); // sleep before retying + } while(1); + } + } + + if (GETNDB(step)->dropEventOperation(pOp)) { + g_err << "dropEventOperation execution failed " + << GETNDB(step)->getNdbError().code << " " + << GETNDB(step)->getNdbError().message << endl; + DBUG_RETURN(NDBT_FAILED); + } + + DBUG_RETURN(NDBT_OK); +} + // 
INITIALIZER(runInsert); // STEP(runPkRead); // VERIFIER(runVerifyInsert); @@ -156,9 +428,6 @@ TESTCASE("BasicEventOperation", "NOTE! No errors are allowed!" ){ INITIALIZER(runCreateEvent); STEP(runEventOperation); - STEP(runEventOperation); - STEP(runEventOperation); - STEP(runEventOperation); STEP(runEventLoad); FINALIZER(runDropEvent); } @@ -169,19 +438,27 @@ TESTCASE("CreateDropEventOperation", STEP(runCreateDropEventOperation); FINALIZER(runDropEvent); } -NDBT_TESTSUITE_END(test_event); - -#if 0 -NDBT_TESTSUITE(test_event); TESTCASE("ParallellEventOperation", - "Verify that we can listen to Events in Parallell" + "Verify that we can listen to Events in parallell" "NOTE! No errors are allowed!" ){ - INITIALIZER(runCreateAllEvent); + INITIALIZER(runCreateEvent); STEP(runEventOperation); + STEP(runEventOperation); + STEP(runEventLoad); FINALIZER(runDropEvent); } +TESTCASE("EventOperationApplier", + "Verify that if we apply the data we get from event " + "operation is the same as the original table" + "NOTE! No errors are allowed!" 
){ + INITIALIZER(runCreateEvent); + INITIALIZER(runCreateShadowTable); + STEP(runEventApplier); + STEP(runEventMixedLoad); + FINALIZER(runDropEvent); + FINALIZER(runVerify); +} NDBT_TESTSUITE_END(test_event); -#endif int main(int argc, const char** argv){ ndb_init(); diff --git a/ndb/test/run-test/16node-tests.txt b/ndb/test/run-test/16node-tests.txt index 920448bbfd6..11ade56c28c 100644 --- a/ndb/test/run-test/16node-tests.txt +++ b/ndb/test/run-test/16node-tests.txt @@ -377,7 +377,7 @@ args: -n FragmentTypeSingle T1 max-time: 1500 cmd: testDict -args: -n FragmentTypeAll T1 T6 T7 T8 +args: -n FragmentTypeAllSmall T1 T6 T7 T8 max-time: 1500 cmd: testDict diff --git a/ndb/test/run-test/Makefile.am b/ndb/test/run-test/Makefile.am index 6b3ba0bff09..1067328dcc3 100644 --- a/ndb/test/run-test/Makefile.am +++ b/ndb/test/run-test/Makefile.am @@ -6,7 +6,7 @@ include $(top_srcdir)/ndb/config/type_util.mk.am include $(top_srcdir)/ndb/config/type_mgmapiclient.mk.am test_PROGRAMS = atrt -test_DATA=daily-basic-tests.txt daily-devel-tests.txt +test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \ atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh diff --git a/ndb/test/run-test/basic.txt b/ndb/test/run-test/basic.txt index a952320db08..ec9e21359e5 100644 --- a/ndb/test/run-test/basic.txt +++ b/ndb/test/run-test/basic.txt @@ -374,7 +374,7 @@ args: -n FragmentTypeSingle T1 max-time: 1500 cmd: testDict -args: -n FragmentTypeAll T1 T6 T7 T8 +args: -n FragmentTypeAllSmall T1 T6 T7 T8 max-time: 1500 cmd: testDict diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index d5b5bbb5309..8528e709eb3 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -23,7 +23,7 @@ max-time: 500 cmd: testBasic args: -n PkInsert -max-time: 600 +max-time: 660 cmd: testBasic args: -n UpdateAndRead @@ -393,7 
+393,7 @@ args: -n FragmentTypeSingle T1 max-time: 1500 cmd: testDict -args: -n FragmentTypeAll T1 T6 T7 T8 +args: -n FragmentTypeAllSmall T1 T6 T7 T8 max-time: 1500 cmd: testDict @@ -553,3 +553,32 @@ max-time: 500 cmd: flexHammer args: -r 5 -t 32 +max-time: 300 +cmd: DbCreate +args: + +max-time: 180 +cmd: DbAsyncGenerator +args: -time 60 -p 1 +type: bench + +max-time: 180 +cmd: DbAsyncGenerator +args: -time 60 -p 25 +type: bench + +max-time: 180 +cmd: DbAsyncGenerator +args: -time 60 -p 100 +type: bench + +max-time: 180 +cmd: DbAsyncGenerator +args: -time 60 -p 200 +type: bench + +max-time: 180 +cmd: DbAsyncGenerator +args: -time 60 -p 1 -proc 25 +type: bench + diff --git a/ndb/test/run-test/daily-devel-tests.txt b/ndb/test/run-test/daily-devel-tests.txt index 2cdd39ffa4c..5c9b36fb836 100644 --- a/ndb/test/run-test/daily-devel-tests.txt +++ b/ndb/test/run-test/daily-devel-tests.txt @@ -204,32 +204,3 @@ max-time: 2500 cmd: test_event args: -n BasicEventOperation T1 T6 -max-time: 300 -cmd: DbCreate -args: - -max-time: 180 -cmd: DbAsyncGenerator -args: -time 60 -p 1 -type: bench - -max-time: 180 -cmd: DbAsyncGenerator -args: -time 60 -p 25 -type: bench - -max-time: 180 -cmd: DbAsyncGenerator -args: -time 60 -p 100 -type: bench - -max-time: 180 -cmd: DbAsyncGenerator -args: -time 60 -p 200 -type: bench - -max-time: 180 -cmd: DbAsyncGenerator -args: -time 60 -p 1 -proc 25 -type: bench - diff --git a/ndb/tools/delete_all.cpp b/ndb/tools/delete_all.cpp index 4a153208c65..2c395a67900 100644 --- a/ndb/tools/delete_all.cpp +++ b/ndb/tools/delete_all.cpp @@ -44,20 +44,17 @@ static void usage() my_print_help(my_long_options); my_print_variables(my_long_options); } -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) -{ - return ndb_std_get_one_option(optid, opt, argument ? 
argument : - "d:t:O,/tmp/ndb_delete_all.trace"); -} int main(int argc, char** argv){ NDB_INIT(argv[0]); const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; - if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) +#ifndef DBUG_OFF + opt_debug= "d:t:O,/tmp/ndb_delete_all.trace"; +#endif + if ((ho_error=handle_options(&argc, &argv, my_long_options, + ndb_std_get_one_option))) return NDBT_ProgramExit(NDBT_WRONGARGS); Ndb_cluster_connection con(opt_connect_str); diff --git a/ndb/tools/desc.cpp b/ndb/tools/desc.cpp index 3bfa332ea79..be0f6942db5 100644 --- a/ndb/tools/desc.cpp +++ b/ndb/tools/desc.cpp @@ -44,20 +44,17 @@ static void usage() my_print_help(my_long_options); my_print_variables(my_long_options); } -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) -{ - return ndb_std_get_one_option(optid, opt, argument ? argument : - "d:t:O,/tmp/ndb_desc.trace"); -} int main(int argc, char** argv){ NDB_INIT(argv[0]); const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; - if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) +#ifndef DBUG_OFF + opt_debug= "d:t:O,/tmp/ndb_desc.trace"; +#endif + if ((ho_error=handle_options(&argc, &argv, my_long_options, + ndb_std_get_one_option))) return NDBT_ProgramExit(NDBT_WRONGARGS); Ndb_cluster_connection con(opt_connect_str); diff --git a/ndb/tools/drop_index.cpp b/ndb/tools/drop_index.cpp index c73aa4d42d2..e2bf7f0bfae 100644 --- a/ndb/tools/drop_index.cpp +++ b/ndb/tools/drop_index.cpp @@ -41,20 +41,17 @@ static void usage() my_print_help(my_long_options); my_print_variables(my_long_options); } -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) -{ - return ndb_std_get_one_option(optid, opt, argument ? 
argument : - "d:t:O,/tmp/ndb_drop_index.trace"); -} int main(int argc, char** argv){ NDB_INIT(argv[0]); const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; - if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) +#ifndef DBUG_OFF + "d:t:O,/tmp/ndb_drop_index.trace"; +#endif + if ((ho_error=handle_options(&argc, &argv, my_long_options, + ndb_std_get_one_option))) return NDBT_ProgramExit(NDBT_WRONGARGS); if (argc < 1) { usage(); diff --git a/ndb/tools/drop_tab.cpp b/ndb/tools/drop_tab.cpp index 92a103b81af..991e1505486 100644 --- a/ndb/tools/drop_tab.cpp +++ b/ndb/tools/drop_tab.cpp @@ -41,20 +41,17 @@ static void usage() my_print_help(my_long_options); my_print_variables(my_long_options); } -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) -{ - return ndb_std_get_one_option(optid, opt, argument ? argument : - "d:t:O,/tmp/ndb_drop_table.trace"); -} int main(int argc, char** argv){ NDB_INIT(argv[0]); const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; - if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) +#ifndef DBUG_OFF + "d:t:O,/tmp/ndb_drop_table.trace"; +#endif + if ((ho_error=handle_options(&argc, &argv, my_long_options, + ndb_std_get_one_option))) return NDBT_ProgramExit(NDBT_WRONGARGS); if (argc < 1) { usage(); diff --git a/ndb/tools/listTables.cpp b/ndb/tools/listTables.cpp index a49fbde596e..b4a2235f73b 100644 --- a/ndb/tools/listTables.cpp +++ b/ndb/tools/listTables.cpp @@ -196,13 +196,6 @@ static void usage() my_print_help(my_long_options); my_print_variables(my_long_options); } -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) -{ - return ndb_std_get_one_option(optid, opt, argument ? 
argument : - "d:t:O,/tmp/ndb_show_tables.trace"); -} int main(int argc, char** argv){ NDB_INIT(argv[0]); @@ -210,7 +203,11 @@ int main(int argc, char** argv){ const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; - if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) +#ifndef DBUG_OFF + opt_debug= "d:t:O,/tmp/ndb_show_tables.trace"; +#endif + if ((ho_error=handle_options(&argc, &argv, my_long_options, + ndb_std_get_one_option))) return NDBT_ProgramExit(NDBT_WRONGARGS); _tabname = argv[0]; diff --git a/ndb/tools/restore/restore_main.cpp b/ndb/tools/restore/restore_main.cpp index 326020d7118..93c40d31adb 100644 --- a/ndb/tools/restore/restore_main.cpp +++ b/ndb/tools/restore/restore_main.cpp @@ -109,8 +109,10 @@ static my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *argument) { - ndb_std_get_one_option(optid, opt, argument ? argument : - "d:t:O,/tmp/ndb_restore.trace"); +#ifndef DBUG_OFF + opt_debug= "d:t:O,/tmp/ndb_restore.trace"; +#endif + ndb_std_get_one_option(optid, opt, argument); switch (optid) { case 'n': if (ga_nodeId == 0) diff --git a/ndb/tools/select_all.cpp b/ndb/tools/select_all.cpp index a3d07e3938b..baa18db1ebd 100644 --- a/ndb/tools/select_all.cpp +++ b/ndb/tools/select_all.cpp @@ -85,13 +85,6 @@ static void usage() my_print_help(my_long_options); my_print_variables(my_long_options); } -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) -{ - return ndb_std_get_one_option(optid, opt, argument ? 
argument : - "d:t:O,/tmp/ndb_select_all.trace"); -} int main(int argc, char** argv){ NDB_INIT(argv[0]); @@ -99,7 +92,11 @@ int main(int argc, char** argv){ load_defaults("my",load_default_groups,&argc,&argv); const char* _tabname; int ho_error; - if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) +#ifndef DBUG_OFF + opt_debug= "d:t:O,/tmp/ndb_select_all.trace"; +#endif + if ((ho_error=handle_options(&argc, &argv, my_long_options, + ndb_std_get_one_option))) return NDBT_ProgramExit(NDBT_WRONGARGS); if ((_tabname = argv[0]) == 0) { usage(); diff --git a/ndb/tools/select_count.cpp b/ndb/tools/select_count.cpp index 41e66c8b367..6fa3c77f15a 100644 --- a/ndb/tools/select_count.cpp +++ b/ndb/tools/select_count.cpp @@ -60,20 +60,17 @@ static void usage() my_print_help(my_long_options); my_print_variables(my_long_options); } -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) -{ - return ndb_std_get_one_option(optid, opt, argument ? argument : - "d:t:O,/tmp/ndb_select_count.trace"); -} int main(int argc, char** argv){ NDB_INIT(argv[0]); const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; - if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) +#ifndef DBUG_OFF + opt_debug= "d:t:O,/tmp/ndb_select_count.trace"; +#endif + if ((ho_error=handle_options(&argc, &argv, my_long_options, + ndb_std_get_one_option))) return NDBT_ProgramExit(NDBT_WRONGARGS); if (argc < 1) { usage(); diff --git a/ndb/tools/waiter.cpp b/ndb/tools/waiter.cpp index cc6a21428c8..db90bd8bd90 100644 --- a/ndb/tools/waiter.cpp +++ b/ndb/tools/waiter.cpp @@ -60,24 +60,19 @@ static void usage() my_print_variables(my_long_options); } -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) -{ - return ndb_std_get_one_option(optid, opt, argument ? 
argument : - "d:t:O,/tmp/ndb_drop_table.trace"); -} - int main(int argc, char** argv){ NDB_INIT(argv[0]); const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); const char* _hostName = NULL; int ho_error; - if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) +#ifndef DBUG_OFF + opt_debug= "d:t:O,/tmp/ndb_waiter.trace"; +#endif + if ((ho_error=handle_options(&argc, &argv, my_long_options, + ndb_std_get_one_option))) return NDBT_ProgramExit(NDBT_WRONGARGS); - char buf[255]; _hostName = argv[0]; if (_hostName == 0) diff --git a/sql/filesort.cc b/sql/filesort.cc index 57ac113cfd5..7dd755308f1 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -420,9 +420,6 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, next_pos=ref_pos; if (! indexfile && ! quick_select) { - file->reset(); // QQ; Shouldn't be needed - if (sort_form->key_read) // QQ Can be removed after the reset - file->extra(HA_EXTRA_KEYREAD); // QQ is removed next_pos=(byte*) 0; /* Find records in sequence */ file->ha_rnd_init(1); file->extra_opt(HA_EXTRA_CACHE, diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 98f39fdf0ac..35bbeca9cc0 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -70,10 +70,10 @@ static handlerton ndbcluster_hton = { #define ERR_PRINT(err) \ DBUG_PRINT("error", ("%d message: %s", err.code, err.message)) -#define ERR_RETURN(err) \ -{ \ +#define ERR_RETURN(err) \ +{ \ const NdbError& tmp= err; \ - ERR_PRINT(tmp); \ + ERR_PRINT(tmp); \ DBUG_RETURN(ndb_to_mysql_error(&tmp)); \ } @@ -101,10 +101,10 @@ static void free_share(NDB_SHARE *share); static int packfrm(const void *data, uint len, const void **pack_data, uint *pack_len); static int unpackfrm(const void **data, uint *len, - const void* pack_data); + const void* pack_data); static int ndb_get_table_statistics(Ndb*, const char *, - struct Ndb_statistics *); + struct Ndb_statistics *); // Util thread variables 
static pthread_t ndb_util_thread; @@ -206,8 +206,8 @@ static int ndb_to_mysql_error(const NdbError *err) { // Push the NDB error message as warning push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - err->code, err->message, "NDB"); + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + err->code, err->message, "NDB"); } if (err_map[i].my_err == -1) return err->code; @@ -225,8 +225,8 @@ int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans) return 0; #endif return trans->execute(NdbTransaction::NoCommit, - NdbTransaction::AbortOnError, - h->m_force_send); + NdbTransaction::AbortOnError, + h->m_force_send); } inline @@ -238,8 +238,8 @@ int execute_commit(ha_ndbcluster *h, NdbTransaction *trans) return 0; #endif return trans->execute(NdbTransaction::Commit, - NdbTransaction::AbortOnError, - h->m_force_send); + NdbTransaction::AbortOnError, + h->m_force_send); } inline @@ -251,8 +251,8 @@ int execute_commit(THD *thd, NdbTransaction *trans) return 0; #endif return trans->execute(NdbTransaction::Commit, - NdbTransaction::AbortOnError, - thd->variables.ndb_force_send); + NdbTransaction::AbortOnError, + thd->variables.ndb_force_send); } inline @@ -264,14 +264,13 @@ int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans) return 0; #endif return trans->execute(NdbTransaction::NoCommit, - NdbTransaction::AO_IgnoreError, - h->m_force_send); + NdbTransaction::AO_IgnoreError, + h->m_force_send); } /* Place holder for ha_ndbcluster thread specific data */ - Thd_ndb::Thd_ndb() { ndb= new Ndb(g_ndb_cluster_connection, ""); @@ -330,8 +329,8 @@ void ha_ndbcluster::records_update() DBUG_ENTER("ha_ndbcluster::records_update"); struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info; DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", - ((const NDBTAB *)m_table)->getTableId(), - info->no_uncommitted_rows_count)); + ((const NDBTAB *)m_table)->getTableId(), + info->no_uncommitted_rows_count)); // if 
(info->records == ~(ha_rows)0) { Ndb *ndb= get_ndb(); @@ -373,8 +372,8 @@ void ha_ndbcluster::no_uncommitted_rows_init(THD *thd) info->no_uncommitted_rows_count= 0; info->records= ~(ha_rows)0; DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", - ((const NDBTAB *)m_table)->getTableId(), - info->no_uncommitted_rows_count)); + ((const NDBTAB *)m_table)->getTableId(), + info->no_uncommitted_rows_count)); } DBUG_VOID_RETURN; } @@ -388,8 +387,8 @@ void ha_ndbcluster::no_uncommitted_rows_update(int c) (struct Ndb_table_local_info *)m_table_info; info->no_uncommitted_rows_count+= c; DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", - ((const NDBTAB *)m_table)->getTableId(), - info->no_uncommitted_rows_count)); + ((const NDBTAB *)m_table)->getTableId(), + info->no_uncommitted_rows_count)); DBUG_VOID_RETURN; } @@ -408,7 +407,7 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd) Take care of the error that occured in NDB RETURN - 0 No error + 0 No error # The mapped error code */ @@ -427,7 +426,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) NDBDICT *dict= ndb->getDictionary(); DBUG_PRINT("info", ("invalidateTable %s", m_tabname)); dict->invalidateTable(m_tabname); - table->s->version= 0L; /* Free when thread is ready */ + table->s->version= 0L; /* Free when thread is ready */ break; } default: @@ -435,7 +434,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) } res= ndb_to_mysql_error(&err); DBUG_PRINT("info", ("transformed ndbcluster error %d to mysql error %d", - err.code, res)); + err.code, res)); if (res == HA_ERR_FOUND_DUPP_KEY) m_dupkey= table->s->primary_key; @@ -449,7 +448,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) */ bool ha_ndbcluster::get_error_message(int error, - String *buf) + String *buf) { DBUG_ENTER("ha_ndbcluster::get_error_message"); DBUG_PRINT("enter", ("error: %d", error)); @@ -514,11 +513,11 @@ static inline bool ndb_supported_type(enum_field_types type) */ bool ha_ndbcluster::set_hidden_key(NdbOperation 
*ndb_op, - uint fieldnr, const byte *field_ptr) + uint fieldnr, const byte *field_ptr) { DBUG_ENTER("set_hidden_key"); DBUG_RETURN(ndb_op->equal(fieldnr, (char*)field_ptr, - NDB_HIDDEN_PRIMARY_KEY_LENGTH) != 0); + NDB_HIDDEN_PRIMARY_KEY_LENGTH) != 0); } @@ -572,28 +571,28 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, pack_len= sizeof(empty_field); field_ptr= (byte *)&empty_field; if (field->is_null()) - empty_field= 0; + empty_field= 0; else - empty_field= 1; + empty_field= 1; } if (! (field->flags & BLOB_FLAG)) { if (field->type() != MYSQL_TYPE_BIT) { - if (field->is_null()) - // Set value to NULL - DBUG_RETURN((ndb_op->setValue(fieldnr, - (char*)NULL, pack_len) != 0)); - // Common implementation for most field types - DBUG_RETURN(ndb_op->setValue(fieldnr, - (char*)field_ptr, pack_len) != 0); + if (field->is_null()) + // Set value to NULL + DBUG_RETURN((ndb_op->setValue(fieldnr, + (char*)NULL, pack_len) != 0)); + // Common implementation for most field types + DBUG_RETURN(ndb_op->setValue(fieldnr, + (char*)field_ptr, pack_len) != 0); } else // if (field->type() == MYSQL_TYPE_BIT) { - longlong bits= field->val_int(); + longlong bits= field->val_int(); - // Round up bit field length to nearest word boundry - pack_len= ((pack_len + 3) >> 2) << 2; + // Round up bit field length to nearest word boundry + pack_len= ((pack_len + 3) >> 2) << 2; DBUG_ASSERT(pack_len <= 8); if (field->is_null()) // Set value to NULL @@ -601,13 +600,13 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, DBUG_PRINT("info", ("bit field")); DBUG_DUMP("value", (char*)&bits, pack_len); #ifdef WORDS_BIGENDIAN - if (pack_len < 5) - { - DBUG_RETURN(ndb_op->setValue(fieldnr, - ((char*)&bits)+4, pack_len) != 0); - } + if (pack_len < 5) + { + DBUG_RETURN(ndb_op->setValue(fieldnr, + ((char*)&bits)+4, pack_len) != 0); + } #endif - DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)&bits, pack_len) != 0); + DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)&bits, 
pack_len) != 0); } } // Blob type @@ -635,7 +634,7 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26)); if (set_blob_value) - *set_blob_value= TRUE; + *set_blob_value= TRUE; // No callback needed to write value DBUG_RETURN(ndb_blob->setValue(blob_ptr, blob_len) != 0); } @@ -743,17 +742,17 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field, { DBUG_ASSERT(field->ptr != NULL); if (! (field->flags & BLOB_FLAG)) - { + { if (field->type() != MYSQL_TYPE_BIT) { - byte *field_buf; - if (field->pack_length() != 0) - field_buf= buf + (field->ptr - table->record[0]); - else - field_buf= (byte *)&dummy_buf; - m_value[fieldnr].rec= ndb_op->getValue(fieldnr, - field_buf); - } + byte *field_buf; + if (field->pack_length() != 0) + field_buf= buf + (field->ptr - table->record[0]); + else + field_buf= (byte *)&dummy_buf; + m_value[fieldnr].rec= ndb_op->getValue(fieldnr, + field_buf); + } else // if (field->type() == MYSQL_TYPE_BIT) { m_value[fieldnr].rec= ndb_op->getValue(fieldnr); @@ -843,7 +842,7 @@ int ha_ndbcluster::get_metadata(const char *path) */ error= 0; if (readfrm(path, &data, &length) || - packfrm(data, length, &pack_data, &pack_length)) + packfrm(data, length, &pack_data, &pack_length)) { my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR)); my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR)); @@ -851,24 +850,24 @@ int ha_ndbcluster::get_metadata(const char *path) } if ((pack_length != tab->getFrmLength()) || - (memcmp(pack_data, tab->getFrmData(), pack_length))) + (memcmp(pack_data, tab->getFrmData(), pack_length))) { if (!invalidating_ndb_table) { - DBUG_PRINT("info", ("Invalidating table")); - dict->invalidateTable(m_tabname); - invalidating_ndb_table= TRUE; + DBUG_PRINT("info", ("Invalidating table")); + dict->invalidateTable(m_tabname); + invalidating_ndb_table= TRUE; } else { - DBUG_PRINT("error", - ("metadata, pack_length: %d getFrmLength: %d memcmp: %d", - pack_length, 
tab->getFrmLength(), - memcmp(pack_data, tab->getFrmData(), pack_length))); - DBUG_DUMP("pack_data", (char*)pack_data, pack_length); - DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength()); - error= 3; - invalidating_ndb_table= FALSE; + DBUG_PRINT("error", + ("metadata, pack_length: %d getFrmLength: %d memcmp: %d", + pack_length, tab->getFrmLength(), + memcmp(pack_data, tab->getFrmData(), pack_length))); + DBUG_DUMP("pack_data", (char*)pack_data, pack_length); + DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength()); + error= 3; + invalidating_ndb_table= FALSE; } } else @@ -889,8 +888,8 @@ int ha_ndbcluster::get_metadata(const char *path) } static int fix_unique_index_attr_order(NDB_INDEX_DATA &data, - const NDBINDEX *index, - KEY *key_info) + const NDBINDEX *index, + KEY *key_info) { DBUG_ENTER("fix_unique_index_attr_order"); unsigned sz= index->getNoOfIndexColumns(); @@ -916,8 +915,8 @@ static int fix_unique_index_attr_order(NDB_INDEX_DATA &data, const NdbDictionary::Column *c= index->getColumn(j); if (strncmp(field_name, c->getName(), name_sz) == 0) { - data.unique_index_attrid_map[i]= j; - break; + data.unique_index_attrid_map[i]= j; + break; } } DBUG_ASSERT(data.unique_index_attrid_map[i] != 255); @@ -948,40 +947,40 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase) { strxnmov(unique_index_name, FN_LEN, index_name, unique_suffix, NullS); DBUG_PRINT("info", ("Created unique index name \'%s\' for index %d", - unique_index_name, i)); + unique_index_name, i)); } // Create secondary indexes if in create phase if (phase == ILBP_CREATE) { DBUG_PRINT("info", ("Creating index %u: %s", i, index_name)); switch (idx_type){ - + case PRIMARY_KEY_INDEX: - // Do nothing, already created - break; + // Do nothing, already created + break; case PRIMARY_KEY_ORDERED_INDEX: - error= create_ordered_index(index_name, key_info); - break; + error= create_ordered_index(index_name, key_info); + break; case UNIQUE_ORDERED_INDEX: - if (!(error= 
create_ordered_index(index_name, key_info))) - error= create_unique_index(unique_index_name, key_info); - break; + if (!(error= create_ordered_index(index_name, key_info))) + error= create_unique_index(unique_index_name, key_info); + break; case UNIQUE_INDEX: - if (!(error= check_index_fields_not_null(i))) - error= create_unique_index(unique_index_name, key_info); - break; + if (!(error= check_index_fields_not_null(i))) + error= create_unique_index(unique_index_name, key_info); + break; case ORDERED_INDEX: - error= create_ordered_index(index_name, key_info); - break; + error= create_ordered_index(index_name, key_info); + break; default: - DBUG_ASSERT(FALSE); - break; + DBUG_ASSERT(FALSE); + break; } if (error) { - DBUG_PRINT("error", ("Failed to create index %u", i)); - drop_table(); - break; + DBUG_PRINT("error", ("Failed to create index %u", i)); + drop_table(); + break; } } // Add handles to index objects @@ -1033,9 +1032,9 @@ int ha_ndbcluster::check_index_fields_not_null(uint inx) Field* field= key_part->field; if (field->maybe_null()) { - my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX), - MYF(0),field->field_name); - DBUG_RETURN(ER_NULL_COLUMN_IN_INDEX); + my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX), + MYF(0),field->field_name); + DBUG_RETURN(ER_NULL_COLUMN_IN_INDEX); } } @@ -1171,7 +1170,7 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key) char buf[256]; shrink_varchar(field, ptr, buf); if (set_ndb_key(op, field, - key_part->fieldnr-1, ptr)) + key_part->fieldnr-1, ptr)) ERR_RETURN(op->getNdbError()); key += key_part->store_length; } @@ -1190,7 +1189,7 @@ int ha_ndbcluster::set_primary_key_from_old_data(NdbOperation *op, const byte *o { Field* field= key_part->field; if (set_ndb_key(op, field, - key_part->fieldnr-1, old_data+key_part->offset)) + key_part->fieldnr-1, old_data+key_part->offset)) ERR_RETURN(op->getNdbError()); } DBUG_RETURN(0); @@ -1216,8 +1215,8 @@ int 
ha_ndbcluster::set_primary_key(NdbOperation *op) int ha_ndbcluster::set_index_key(NdbOperation *op, - const KEY *key_info, - const byte * key_ptr) + const KEY *key_info, + const byte * key_ptr) { DBUG_ENTER("ha_ndbcluster::set_index_key"); uint i; @@ -1250,11 +1249,11 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op) { Field *field= table->field[i]; if ((thd->query_id == field->query_id) || - ((field->flags & PRI_KEY_FLAG)) || - m_retrieve_all_fields) + ((field->flags & PRI_KEY_FLAG)) || + m_retrieve_all_fields) { if (get_ndb_value(op, field, i, buf)) - ERR_RETURN(op->getNdbError()); + ERR_RETURN(op->getNdbError()); } else { @@ -1363,10 +1362,10 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data) { Field *field= table->field[i]; if (!((field->flags & PRI_KEY_FLAG) || - (thd->query_id == field->query_id))) + (thd->query_id == field->query_id))) { if (get_ndb_value(op, field, i, new_data)) - ERR_RETURN(trans->getNdbError()); + ERR_RETURN(trans->getNdbError()); } } @@ -1387,7 +1386,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data) { Field *field= table->field[i]; if (!((field->flags & PRI_KEY_FLAG) || - (thd->query_id == field->query_id))) + (thd->query_id == field->query_id))) { m_value[i].ptr= NULL; } @@ -1429,7 +1428,7 @@ int ha_ndbcluster::peek_row() */ int ha_ndbcluster::unique_index_read(const byte *key, - uint key_len, byte *buf) + uint key_len, byte *buf) { int res; NdbTransaction *trans= m_active_trans; @@ -1441,7 +1440,7 @@ int ha_ndbcluster::unique_index_read(const byte *key, NdbOperation::LockMode lm= (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); if (!(op= trans->getNdbIndexOperation((NDBINDEX *) - m_index[active_index].unique_index, + m_index[active_index].unique_index, (const NDBTAB *) m_table)) || op->readTuple(lm) != 0) ERR_RETURN(trans->getNdbError()); @@ -1479,7 +1478,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) if (m_ops_pending && 
m_blobs_pending) { if (execute_no_commit(this,trans) != 0) - DBUG_RETURN(ndb_err(trans)); + DBUG_RETURN(ndb_err(trans)); m_ops_pending= 0; m_blobs_pending= FALSE; } @@ -1494,29 +1493,29 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) // 2: No more cached records /* - Before fetching more rows and releasing lock(s), - all pending update or delete operations should - be sent to NDB + Before fetching more rows and releasing lock(s), + all pending update or delete operations should + be sent to NDB */ DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending)); if (m_ops_pending) { - if (m_transaction_on) - { - if (execute_no_commit(this,trans) != 0) - DBUG_RETURN(-1); - } - else - { - if (execute_commit(this,trans) != 0) - DBUG_RETURN(-1); - if(trans->restart() != 0) - { - DBUG_ASSERT(0); - DBUG_RETURN(-1); - } - } - m_ops_pending= 0; + if (m_transaction_on) + { + if (execute_no_commit(this,trans) != 0) + DBUG_RETURN(-1); + } + else + { + if (execute_commit(this,trans) != 0) + DBUG_RETURN(-1); + if(trans->restart() != 0) + { + DBUG_ASSERT(0); + DBUG_RETURN(-1); + } + } + m_ops_pending= 0; } contact_ndb= (check == 2); } @@ -1575,8 +1574,8 @@ inline int ha_ndbcluster::next_result(byte *buf) */ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, - const key_range *keys[2], - uint range_no) + const key_range *keys[2], + uint range_no) { const KEY *const key_info= table->key_info + active_index; const uint key_parts= key_info->key_parts; @@ -1696,7 +1695,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, DBUG_PRINT("error", ("key %d unknown flag %d", j, p.key->flag)); DBUG_ASSERT(false); // Stop setting bounds but continue with what we have - op->end_of_bound(range_no); + op->end_of_bound(range_no); DBUG_RETURN(0); } } @@ -1732,13 +1731,13 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, // Set bound if not cancelled via type -1 if (p.bound_type != -1) - { + { const char* ptr= p.bound_ptr; char buf[256]; shrink_varchar(field, ptr, 
buf); if (op->setBound(i, p.bound_type, ptr)) ERR_RETURN(op->getNdbError()); - } + } } } @@ -1753,8 +1752,8 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, */ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, - const key_range *end_key, - bool sorted, bool descending, byte* buf) + const key_range *end_key, + bool sorted, bool descending, byte* buf) { int res; bool restart; @@ -1775,9 +1774,9 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, NdbOperation::LockMode lm= (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); if (!(op= trans->getNdbIndexScanOperation((NDBINDEX *) - m_index[active_index].index, - (const NDBTAB *) m_table)) || - op->readTuples(lm, 0, parallelism, sorted, descending)) + m_index[active_index].index, + (const NDBTAB *) m_table)) || + op->readTuples(lm, 0, parallelism, sorted, descending)) ERR_RETURN(trans->getNdbError()); m_active_cursor= op; } else { @@ -1786,7 +1785,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, DBUG_ASSERT(op->getSorted() == sorted); DBUG_ASSERT(op->getLockMode() == - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type)); + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type)); if(op->reset_bounds(m_force_send)) DBUG_RETURN(ndb_err(m_active_trans)); } @@ -1797,6 +1796,9 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, if (res) DBUG_RETURN(res); } + + if (!restart && generate_scan_filter(m_cond_stack, op)) + DBUG_RETURN(ndb_err(trans)); if (!restart && (res= define_read_attrs(buf, op))) { @@ -1810,96 +1812,6 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, } /* - Start a filtered scan in NDB. - - NOTE - This function is here as an example of how to start a - filtered scan. It should be possible to replace full_table_scan - with this function and make a best effort attempt - at filtering out the irrelevant data by converting the "items" - into interpreted instructions. 
- This would speed up table scans where there is a limiting WHERE clause - that doesn't match any index in the table. - - */ - -int ha_ndbcluster::filtered_scan(const byte *key, uint key_len, - byte *buf, - enum ha_rkey_function find_flag) -{ - int res; - NdbTransaction *trans= m_active_trans; - NdbScanOperation *op; - - DBUG_ENTER("filtered_scan"); - DBUG_PRINT("enter", ("key_len: %u, index: %u", - key_len, active_index)); - DBUG_DUMP("key", (char*)key, key_len); - DBUG_PRINT("info", ("Starting a new filtered scan on %s", - m_tabname)); - - NdbOperation::LockMode lm= - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); - if (!(op= trans->getNdbScanOperation((const NDBTAB *) m_table)) || - op->readTuples(lm, 0, parallelism)) - ERR_RETURN(trans->getNdbError()); - m_active_cursor= op; - - { - // Start scan filter - NdbScanFilter sf(op); - sf.begin(); - - // Set filter using the supplied key data - byte *key_ptr= (byte *) key; - uint tot_len= 0; - KEY* key_info= table->key_info + active_index; - for (uint k= 0; k < key_info->key_parts; k++) - { - KEY_PART_INFO* key_part= key_info->key_part+k; - Field* field= key_part->field; - uint ndb_fieldnr= key_part->fieldnr-1; - DBUG_PRINT("key_part", ("fieldnr: %d", ndb_fieldnr)); - //const NDBCOL *col= ((const NDBTAB *) m_table)->getColumn(ndb_fieldnr); - uint32 field_len= field->pack_length(); - DBUG_DUMP("key", (char*)key, field_len); - - DBUG_PRINT("info", ("Column %s, type: %d, len: %d", - field->field_name, field->real_type(), field_len)); - - // Define scan filter - if (field->real_type() == MYSQL_TYPE_STRING) - sf.cmp(NdbScanFilter::COND_EQ, ndb_fieldnr, key_ptr, field_len); - else - { - if (field_len == 8) - sf.eq(ndb_fieldnr, (Uint64)*key_ptr); - else if (field_len <= 4) - sf.eq(ndb_fieldnr, (Uint32)*key_ptr); - else - DBUG_RETURN(1); - } - - key_ptr += field_len; - tot_len += field_len; - - if (tot_len >= key_len) - break; - } - // End scan filter - sf.end(); - } - - if((res= define_read_attrs(buf, op))) - 
DBUG_RETURN(res); - - if (execute_no_commit(this,trans) != 0) - DBUG_RETURN(ndb_err(trans)); - DBUG_PRINT("exit", ("Scan started successfully")); - DBUG_RETURN(next_result(buf)); -} - -/* Start full table scan in NDB */ @@ -1918,7 +1830,8 @@ int ha_ndbcluster::full_table_scan(byte *buf) op->readTuples(lm, 0, parallelism)) ERR_RETURN(trans->getNdbError()); m_active_cursor= op; - + if (generate_scan_filter(m_cond_stack, op)) + DBUG_RETURN(ndb_err(trans)); if((res= define_read_attrs(buf, op))) DBUG_RETURN(res); @@ -1996,7 +1909,7 @@ int ha_ndbcluster::write_row(byte *record) { Field *field= table->field[i]; if (!(field->flags & PRI_KEY_FLAG) && - set_ndb_value(op, field, i, &set_blob_value)) + set_ndb_value(op, field, i, &set_blob_value)) { m_skip_auto_increment= TRUE; ERR_RETURN(op->getNdbError()); @@ -2019,31 +1932,31 @@ int ha_ndbcluster::write_row(byte *record) { // Send rows to NDB DBUG_PRINT("info", ("Sending inserts to NDB, "\ - "rows_inserted:%d, bulk_insert_rows: %d", - (int)m_rows_inserted, (int)m_bulk_insert_rows)); + "rows_inserted:%d, bulk_insert_rows: %d", + (int)m_rows_inserted, (int)m_bulk_insert_rows)); m_bulk_insert_not_flushed= FALSE; if (m_transaction_on) { if (execute_no_commit(this,trans) != 0) { - m_skip_auto_increment= TRUE; - no_uncommitted_rows_execute_failure(); - DBUG_RETURN(ndb_err(trans)); + m_skip_auto_increment= TRUE; + no_uncommitted_rows_execute_failure(); + DBUG_RETURN(ndb_err(trans)); } } else { if (execute_commit(this,trans) != 0) { - m_skip_auto_increment= TRUE; - no_uncommitted_rows_execute_failure(); - DBUG_RETURN(ndb_err(trans)); + m_skip_auto_increment= TRUE; + no_uncommitted_rows_execute_failure(); + DBUG_RETURN(ndb_err(trans)); } if(trans->restart() != 0) { - DBUG_ASSERT(0); - DBUG_RETURN(-1); + DBUG_ASSERT(0); + DBUG_RETURN(-1); } } } @@ -2052,11 +1965,11 @@ int ha_ndbcluster::write_row(byte *record) Ndb *ndb= get_ndb(); Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1; DBUG_PRINT("info", - ("Trying to set 
next auto increment value to %lu", + ("Trying to set next auto increment value to %lu", (ulong) next_val)); if (ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE)) DBUG_PRINT("info", - ("Setting next auto increment value to %u", next_val)); + ("Setting next auto increment value to %u", next_val)); } m_skip_auto_increment= TRUE; @@ -2067,7 +1980,7 @@ int ha_ndbcluster::write_row(byte *record) /* Compare if a key in a row has changed */ int ha_ndbcluster::key_cmp(uint keynr, const byte * old_row, - const byte * new_row) + const byte * new_row) { KEY_PART_INFO *key_part=table->key_info[keynr].key_part; KEY_PART_INFO *end=key_part+table->key_info[keynr].key_parts; @@ -2077,22 +1990,22 @@ int ha_ndbcluster::key_cmp(uint keynr, const byte * old_row, if (key_part->null_bit) { if ((old_row[key_part->null_offset] & key_part->null_bit) != - (new_row[key_part->null_offset] & key_part->null_bit)) - return 1; + (new_row[key_part->null_offset] & key_part->null_bit)) + return 1; } if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) { if (key_part->field->cmp_binary((char*) (old_row + key_part->offset), - (char*) (new_row + key_part->offset), - (ulong) key_part->length)) - return 1; + (char*) (new_row + key_part->offset), + (ulong) key_part->length)) + return 1; } else { if (memcmp(old_row+key_part->offset, new_row+key_part->offset, - key_part->length)) - return 1; + key_part->length)) + return 1; } } return 0; @@ -2170,7 +2083,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) else { if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || - op->updateTuple() != 0) + op->updateTuple() != 0) ERR_RETURN(trans->getNdbError()); if (table->s->primary_key == MAX_KEY) @@ -2186,13 +2099,13 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) DBUG_DUMP("key", (char*)rec->aRef(), NDB_HIDDEN_PRIMARY_KEY_LENGTH); if (set_hidden_key(op, no_fields, rec->aRef())) - ERR_RETURN(op->getNdbError()); + 
ERR_RETURN(op->getNdbError()); } else { int res; if ((res= set_primary_key_from_old_data(op, old_data))) - DBUG_RETURN(res); + DBUG_RETURN(res); } } @@ -2202,7 +2115,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) Field *field= table->field[i]; if (((thd->query_id == field->query_id) || m_retrieve_all_fields) && (!(field->flags & PRI_KEY_FLAG)) && - set_ndb_value(op, field, i)) + set_ndb_value(op, field, i)) ERR_RETURN(op->getNdbError()); } @@ -2253,7 +2166,7 @@ int ha_ndbcluster::delete_row(const byte *record) { if (!(op=trans->getNdbOperation((const NDBTAB *) m_table)) || - op->deleteTuple() != 0) + op->deleteTuple() != 0) ERR_RETURN(trans->getNdbError()); no_uncommitted_rows_update(-1); @@ -2267,15 +2180,15 @@ int ha_ndbcluster::delete_row(const byte *record) DBUG_ASSERT(rec != NULL); if (set_hidden_key(op, no_fields, rec->aRef())) - ERR_RETURN(op->getNdbError()); + ERR_RETURN(op->getNdbError()); } else { int res; if ((res= (m_primary_key_update ? - set_primary_key_from_old_data(op, record) - : set_primary_key(op)))) - return res; + set_primary_key_from_old_data(op, record) + : set_primary_key(op)))) + return res; } } @@ -2292,7 +2205,7 @@ int ha_ndbcluster::delete_row(const byte *record) SYNOPSIS unpack_record() - buf Buffer to store read row + buf Buffer to store read row NOTE The data for each row is read directly into the @@ -2328,9 +2241,9 @@ void ha_ndbcluster::unpack_record(byte* buf) if (pack_len < 5) { DBUG_PRINT("info", ("bit field H'%.8X", - (*value).rec->u_32_value())); + (*value).rec->u_32_value())); ((Field_bit *) *field)->store((longlong) - (*value).rec->u_32_value()); + (*value).rec->u_32_value()); } else { @@ -2338,7 +2251,7 @@ void ha_ndbcluster::unpack_record(byte* buf) *(Uint32 *)(*value).rec->aRef(), *((Uint32 *)(*value).rec->aRef()+1))); ((Field_bit *) *field)->store((longlong) - (*value).rec->u_64_value()); } + (*value).rec->u_64_value()); } } } else @@ -2348,10 +2261,10 @@ void ha_ndbcluster::unpack_record(byte* 
buf) #ifndef DBUG_OFF int ret= #endif - ndb_blob->getNull(isNull); + ndb_blob->getNull(isNull); DBUG_ASSERT(ret == 0); if (isNull) - (*field)->set_null(row_offset); + (*field)->set_null(row_offset); } } } @@ -2369,7 +2282,7 @@ void ha_ndbcluster::unpack_record(byte* buf) DBUG_PRINT("hidden", ("%d: %s \"%llu\"", hidden_no, hidden_col->getName(), rec->u_64_value())); } - print_results(); + //print_results(); #endif DBUG_VOID_RETURN; } @@ -2479,8 +2392,8 @@ check_null_in_key(const KEY* key_info, const byte *key, uint key_len) } int ha_ndbcluster::index_read(byte *buf, - const byte *key, uint key_len, - enum ha_rkey_function find_flag) + const byte *key, uint key_len, + enum ha_rkey_function find_flag) { DBUG_ENTER("ha_ndbcluster::index_read"); DBUG_PRINT("enter", ("active_index: %u, key_len: %u, find_flag: %d", @@ -2495,7 +2408,7 @@ int ha_ndbcluster::index_read(byte *buf, if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len) { if(m_active_cursor && (error= close_scan())) - DBUG_RETURN(error); + DBUG_RETURN(error); DBUG_RETURN(pk_read(key, key_len, buf)); } else if (type == PRIMARY_KEY_INDEX) @@ -2506,10 +2419,10 @@ int ha_ndbcluster::index_read(byte *buf, case UNIQUE_ORDERED_INDEX: case UNIQUE_INDEX: if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len && - !check_null_in_key(key_info, key, key_len)) + !check_null_in_key(key_info, key, key_len)) { if(m_active_cursor && (error= close_scan())) - DBUG_RETURN(error); + DBUG_RETURN(error); DBUG_RETURN(unique_index_read(key, key_len, buf)); } else if (type == UNIQUE_INDEX) @@ -2547,8 +2460,8 @@ int ha_ndbcluster::index_read(byte *buf, int ha_ndbcluster::index_read_idx(byte *buf, uint index_no, - const byte *key, uint key_len, - enum ha_rkey_function find_flag) + const byte *key, uint key_len, + enum ha_rkey_function find_flag) { statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); DBUG_ENTER("ha_ndbcluster::index_read_idx"); @@ -2562,7 +2475,7 @@ int 
ha_ndbcluster::index_next(byte *buf) { DBUG_ENTER("ha_ndbcluster::index_next"); statistic_increment(current_thd->status_var.ha_read_next_count, - &LOCK_status); + &LOCK_status); DBUG_RETURN(next_result(buf)); } @@ -2571,7 +2484,7 @@ int ha_ndbcluster::index_prev(byte *buf) { DBUG_ENTER("ha_ndbcluster::index_prev"); statistic_increment(current_thd->status_var.ha_read_prev_count, - &LOCK_status); + &LOCK_status); DBUG_RETURN(next_result(buf)); } @@ -2580,7 +2493,7 @@ int ha_ndbcluster::index_first(byte *buf) { DBUG_ENTER("ha_ndbcluster::index_first"); statistic_increment(current_thd->status_var.ha_read_first_count, - &LOCK_status); + &LOCK_status); // Start the ordered index scan and fetch the first row // Only HA_READ_ORDER indexes get called by index_first @@ -2603,9 +2516,9 @@ int ha_ndbcluster::index_read_last(byte * buf, const byte * key, uint key_len) inline int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, - const key_range *end_key, - bool eq_r, bool sorted, - byte* buf) + const key_range *end_key, + bool eq_r, bool sorted, + byte* buf) { KEY* key_info; int error= 1; @@ -2617,11 +2530,11 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, case PRIMARY_KEY_INDEX: key_info= table->key_info + active_index; if (start_key && - start_key->length == key_info->key_length && - start_key->flag == HA_READ_KEY_EXACT) + start_key->length == key_info->key_length && + start_key->flag == HA_READ_KEY_EXACT) { if(m_active_cursor && (error= close_scan())) - DBUG_RETURN(error); + DBUG_RETURN(error); error= pk_read(start_key->key, start_key->length, buf); DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? 
HA_ERR_END_OF_FILE : error); } @@ -2630,11 +2543,11 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, case UNIQUE_INDEX: key_info= table->key_info + active_index; if (start_key && start_key->length == key_info->key_length && - start_key->flag == HA_READ_KEY_EXACT && - !check_null_in_key(key_info, start_key->key, start_key->length)) + start_key->flag == HA_READ_KEY_EXACT && + !check_null_in_key(key_info, start_key->key, start_key->length)) { if(m_active_cursor && (error= close_scan())) - DBUG_RETURN(error); + DBUG_RETURN(error); error= unique_index_read(start_key->key, start_key->length, buf); DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error); } @@ -2650,17 +2563,17 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, int ha_ndbcluster::read_range_first(const key_range *start_key, - const key_range *end_key, - bool eq_r, bool sorted) + const key_range *end_key, + bool eq_r, bool sorted) { byte* buf= table->record[0]; DBUG_ENTER("ha_ndbcluster::read_range_first"); DBUG_RETURN(read_range_first_to_buf(start_key, - end_key, - eq_r, - sorted, - buf)); + end_key, + eq_r, + sorted, + buf)); } int ha_ndbcluster::read_range_next() @@ -2731,7 +2644,7 @@ int ha_ndbcluster::rnd_next(byte *buf) { DBUG_ENTER("rnd_next"); statistic_increment(current_thd->status_var.ha_read_rnd_next_count, - &LOCK_status); + &LOCK_status); if (!m_active_cursor) DBUG_RETURN(full_table_scan(buf)); @@ -2750,7 +2663,7 @@ int ha_ndbcluster::rnd_pos(byte *buf, byte *pos) { DBUG_ENTER("rnd_pos"); statistic_increment(current_thd->status_var.ha_read_rnd_count, - &LOCK_status); + &LOCK_status); // The primary key for the record is stored in pos // Perform a pk_read using primary key "index" DBUG_RETURN(pk_read(pos, ref_length, buf)); @@ -2832,9 +2745,9 @@ void ha_ndbcluster::info(uint flag) if (m_table_info) { if (m_ha_not_exact_count) - records= 100; + records= 100; else - records_update(); + records_update(); } else { @@ -2843,16 +2756,16 
@@ void ha_ndbcluster::info(uint flag) Ndb *ndb= get_ndb(); struct Ndb_statistics stat; if (current_thd->variables.ndb_use_exact_count && - ndb_get_table_statistics(ndb, m_tabname, &stat) == 0) + ndb_get_table_statistics(ndb, m_tabname, &stat) == 0) { - mean_rec_length= stat.row_size; - data_file_length= stat.fragment_memory; - records= stat.row_count; + mean_rec_length= stat.row_size; + data_file_length= stat.fragment_memory; + records= stat.row_count; } else { - mean_rec_length= 0; - records= 100; + mean_rec_length= 0; + records= 100; } } } @@ -2884,6 +2797,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) break; case HA_EXTRA_RESET: /* Reset database to after open */ DBUG_PRINT("info", ("HA_EXTRA_RESET")); + DBUG_PRINT("info", ("Clearing condition stack")); + cond_clear(); break; case HA_EXTRA_CACHE: /* Cash record in HA_rrnd() */ DBUG_PRINT("info", ("HA_EXTRA_CACHE")); @@ -2970,8 +2885,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) m_ignore_dup_key= FALSE; break; case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those - where field->query_id is the same as - the current query id */ + where field->query_id is the same as + the current query id */ DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_ALL_COLS")); m_retrieve_all_fields= TRUE; break; @@ -3077,15 +2992,7 @@ int ha_ndbcluster::extra_opt(enum ha_extra_function operation, ulong cache_size) } -int ha_ndbcluster::reset() -{ - DBUG_ENTER("reset"); - // Reset what? 
- DBUG_RETURN(1); -} - static const char *ha_ndb_bas_ext[]= { ha_ndb_ext, NullS }; - const char** ha_ndbcluster::bas_ext() const { @@ -3104,7 +3011,7 @@ double ha_ndbcluster::scan_time() DBUG_ENTER("ha_ndbcluster::scan_time()"); double res= rows2double(records*1000); DBUG_PRINT("exit", ("table: %s value: %f", - m_tabname, res)); + m_tabname, res)); DBUG_RETURN(res); } @@ -3206,14 +3113,14 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) trans= ndb->startTransaction(); if (trans == NULL) ERR_RETURN(ndb->getNdbError()); - no_uncommitted_rows_reset(thd); + no_uncommitted_rows_reset(thd); thd_ndb->stmt= trans; trans_register_ha(thd, FALSE, &ndbcluster_hton); } else { if (!thd_ndb->all) - { + { // Not autocommit transaction // A "master" transaction ha not been started yet DBUG_PRINT("trans",("starting transaction, all")); @@ -3221,7 +3128,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) trans= ndb->startTransaction(); if (trans == NULL) ERR_RETURN(ndb->getNdbError()); - no_uncommitted_rows_reset(thd); + no_uncommitted_rows_reset(thd); thd_ndb->all= trans; trans_register_ha(thd, TRUE, &ndbcluster_hton); @@ -3232,7 +3139,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) Check if it should be read or write lock */ if (thd->options & (OPTION_TABLE_LOCK)) - { + { //lockThisTable(); DBUG_PRINT("info", ("Locking the table..." 
)); } @@ -3272,7 +3179,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) const NDBTAB *tab; void *tab_info; if (!(tab= dict->getTable(m_tabname, &tab_info))) - ERR_RETURN(dict->getNdbError()); + ERR_RETURN(dict->getNdbError()); DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion())); m_table= (void *)tab; m_table_info= tab_info; @@ -3758,7 +3665,7 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; #else no_fragments= ((max_rows*acc_row_size)/acc_fragment_size+1 - +1/*correct rounding*/)/2; + +1/*correct rounding*/)/2; #endif } { @@ -3768,8 +3675,8 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) { ftype= NDBTAB::FragAllLarge; if (no_fragments > 4*no_nodes) - push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, - "Ndb might have problems storing the max amount of rows specified"); + push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + "Ndb might have problems storing the max amount of rows specified"); } else if (no_fragments > no_nodes) ftype= NDBTAB::FragAllMedium; @@ -3780,8 +3687,8 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) } int ha_ndbcluster::create(const char *name, - TABLE *form, - HA_CREATE_INFO *info) + TABLE *form, + HA_CREATE_INFO *info) { NDBTAB tab; NDBCOL col; @@ -3827,7 +3734,7 @@ int ha_ndbcluster::create(const char *name, Field *field= form->field[i]; DBUG_PRINT("info", ("name: %s, type: %u, pack_length: %d", field->field_name, field->real_type(), - field->pack_length())); + field->pack_length())); if ((my_errno= create_ndb_column(col, field, info))) DBUG_RETURN(my_errno); tab.addColumn(col); @@ -3865,10 +3772,10 @@ int ha_ndbcluster::create(const char *name, NdbDictionary::Column * col= tab.getColumn(i); int size= pk_length + (col->getPartSize()+3)/4 + 7; if(size > NDB_MAX_TUPLE_SIZE_IN_WORDS && - (pk_length+7) < 
NDB_MAX_TUPLE_SIZE_IN_WORDS) + (pk_length+7) < NDB_MAX_TUPLE_SIZE_IN_WORDS) { - size= NDB_MAX_TUPLE_SIZE_IN_WORDS - pk_length - 7; - col->setPartSize(4*size); + size= NDB_MAX_TUPLE_SIZE_IN_WORDS - pk_length - 7; + col->setPartSize(4*size); } /** * If size > NDB_MAX and pk_length+7 >= NDB_MAX @@ -3910,14 +3817,14 @@ int ha_ndbcluster::create(const char *name, int ha_ndbcluster::create_ordered_index(const char *name, - KEY *key_info) + KEY *key_info) { DBUG_ENTER("ha_ndbcluster::create_ordered_index"); DBUG_RETURN(create_index(name, key_info, FALSE)); } int ha_ndbcluster::create_unique_index(const char *name, - KEY *key_info) + KEY *key_info) { DBUG_ENTER("ha_ndbcluster::create_unique_index"); @@ -3930,8 +3837,8 @@ int ha_ndbcluster::create_unique_index(const char *name, */ int ha_ndbcluster::create_index(const char *name, - KEY *key_info, - bool unique) + KEY *key_info, + bool unique) { Ndb *ndb= get_ndb(); NdbDictionary::Dictionary *dict= ndb->getDictionary(); @@ -4121,11 +4028,11 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_table(NULL), m_table_info(NULL), m_table_flags(HA_REC_NOT_IN_SEQ | - HA_NULL_IN_KEY | - HA_AUTO_PART_KEY | - HA_NO_PREFIX_CHAR_KEYS | - HA_NEED_READ_RANGE_BUFFER | - HA_CAN_BIT_FIELD), + HA_NULL_IN_KEY | + HA_AUTO_PART_KEY | + HA_NO_PREFIX_CHAR_KEYS | + HA_NEED_READ_RANGE_BUFFER | + HA_CAN_BIT_FIELD), m_share(0), m_use_write(FALSE), m_ignore_dup_key(FALSE), @@ -4146,6 +4053,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_force_send(TRUE), m_autoincrement_prefetch(32), m_transaction_on(TRUE), + m_cond_stack(NULL), m_multi_cursor(NULL) { int i; @@ -4192,6 +4100,10 @@ ha_ndbcluster::~ha_ndbcluster() } DBUG_ASSERT(m_active_trans == NULL); + // Discard the condition stack + DBUG_PRINT("info", ("Clearing condition stack")); + cond_clear(); + DBUG_VOID_RETURN; } @@ -4340,7 +4252,7 @@ int ndbcluster_close_connection(THD *thd) */ int ndbcluster_discover(THD* thd, const char *db, const char *name, - const void** frmblob, uint* frmlen) + 
const void** frmblob, uint* frmlen) { uint len; const void* data; @@ -4419,7 +4331,7 @@ int ndbcluster_table_exists(THD* thd, const char *db, const char *name) extern "C" byte* tables_get_key(const char *entry, uint *length, - my_bool not_used __attribute__((unused))) + my_bool not_used __attribute__((unused))) { *length= strlen(entry); return (byte*) entry; @@ -4427,7 +4339,7 @@ extern "C" byte* tables_get_key(const char *entry, uint *length, int ndbcluster_find_files(THD *thd,const char *db,const char *path, - const char *wild, bool dir, List<char> *files) + const char *wild, bool dir, List<char> *files) { DBUG_ENTER("ndbcluster_find_files"); DBUG_PRINT("enter", ("db: %s", db)); @@ -4447,18 +4359,18 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, // List tables in NDB NDBDICT *dict= ndb->getDictionary(); if (dict->listObjects(list, - NdbDictionary::Object::UserTable) != 0) + NdbDictionary::Object::UserTable) != 0) ERR_RETURN(dict->getNdbError()); if (hash_init(&ndb_tables, system_charset_info,list.count,0,0, - (hash_get_key)tables_get_key,0,0)) + (hash_get_key)tables_get_key,0,0)) { DBUG_PRINT("error", ("Failed to init HASH ndb_tables")); DBUG_RETURN(-1); } if (hash_init(&ok_tables, system_charset_info,32,0,0, - (hash_get_key)tables_get_key,0,0)) + (hash_get_key)tables_get_key,0,0)) { DBUG_PRINT("error", ("Failed to init HASH ok_tables")); hash_free(&ndb_tables); @@ -4479,11 +4391,11 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, { if (lower_case_table_names) { - if (wild_case_compare(files_charset_info, t.name, wild)) - continue; + if (wild_case_compare(files_charset_info, t.name, wild)) + continue; } else if (wild_compare(t.name,wild,0)) - continue; + continue; } DBUG_PRINT("info", ("Inserting %s into ndb_tables hash", t.name)); my_hash_insert(&ndb_tables, (byte*)thd->strdup(t.name)); @@ -4505,7 +4417,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, // File is not in NDB, check for .ndb file 
with this name (void)strxnmov(name, FN_REFLEN, - mysql_data_home,"/",db,"/",file_name,ha_ndb_ext,NullS); + mysql_data_home,"/",db,"/",file_name,ha_ndb_ext,NullS); DBUG_PRINT("info", ("Check access for %s", name)); if (access(name, F_OK)) { @@ -4557,10 +4469,10 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, table_list.db= (char*) db; table_list.alias= table_list.table_name= (char*)file_name; (void)mysql_rm_table_part2(thd, &table_list, - /* if_exists */ FALSE, - /* drop_temporary */ FALSE, - /* drop_view */ FALSE, - /* dont_log_query*/ TRUE); + /* if_exists */ FALSE, + /* drop_temporary */ FALSE, + /* drop_view */ FALSE, + /* dont_log_query*/ TRUE); /* Clear error message that is returned when table is deleted */ thd->clear_error(); } @@ -4608,7 +4520,7 @@ ndbcluster_init() new Ndb_cluster_connection(opt_ndbcluster_connectstring)) == 0) { DBUG_PRINT("error",("Ndb_cluster_connection(%s)", - opt_ndbcluster_connectstring)); + opt_ndbcluster_connectstring)); goto ndbcluster_init_error; } @@ -4632,8 +4544,8 @@ ndbcluster_init() { connect_callback(); DBUG_PRINT("info",("NDBCLUSTER storage engine at %s on port %d", - g_ndb_cluster_connection->get_connected_host(), - g_ndb_cluster_connection->get_connected_port())); + g_ndb_cluster_connection->get_connected_host(), + g_ndb_cluster_connection->get_connected_port())); g_ndb_cluster_connection->wait_until_ready(10,3); } else if(res == 1) @@ -4647,10 +4559,10 @@ ndbcluster_init() { char buf[1024]; DBUG_PRINT("info", - ("NDBCLUSTER storage engine not started, " - "will connect using %s", - g_ndb_cluster_connection-> - get_connectstring(buf,sizeof(buf)))); + ("NDBCLUSTER storage engine not started, " + "will connect using %s", + g_ndb_cluster_connection-> + get_connectstring(buf,sizeof(buf)))); } #endif } @@ -4925,7 +4837,7 @@ uint8 ha_ndbcluster::table_cache_type() uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, - Uint64 *commit_count) + Uint64 *commit_count) { 
DBUG_ENTER("ndb_get_commitcount"); @@ -4935,12 +4847,12 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, char name[FN_REFLEN]; NDB_SHARE *share; (void)strxnmov(name, FN_REFLEN, - "./",dbname,"/",tabname,NullS); + "./",dbname,"/",tabname,NullS); DBUG_PRINT("info", ("name: %s", name)); pthread_mutex_lock(&ndbcluster_mutex); if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables, - (byte*) name, - strlen(name)))) + (byte*) name, + strlen(name)))) { pthread_mutex_unlock(&ndbcluster_mutex); DBUG_RETURN(1); @@ -4997,8 +4909,8 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, static my_bool ndbcluster_cache_retrieval_allowed(THD *thd, - char *full_name, uint full_name_len, - ulonglong *engine_data) + char *full_name, uint full_name_len, + ulonglong *engine_data) { DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); @@ -5008,7 +4920,7 @@ ndbcluster_cache_retrieval_allowed(THD *thd, char *tabname= dbname+strlen(dbname)+1; DBUG_PRINT("enter",("dbname=%s, tabname=%s, autocommit=%d", - dbname, tabname, is_autocommit)); + dbname, tabname, is_autocommit)); if (!is_autocommit) DBUG_RETURN(FALSE); @@ -5019,7 +4931,7 @@ ndbcluster_cache_retrieval_allowed(THD *thd, DBUG_RETURN(FALSE); } DBUG_PRINT("info", ("*engine_data=%llu, commit_count=%llu", - *engine_data, commit_count)); + *engine_data, commit_count)); if (*engine_data != commit_count) { *engine_data= commit_count; /* invalidate */ @@ -5056,15 +4968,15 @@ ndbcluster_cache_retrieval_allowed(THD *thd, my_bool ha_ndbcluster::register_query_cache_table(THD *thd, - char *full_name, uint full_name_len, - qc_engine_callback *engine_callback, - ulonglong *engine_data) + char *full_name, uint full_name_len, + qc_engine_callback *engine_callback, + ulonglong *engine_data) { DBUG_ENTER("ha_ndbcluster::register_query_cache_table"); bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); DBUG_PRINT("enter",("dbname=%s, tabname=%s, is_autocommit=%d", - 
m_dbname,m_tabname,is_autocommit)); + m_dbname,m_tabname,is_autocommit)); if (!is_autocommit) DBUG_RETURN(FALSE); @@ -5091,7 +5003,7 @@ ha_ndbcluster::register_query_cache_table(THD *thd, */ static byte* ndbcluster_get_key(NDB_SHARE *share,uint *length, - my_bool not_used __attribute__((unused))) + my_bool not_used __attribute__((unused))) { *length=share->table_name_length; return (byte*) share->table_name; @@ -5124,9 +5036,9 @@ static NDB_SHARE* get_share(const char *table_name) } } DBUG_PRINT("share", - ("table_name: %s, length: %d, use_count: %d, commit_count: %d", - share->table_name, share->table_name_length, share->use_count, - share->commit_count)); + ("table_name: %s, length: %d, use_count: %d, commit_count: %d", + share->table_name, share->table_name_length, share->use_count, + share->commit_count)); share->use_count++; pthread_mutex_unlock(&ndbcluster_mutex); @@ -5168,7 +5080,7 @@ struct frm_blob_struct static int packfrm(const void *data, uint len, - const void **pack_data, uint *pack_len) + const void **pack_data, uint *pack_len) { int error; ulong org_len, comp_len; @@ -5210,7 +5122,7 @@ err: static int unpackfrm(const void **unpack_data, uint *unpack_len, - const void *pack_data) + const void *pack_data) { const frm_blob_struct *blob= (frm_blob_struct*)pack_data; byte *data; @@ -5218,12 +5130,12 @@ static int unpackfrm(const void **unpack_data, uint *unpack_len, DBUG_ENTER("unpackfrm"); DBUG_PRINT("enter", ("pack_data: %x", pack_data)); - complen= uint4korr((char*)&blob->head.complen); - orglen= uint4korr((char*)&blob->head.orglen); - ver= uint4korr((char*)&blob->head.ver); + complen= uint4korr((char*)&blob->head.complen); + orglen= uint4korr((char*)&blob->head.orglen); + ver= uint4korr((char*)&blob->head.ver); DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d", - ver,complen,orglen)); + ver,complen,orglen)); DBUG_DUMP("blob->data", (char*) blob->data, complen); if (ver != 1) @@ -5249,7 +5161,7 @@ static int unpackfrm(const void **unpack_data, uint 
*unpack_len, static int ndb_get_table_statistics(Ndb* ndb, const char * table, - struct Ndb_statistics * ndbstat) + struct Ndb_statistics * ndbstat) { DBUG_ENTER("ndb_get_table_statistics"); DBUG_PRINT("enter", ("table: %s", table)); @@ -5278,8 +5190,8 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, pOp->getValue(NdbDictionary::Column::FRAGMENT_MEMORY, (char*)&mem); check= pTrans->execute(NdbTransaction::NoCommit, - NdbTransaction::AbortOnError, - TRUE); + NdbTransaction::AbortOnError, + TRUE); if (check == -1) break; @@ -5292,7 +5204,7 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, sum_rows+= rows; sum_commits+= commits; if (sum_row_size < size) - sum_row_size= size; + sum_row_size= size; sum_mem+= mem; } @@ -5309,7 +5221,7 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, ndbstat->fragment_memory= sum_mem; DBUG_PRINT("exit", ("records: %u commits: %u row_size: %d mem: %d", - sum_rows, sum_commits, sum_row_size, sum_mem)); + sum_rows, sum_commits, sum_row_size, sum_mem)); DBUG_RETURN(0); } while(0); @@ -5333,7 +5245,7 @@ int ha_ndbcluster::write_ndb_file() DBUG_PRINT("enter", ("db: %s, name: %s", m_dbname, m_tabname)); (void)strxnmov(path, FN_REFLEN, - mysql_data_home,"/",m_dbname,"/",m_tabname,ha_ndb_ext,NullS); + mysql_data_home,"/",m_dbname,"/",m_tabname,ha_ndb_ext,NullS); if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0) { @@ -5346,10 +5258,10 @@ int ha_ndbcluster::write_ndb_file() int ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, - KEY_MULTI_RANGE *ranges, - uint range_count, - bool sorted, - HANDLER_BUFFER *buffer) + KEY_MULTI_RANGE *ranges, + uint range_count, + bool sorted, + HANDLER_BUFFER *buffer) { DBUG_ENTER("ha_ndbcluster::read_multi_range_first"); @@ -5366,10 +5278,10 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, */ m_disable_multi_read= true; DBUG_RETURN(handler::read_multi_range_first(found_range_p, - ranges, - range_count, - sorted, - 
buffer)); + ranges, + range_count, + sorted, + buffer)); } m_disable_multi_read= false; @@ -5417,13 +5329,13 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, { multi_range_curr->range_flag |= UNIQUE_RANGE; if ((op= m_active_trans->getNdbOperation(tab)) && - !op->readTuple(lm) && - !set_primary_key(op, multi_range_curr->start_key.key) && - !define_read_attrs(curr, op) && - (op->setAbortOption(AO_IgnoreError), true)) - curr += reclength; + !op->readTuple(lm) && + !set_primary_key(op, multi_range_curr->start_key.key) && + !define_read_attrs(curr, op) && + (op->setAbortOption(AO_IgnoreError), true)) + curr += reclength; else - ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError()); + ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError()); break; } break; @@ -5432,60 +5344,62 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, { multi_range_curr->range_flag |= UNIQUE_RANGE; if ((op= m_active_trans->getNdbIndexOperation(unique_idx, tab)) && - !op->readTuple(lm) && - !set_index_key(op, key_info, multi_range_curr->start_key.key) && - !define_read_attrs(curr, op) && - (op->setAbortOption(AO_IgnoreError), true)) - curr += reclength; + !op->readTuple(lm) && + !set_index_key(op, key_info, multi_range_curr->start_key.key) && + !define_read_attrs(curr, op) && + (op->setAbortOption(AO_IgnoreError), true)) + curr += reclength; else - ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError()); + ERR_RETURN(op ? 
op->getNdbError() : m_active_trans->getNdbError()); break; } case PRIMARY_KEY_ORDERED_INDEX: if (multi_range_curr->start_key.length == key_info->key_length && - multi_range_curr->start_key.flag == HA_READ_KEY_EXACT) - goto pk; + multi_range_curr->start_key.flag == HA_READ_KEY_EXACT) + goto pk; goto range; case UNIQUE_ORDERED_INDEX: if (multi_range_curr->start_key.length == key_info->key_length && - multi_range_curr->start_key.flag == HA_READ_KEY_EXACT && - !check_null_in_key(key_info, multi_range_curr->start_key.key, - multi_range_curr->start_key.length)) - goto sk; + multi_range_curr->start_key.flag == HA_READ_KEY_EXACT && + !check_null_in_key(key_info, multi_range_curr->start_key.key, + multi_range_curr->start_key.length)) + goto sk; goto range; case ORDERED_INDEX: { range: multi_range_curr->range_flag &= ~(uint)UNIQUE_RANGE; if (scanOp == 0) { - if (m_multi_cursor) - { - scanOp= m_multi_cursor; - DBUG_ASSERT(scanOp->getSorted() == sorted); - DBUG_ASSERT(scanOp->getLockMode() == - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type)); - if(scanOp->reset_bounds(m_force_send)) - DBUG_RETURN(ndb_err(m_active_trans)); - - end_of_buffer -= reclength; - } - else if ((scanOp= m_active_trans->getNdbIndexScanOperation(idx, tab)) - &&!scanOp->readTuples(lm, 0, parallelism, sorted, false, true) - &&!define_read_attrs(end_of_buffer-reclength, scanOp)) - { - m_multi_cursor= scanOp; - m_multi_range_cursor_result_ptr= end_of_buffer-reclength; - } - else - { - ERR_RETURN(scanOp ? 
scanOp->getNdbError() : - m_active_trans->getNdbError()); - } + if (m_multi_cursor) + { + scanOp= m_multi_cursor; + DBUG_ASSERT(scanOp->getSorted() == sorted); + DBUG_ASSERT(scanOp->getLockMode() == + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type)); + if(scanOp->reset_bounds(m_force_send)) + DBUG_RETURN(ndb_err(m_active_trans)); + + end_of_buffer -= reclength; + } + else if ((scanOp= m_active_trans->getNdbIndexScanOperation(idx, tab)) + &&!scanOp->readTuples(lm, 0, parallelism, sorted, false, true) + &&!generate_scan_filter(m_cond_stack, scanOp) + &&!define_read_attrs(end_of_buffer-reclength, scanOp)) + { + m_multi_cursor= scanOp; + m_multi_range_cursor_result_ptr= end_of_buffer-reclength; + } + else + { + ERR_RETURN(scanOp ? scanOp->getNdbError() : + m_active_trans->getNdbError()); + } } + const key_range *keys[2]= { &multi_range_curr->start_key, - &multi_range_curr->end_key }; + &multi_range_curr->end_key }; if ((res= set_bounds(scanOp, keys, multi_range_curr-ranges))) - DBUG_RETURN(res); + DBUG_RETURN(res); break; } case(UNDEFINED_INDEX): @@ -5549,7 +5463,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p) if (multi_range_curr->range_flag & UNIQUE_RANGE) { if (op->getNdbError().code == 0) - goto found_next; + goto found_next; op= m_active_trans->getNextCompletedOperation(op); m_multi_range_result_ptr += reclength; @@ -5560,49 +5474,49 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p) DBUG_MULTI_RANGE(1); if ((res= fetch_next(m_multi_cursor)) == 0) { - DBUG_MULTI_RANGE(2); - range_no= m_multi_cursor->get_range_no(); - goto found; + DBUG_MULTI_RANGE(2); + range_no= m_multi_cursor->get_range_no(); + goto found; } else { - goto close_scan; + goto close_scan; } } else if (m_multi_cursor && multi_range_sorted) { if (m_active_cursor && (res= fetch_next(m_multi_cursor))) { - DBUG_MULTI_RANGE(3); - goto close_scan; + DBUG_MULTI_RANGE(3); + goto close_scan; } range_no= m_multi_cursor->get_range_no(); uint 
current_range_no= multi_range_curr - m_multi_ranges; if ((uint) range_no == current_range_no) { - DBUG_MULTI_RANGE(4); + DBUG_MULTI_RANGE(4); // return current row - goto found; + goto found; } else if (range_no > (int)current_range_no) { - DBUG_MULTI_RANGE(5); - // wait with current row - m_active_cursor= 0; - continue; + DBUG_MULTI_RANGE(5); + // wait with current row + m_active_cursor= 0; + continue; } else { - DBUG_MULTI_RANGE(6); - // First fetch from cursor - DBUG_ASSERT(range_no == -1); - if((res= m_multi_cursor->nextResult(true))) - { - goto close_scan; - } - multi_range_curr--; // Will be increased in for-loop - continue; + DBUG_MULTI_RANGE(6); + // First fetch from cursor + DBUG_ASSERT(range_no == -1); + if((res= m_multi_cursor->nextResult(true))) + { + goto close_scan; + } + multi_range_curr--; // Will be increased in for-loop + continue; } } else /** m_multi_cursor == 0 */ @@ -5637,10 +5551,10 @@ close_scan: * Read remaining ranges */ DBUG_RETURN(read_multi_range_first(multi_range_found_p, - multi_range_curr, - multi_range_end - multi_range_curr, - multi_range_sorted, - multi_range_buffer)); + multi_range_curr, + multi_range_end - multi_range_curr, + multi_range_sorted, + multi_range_buffer)); found: /** @@ -5696,8 +5610,8 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr) char* ha_ndbcluster::update_table_comment( - /* out: table comment + additional */ - const char* comment)/* in: table comment defined by user */ + /* out: table comment + additional */ + const char* comment)/* in: table comment defined by user */ { uint length= strlen(comment); if(length > 64000 - 3) @@ -5728,8 +5642,8 @@ ha_ndbcluster::update_table_comment( } snprintf(str,fmt_len_plus_extra,fmt,comment, - length > 0 ? " ":"", - tab->getReplicaCount()); + length > 0 ? 
" ":"", + tab->getReplicaCount()); return str; } @@ -5807,7 +5721,7 @@ extern "C" pthread_handler_decl(ndb_util_thread_func, /* Iterate through the open files list */ List_iterator_fast<NDB_SHARE> it(util_open_tables); - while (share= it++) + while ((share= it++)) { /* Split tab- and dbname */ char buf[FN_REFLEN]; @@ -5855,5 +5769,1045 @@ extern "C" pthread_handler_decl(ndb_util_thread_func, DBUG_RETURN(NULL); } +/* + Condition pushdown +*/ +/* + Push a condition to ndbcluster storage engine for evaluation + during table and index scans. The conditions will be stored on a stack + for possibly storing several conditions. The stack can be popped + by calling cond_pop, handler::extra(HA_EXTRA_RESET) (handler::reset()) + will clear the stack. + The current implementation supports arbitrary AND/OR nested conditions + with comparisons between columns and constants (including constant + expressions and function calls) and the following comparison operators: + =, !=, >, >=, <, <=, "is null", and "is not null". + + RETURN + NULL The condition was supported and will be evaluated for each + row found during the scan + cond The condition was not supported and all rows will be returned from + the scan for evaluation (and thus not saved on stack) +*/ +const +COND* +ha_ndbcluster::cond_push(const COND *cond) +{ + Ndb_cond_stack *ndb_cond = new Ndb_cond_stack(); + DBUG_ENTER("cond_push"); + DBUG_EXECUTE("where",print_where((COND *)cond, m_tabname);); + if (m_cond_stack) + ndb_cond->next= m_cond_stack; + else + ndb_cond->next= NULL; + m_cond_stack= ndb_cond; + + if (serialize_cond(cond, ndb_cond)) + { + DBUG_RETURN(NULL); + } + else + { + cond_pop(); + } + + DBUG_RETURN(cond); +} + +/* + Pop the top condition from the condition stack of the handler instance. 
+*/ +void +ha_ndbcluster::cond_pop() +{ + Ndb_cond_stack *ndb_cond_stack= m_cond_stack; + if (ndb_cond_stack) + { + m_cond_stack= ndb_cond_stack->next; + delete ndb_cond_stack; + } +} + +/* + Clear the condition stack +*/ +void +ha_ndbcluster::cond_clear() +{ + DBUG_ENTER("cond_clear"); + while (m_cond_stack) + cond_pop(); + + DBUG_VOID_RETURN; +} + +/* + Serialize the item tree into a linked list represented by Ndb_cond + for fast generation of NbdScanFilter. Adds information such as + position of fields that is not directly available in the Item tree. + Also checks if condition is supported. +*/ +void ndb_serialize_cond(const Item *item, void *arg) +{ + Ndb_cond_traverse_context *context= (Ndb_cond_traverse_context *) arg; + DBUG_ENTER("ndb_serialize_cond"); + + // Check if we are skipping arguments to a function to be evaluated + if (context->skip) + { + DBUG_PRINT("info", ("Skiping argument %d", context->skip)); + context->skip--; + switch(item->type()) { + case (Item::FUNC_ITEM): { + Item_func *func_item= (Item_func *) item; + context->skip+= func_item->argument_count(); + break; + } + case(Item::INT_ITEM): + case(Item::REAL_ITEM): + case(Item::STRING_ITEM): + case(Item::VARBIN_ITEM): + case(Item::DECIMAL_ITEM): + break; + default: + context->supported= FALSE; + break; + } + + DBUG_VOID_RETURN; + } + + if (context->supported) + { + Ndb_cond_stack *ndb_stack= context->stack_ptr; + Ndb_cond *prev_cond= context->cond_ptr; + Ndb_cond *curr_cond= context->cond_ptr= new Ndb_cond(); + if (!ndb_stack->ndb_cond) + ndb_stack->ndb_cond= curr_cond; + curr_cond->prev= prev_cond; + if (prev_cond) prev_cond->next= curr_cond; + + // Check for end of AND/OR expression + if (!item) + { + // End marker for condition group + DBUG_PRINT("info", ("End of condition group")); + curr_cond->ndb_item= new Ndb_item(NDB_END_COND); + } + else + switch(item->type()) { + case(Item::FIELD_ITEM): { + Item_field *field_item= (Item_field *) item; + Field *field= field_item->field; + 
enum_field_types type= field->type(); + /* + Check that the field is part of the table of the handler + instance and that we expect a field with of this result type. + */ + if (context->table == field->table) + { + const NDBTAB *tab= (const NDBTAB *) context->ndb_table; + DBUG_PRINT("info", ("FIELD_ITEM")); + DBUG_PRINT("info", ("table %s", tab->getName())); + DBUG_PRINT("info", ("column %s", field->field_name)); + DBUG_PRINT("info", ("result type %d", field->result_type())); + + // Check that we are expecting a field and with the correct + // result type + if(context->expecting(Item::FIELD_ITEM) && + (context->expecting_field_result(field->result_type()) || + // Date and year can be written as strings + (type == MYSQL_TYPE_TIME || + type == MYSQL_TYPE_DATE || + type == MYSQL_TYPE_YEAR || + type == MYSQL_TYPE_DATETIME) + ? context->expecting_field_result(STRING_RESULT) : true) + // Bit fields no yet supported in scan filter + && type != MYSQL_TYPE_BIT) + { + const NDBCOL *col= tab->getColumn(field->field_name); + DBUG_ASSERT(col); + curr_cond->ndb_item= new Ndb_item(field, col->getColumnNo()); + context->dont_expect(Item::FIELD_ITEM); + context->expect_no_field_result(); + if (context->expect_mask) + { + // We have not seen second argument yet + if (type == MYSQL_TYPE_TIME || + type == MYSQL_TYPE_DATE || + type == MYSQL_TYPE_YEAR || + type == MYSQL_TYPE_DATETIME) + { + context->expect_only(Item::STRING_ITEM); + context->expect(Item::INT_ITEM); + } + else + switch(field->result_type()) { + case(STRING_RESULT): + // Expect char string or binary string + context->expect_only(Item::STRING_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect_collation(field_item->collation.collation); + break; + case(REAL_RESULT): + context->expect_only(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + break; + case(INT_RESULT): + context->expect_only(Item::INT_ITEM); + context->expect(Item::VARBIN_ITEM); + break; + case(DECIMAL_RESULT): + 
context->expect_only(Item::DECIMAL_ITEM); + context->expect(Item::REAL_ITEM); + break; + default: + break; + } + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + // Check that field and string constant collations are the same + if ((field->result_type() == STRING_RESULT) && + !context->expecting_collation(item->collation.collation)) + { + DBUG_PRINT("info", ("Found non-matching collation %s", + item->collation.collation->name)); + context->supported= FALSE; + } + } + break; + } + } + DBUG_PRINT("info", ("Was not expecting field of type %u", + field->result_type())); + context->supported= FALSE; + break; + } + case(Item::FUNC_ITEM): { + Item_func *func_item= (Item_func *) item; + // Check that we expect a function or functional expression here + if (context->expecting(Item::FUNC_ITEM) || + func_item->functype() == Item_func::UNKNOWN_FUNC) + context->expect_nothing(); + else + { + // Did not expect function here + context->supported= FALSE; + break; + } + + switch(func_item->functype()) { + case(Item_func::EQ_FUNC): { + DBUG_PRINT("info", ("EQ_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case(Item_func::NE_FUNC): { + DBUG_PRINT("info", ("NE_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::VARBIN_ITEM); + 
context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case(Item_func::LT_FUNC): { + DBUG_PRINT("info", ("LT_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case(Item_func::LE_FUNC): { + DBUG_PRINT("info", ("LE_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case(Item_func::GE_FUNC): { + DBUG_PRINT("info", ("GE_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case(Item_func::GT_FUNC): { + DBUG_PRINT("info", ("GT_FUNC")); + curr_cond->ndb_item= new 
Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case(Item_func::LIKE_FUNC): { + DBUG_PRINT("info", ("LIKE_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + break; + } + case(Item_func::NOTLIKE_FUNC): { + DBUG_PRINT("info", ("NOTLIKE_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + break; + } + case(Item_func::ISNULL_FUNC): { + DBUG_PRINT("info", ("ISNULL_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case(Item_func::ISNOTNULL_FUNC): { + DBUG_PRINT("info", ("ISNOTNULL_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case(Item_func::NOT_FUNC): { + DBUG_PRINT("info", ("NOT_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::FUNC_ITEM); + 
context->expect(Item::COND_ITEM); + break; + } + case(Item_func::UNKNOWN_FUNC): { + DBUG_PRINT("info", ("UNKNOWN_FUNC %s", + func_item->const_item()?"const":"")); + DBUG_PRINT("info", ("result type %d", func_item->result_type())); + if (func_item->const_item()) + switch(func_item->result_type()) { + case(STRING_RESULT): { + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::STRING_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (context->expect_field_result_mask) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(STRING_RESULT); + context->expect_collation(func_item->collation.collation); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + // Check that string result have correct collation + if (!context->expecting_collation(item->collation.collation)) + { + DBUG_PRINT("info", ("Found non-matching collation %s", + item->collation.collation->name)); + context->supported= FALSE; + } + } + // Skip any arguments since we will evaluate function instead + DBUG_PRINT("info", ("Skip until end of arguments marker")); + context->skip= func_item->argument_count(); + break; + } + case(REAL_RESULT): { + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::REAL_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (context->expect_field_result_mask) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(REAL_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + + // Skip any arguments since we will evaluate function instead + DBUG_PRINT("info", ("Skip until end of arguments marker")); + context->skip= func_item->argument_count(); + break; + } + case(INT_RESULT): { + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::INT_ITEM; + 
curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (context->expect_field_result_mask) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(INT_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + + // Skip any arguments since we will evaluate function instead + DBUG_PRINT("info", ("Skip until end of arguments marker")); + context->skip= func_item->argument_count(); + break; + } + case(DECIMAL_RESULT): { + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::DECIMAL_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (context->expect_field_result_mask) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(DECIMAL_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + // Skip any arguments since we will evaluate function instead + DBUG_PRINT("info", ("Skip until end of arguments marker")); + context->skip= func_item->argument_count(); + break; + } + default: + break; + } + else + // Function does not return constant expression + context->supported= FALSE; + break; + } + default: { + DBUG_PRINT("info", ("Found func_item of type %d", + func_item->functype())); + context->supported= FALSE; + } + } + break; + } + case(Item::STRING_ITEM): + DBUG_PRINT("info", ("STRING_ITEM")); + if (context->expecting(Item::STRING_ITEM)) + { +#ifndef DBUG_OFF + char buff[256]; + String str(buff,(uint32) sizeof(buff), system_charset_info); + str.length(0); + Item_string *string_item= (Item_string *) item; + DBUG_PRINT("info", ("value \"%s\"", + string_item->val_str(&str)->ptr())); +#endif + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::STRING_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if 
(context->expect_field_result_mask) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(STRING_RESULT); + context->expect_collation(item->collation.collation); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + // Check that we are comparing with a field with same collation + if (!context->expecting_collation(item->collation.collation)) + { + DBUG_PRINT("info", ("Found non-matching collation %s", + item->collation.collation->name)); + context->supported= FALSE; + } + } + } + else + context->supported= FALSE; + break; + case(Item::INT_ITEM): + DBUG_PRINT("info", ("INT_ITEM")); + if (context->expecting(Item::INT_ITEM)) + { + Item_int *int_item= (Item_int *) item; + DBUG_PRINT("info", ("value %d", int_item->value)); + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::INT_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (context->expect_field_result_mask) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(INT_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + } + else + context->supported= FALSE; + break; + case(Item::REAL_ITEM): + DBUG_PRINT("info", ("REAL_ITEM")); + if (context->expecting(Item::REAL_ITEM)) + { + Item_float *float_item= (Item_float *) item; + DBUG_PRINT("info", ("value %f", float_item->value)); + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::REAL_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (context->expect_field_result_mask) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(REAL_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + 
context->expect(Item::COND_ITEM); + } + } + else + context->supported= FALSE; + break; + case(Item::VARBIN_ITEM): + DBUG_PRINT("info", ("VARBIN_ITEM")); + if (context->expecting(Item::VARBIN_ITEM)) + { +#ifndef DBUG_OFF + char buff[256]; + String str(buff,(uint32) sizeof(buff), system_charset_info); + str.length(0); + Item_hex_string *varbin_item= (Item_hex_string *) item; + DBUG_PRINT("info", ("value \"%s\"", + varbin_item->val_str(&str)->ptr())); +#endif + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::VARBIN_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (context->expect_field_result_mask) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(STRING_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + } + else + context->supported= FALSE; + break; + case(Item::DECIMAL_ITEM): + DBUG_PRINT("info", ("DECIMAL_ITEM")); + if (context->expecting(Item::DECIMAL_ITEM)) + { + Item_decimal *decimal_item= (Item_decimal *) item; + DBUG_PRINT("info", ("value %f", decimal_item->val_real())); + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::DECIMAL_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (context->expect_field_result_mask) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(REAL_RESULT); + context->expect_field_result(DECIMAL_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + } + else + context->supported= FALSE; + break; + case(Item::COND_ITEM): { + Item_cond *cond_item= (Item_cond *) item; + + if (context->expecting(Item::COND_ITEM)) + switch(cond_item->functype()) { + case(Item_func::COND_AND_FUNC): + DBUG_PRINT("info", ("COND_AND_FUNC")); + curr_cond->ndb_item= new 
Ndb_item(cond_item->functype(), + cond_item); + break; + case(Item_func::COND_OR_FUNC): + DBUG_PRINT("info", ("COND_OR_FUNC")); + curr_cond->ndb_item= new Ndb_item(cond_item->functype(), + cond_item); + break; + default: + DBUG_PRINT("info", ("COND_ITEM %d", cond_item->functype())); + context->supported= FALSE; + break; + } + else + // Did not expect condition + context->supported= FALSE; + break; + } + default: { + DBUG_PRINT("info", ("Found item of type %d", item->type())); + context->supported= FALSE; + } + } + } + + DBUG_VOID_RETURN; +} + +bool +ha_ndbcluster::serialize_cond(const COND *cond, Ndb_cond_stack *ndb_cond) +{ + DBUG_ENTER("serialize_cond"); + Item *item= (Item *) cond; + Ndb_cond_traverse_context context(table, (void *)m_table, ndb_cond); + // Expect a logical expression + context.expect(Item::FUNC_ITEM); + context.expect(Item::COND_ITEM); + item->traverse_cond(&ndb_serialize_cond, (void *) &context, Item::PREFIX); + DBUG_PRINT("info", ("The pushed condition is %ssupported", (context.supported)?"":"not ")); + + DBUG_RETURN(context.supported); +} + +int +ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond, + NdbScanFilter *filter, + bool negated) +{ + DBUG_ENTER("build_scan_filter_predicate"); + switch(cond->ndb_item->type) { + case(NDB_FUNCTION): { + if (!cond->next) + break; + Ndb_item *a= cond->next->ndb_item; + Ndb_item *b, *field, *value= NULL; + switch(cond->ndb_item->argument_count()) { + case(1): + field= + (a->type == NDB_FIELD)? a : NULL; + break; + case(2): + if (!cond->next->next) + break; + b= cond->next->next->ndb_item; + value= + (a->type == NDB_VALUE)? a + : (b->type == NDB_VALUE)? b + : NULL; + field= + (a->type == NDB_FIELD)? a + : (b->type == NDB_FIELD)? b + : NULL; + break; + default: + break; + } + switch((negated) ? 
+ Ndb_item::negate(cond->ndb_item->qualification.function_type) + : cond->ndb_item->qualification.function_type) + { + case(Item_func::EQ_FUNC): { + if (!value || !field) break; + // Save value in right format for the field type + value->save_in_field(field); + DBUG_PRINT("info", ("Generating EQ filter")); + if (filter->cmp(NdbScanFilter::COND_EQ, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case(Item_func::NE_FUNC): { + if (!value || !field) break; + // Save value in right format for the field type + value->save_in_field(field); + DBUG_PRINT("info", ("Generating NE filter")); + if (filter->cmp(NdbScanFilter::COND_NE, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case(Item_func::LT_FUNC): { + if (!value || !field) break; + // Save value in right format for the field type + value->save_in_field(field); + if (a == field) + { + DBUG_PRINT("info", ("Generating LT filter")); + if (filter->cmp(NdbScanFilter::COND_LT, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + else + { + DBUG_PRINT("info", ("Generating GT filter")); + if (filter->cmp(NdbScanFilter::COND_GT, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case(Item_func::LE_FUNC): { + if (!value || !field) break; + // Save value in right format for the field type + value->save_in_field(field); + if (a == field) + { + DBUG_PRINT("info", ("Generating LE filter")); + if (filter->cmp(NdbScanFilter::COND_LE, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + else + { + DBUG_PRINT("info", ("Generating GE filter")); + if (filter->cmp(NdbScanFilter::COND_GE, + field->get_field_no(), + field->get_val(), + 
field->pack_length()) == -1) + DBUG_RETURN(1); + } + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case(Item_func::GE_FUNC): { + if (!value || !field) break; + // Save value in right format for the field type + value->save_in_field(field); + if (a == field) + { + DBUG_PRINT("info", ("Generating GE filter")); + if (filter->cmp(NdbScanFilter::COND_GE, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + else + { + DBUG_PRINT("info", ("Generating LE filter")); + if (filter->cmp(NdbScanFilter::COND_LE, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case(Item_func::GT_FUNC): { + if (!value || !field) break; + // Save value in right format for the field type + value->save_in_field(field); + if (a == field) + { + DBUG_PRINT("info", ("Generating GT filter")); + if (filter->cmp(NdbScanFilter::COND_GT, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + else + { + DBUG_PRINT("info", ("Generating LT filter")); + if (filter->cmp(NdbScanFilter::COND_LT, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case(Item_func::LIKE_FUNC): { + if (!value || !field) break; + if ((value->qualification.value_type != Item::STRING_ITEM) && + (value->qualification.value_type != Item::VARBIN_ITEM)) + break; + // Save value in right format for the field type + value->save_in_field(field); + DBUG_PRINT("info", ("Generating LIKE filter: like(%d,%s,%d)", + field->get_field_no(), value->get_val(), + value->pack_length())); + if (filter->cmp(NdbScanFilter::COND_LIKE, + field->get_field_no(), + value->get_val(), + value->pack_length()) == -1) + DBUG_RETURN(1); + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case(Item_func::NOTLIKE_FUNC): { + if (!value || !field) break; + if 
((value->qualification.value_type != Item::STRING_ITEM) && + (value->qualification.value_type != Item::VARBIN_ITEM)) + break; + // Save value in right format for the field type + value->save_in_field(field); + DBUG_PRINT("info", ("Generating NOTLIKE filter: notlike(%d,%s,%d)", + field->get_field_no(), value->get_val(), + value->pack_length())); + if (filter->cmp(NdbScanFilter::COND_NOT_LIKE, + field->get_field_no(), + value->get_val(), + value->pack_length()) == -1) + DBUG_RETURN(1); + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case(Item_func::ISNULL_FUNC): + if (!field) + break; + DBUG_PRINT("info", ("Generating ISNULL filter")); + if (filter->isnull(field->get_field_no()) == -1) + DBUG_RETURN(1); + cond= cond->next->next; + DBUG_RETURN(0); + case(Item_func::ISNOTNULL_FUNC): { + if (!field) + break; + DBUG_PRINT("info", ("Generating ISNOTNULL filter")); + if (filter->isnotnull(field->get_field_no()) == -1) + DBUG_RETURN(1); + cond= cond->next->next; + DBUG_RETURN(0); + } + default: + break; + } + break; + } + default: + break; + } + DBUG_PRINT("info", ("Found illegal condition")); + DBUG_RETURN(1); +} + +int +ha_ndbcluster::build_scan_filter_group(Ndb_cond* &cond, NdbScanFilter *filter) +{ + uint level=0; + bool negated= false; + + DBUG_ENTER("build_scan_filter_group"); + do + { + if (!cond) DBUG_RETURN(1); + switch(cond->ndb_item->type) { + case(NDB_FUNCTION): + switch(cond->ndb_item->qualification.function_type) { + case(Item_func::COND_AND_FUNC): { + level++; + DBUG_PRINT("info", ("Generating %s group %u", (negated)?"NAND":"AND", + level)); + if ((negated) ? filter->begin(NdbScanFilter::NAND) + : filter->begin(NdbScanFilter::AND) == -1) + DBUG_RETURN(1); + negated= false; + cond= cond->next; + break; + } + case(Item_func::COND_OR_FUNC): { + level++; + DBUG_PRINT("info", ("Generating %s group %u", (negated)?"NOR":"OR", + level)); + if ((negated) ? 
filter->begin(NdbScanFilter::NOR) + : filter->begin(NdbScanFilter::OR) == -1) + DBUG_RETURN(1); + negated= false; + cond= cond->next; + break; + } + case(Item_func::NOT_FUNC): { + cond= cond->next; + negated= true; + break; + } + default: + if (build_scan_filter_predicate(cond, filter, negated)) + DBUG_RETURN(1); + negated= false; + break; + } + break; + case(NDB_END_COND): + DBUG_PRINT("info", ("End of group %u", level)); + level--; + if (cond) cond= cond->next; + if (filter->end() == -1) + DBUG_RETURN(1); + break; + default: { + DBUG_PRINT("info", ("Illegal scan filter")); + } + } + } while (level > 0); + + DBUG_RETURN(0); +} + +int +ha_ndbcluster::build_scan_filter(Ndb_cond * &cond, NdbScanFilter *filter) +{ + bool simple_cond= TRUE; + DBUG_ENTER("build_scan_filter"); + + switch(cond->ndb_item->type) { + case(Item_func::COND_AND_FUNC): + case(Item_func::COND_OR_FUNC): + simple_cond= FALSE; + break; + default: + break; + } + if (simple_cond && filter->begin() == -1) + DBUG_RETURN(1); + if (build_scan_filter_group(cond, filter)) + DBUG_RETURN(1); + if (simple_cond && filter->end() == -1) + DBUG_RETURN(1); + + DBUG_RETURN(0); +} + +int +ha_ndbcluster::generate_scan_filter(Ndb_cond_stack *ndb_cond_stack, + NdbScanOperation *op) +{ + DBUG_ENTER("generate_scan_filter"); + if (ndb_cond_stack) + { + NdbScanFilter filter(op); + bool multiple_cond= FALSE; + // Wrap an AND group around multiple conditions + if (ndb_cond_stack->next) { + multiple_cond= TRUE; + if (filter.begin() == -1) + DBUG_RETURN(1); + } + for (Ndb_cond_stack *stack= ndb_cond_stack; + (stack); + stack= stack->next) + { + Ndb_cond *cond= stack->ndb_cond; + + if (build_scan_filter(cond, &filter)) + { + DBUG_PRINT("info", ("build_scan_filter failed")); + DBUG_RETURN(1); + } + } + if (multiple_cond && filter.end() == -1) + DBUG_RETURN(1); + } + else + { + DBUG_PRINT("info", ("Empty stack")); + } + + DBUG_RETURN(0); +} #endif /* HAVE_NDBCLUSTER_DB */ diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h 
index 10ee568df69..133086a138c 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -32,6 +32,7 @@ class NdbOperation; // Forward declaration class NdbTransaction; // Forward declaration class NdbRecAttr; // Forward declaration class NdbScanOperation; +class NdbScanFilter; class NdbIndexScanOperation; class NdbBlob; @@ -63,11 +64,307 @@ typedef struct st_ndbcluster_share { ulonglong commit_count; } NDB_SHARE; +typedef enum ndb_item_type { + NDB_VALUE = 0, // Qualified more with Item::Type + NDB_FIELD = 1, // Qualified from table definition + NDB_FUNCTION = 2,// Qualified from Item_func::Functype + NDB_END_COND = 3 // End marker for condition group +} NDB_ITEM_TYPE; + +typedef union ndb_item_qualification { + Item::Type value_type; + enum_field_types field_type; // Instead of Item::FIELD_ITEM + Item_func::Functype function_type; // Instead of Item::FUNC_ITEM +} NDB_ITEM_QUALIFICATION; + +typedef struct ndb_item_field_value { + Field* field; + int column_no; +} NDB_ITEM_FIELD_VALUE; + +typedef union ndb_item_value { + const Item *item; + NDB_ITEM_FIELD_VALUE *field_value; +} NDB_ITEM_VALUE; + +struct negated_function_mapping +{ + Item_func::Functype pos_fun; + Item_func::Functype neg_fun; +}; + +static const negated_function_mapping neg_map[]= +{ + {Item_func::EQ_FUNC, Item_func::NE_FUNC}, + {Item_func::NE_FUNC, Item_func::EQ_FUNC}, + {Item_func::LT_FUNC, Item_func::GE_FUNC}, + {Item_func::LE_FUNC, Item_func::GT_FUNC}, + {Item_func::GT_FUNC, Item_func::LE_FUNC}, + {Item_func::GE_FUNC, Item_func::LT_FUNC}, + {Item_func::LIKE_FUNC, Item_func::NOTLIKE_FUNC}, + {Item_func::NOTLIKE_FUNC, Item_func::LIKE_FUNC}, + {Item_func::ISNULL_FUNC, Item_func::ISNOTNULL_FUNC}, + {Item_func::ISNOTNULL_FUNC, Item_func::ISNULL_FUNC}, + {Item_func::UNKNOWN_FUNC, Item_func::NOT_FUNC} +}; + +/* + This class is the construction element for serialization of Item tree + in condition pushdown. 
+ An instance of Ndb_Item represents a constant, table field reference, + unary or binary comparison predicate, and start/end of AND/OR. + Instances of Ndb_Item are stored in a linked list implemented by Ndb_cond + class. + The order of elements produced by Ndb_cond::next corresponds to + depth-first traversal of the Item (i.e. expression) tree in prefix order. + AND and OR have arbitrary arity, so the end of AND/OR group is marked with + Ndb_item with type == NDB_END_COND. + NOT items represent negated conditions and generate NAND/NOR groups. +*/ +class Ndb_item { + public: + Ndb_item(NDB_ITEM_TYPE item_type) : type(item_type) {}; + Ndb_item(NDB_ITEM_TYPE item_type, + NDB_ITEM_QUALIFICATION item_qualification, + const Item *item_value) + : type(item_type), qualification(item_qualification) + { + switch(item_type) { + case(NDB_VALUE): + value.item= item_value; + break; + case(NDB_FIELD): { + NDB_ITEM_FIELD_VALUE *field_value= new NDB_ITEM_FIELD_VALUE(); + Item_field *field_item= (Item_field *) item_value; + field_value->field= field_item->field; + field_value->column_no= -1; // Will be fetched at scan filter generation + value.field_value= field_value; + break; + } + case(NDB_FUNCTION): + value.item= item_value; + break; + case(NDB_END_COND): + break; + } + }; + Ndb_item(Field *field, int column_no) : type(NDB_FIELD) + { + NDB_ITEM_FIELD_VALUE *field_value= new NDB_ITEM_FIELD_VALUE(); + qualification.field_type= field->type(); + field_value->field= field; + field_value->column_no= column_no; + value.field_value= field_value; + }; + Ndb_item(Item_func::Functype func_type, const Item *item_value) + : type(NDB_FUNCTION) + { + qualification.function_type= func_type; + value.item= item_value; + }; + ~Ndb_item() + { + if (type == NDB_FIELD) + { + delete value.field_value; + value.field_value= NULL; + } + }; + + uint32 pack_length() + { + switch(type) { + case(NDB_VALUE): + if(qualification.value_type == Item::STRING_ITEM) + return value.item->str_value.length(); + break; 
+ case(NDB_FIELD): + return value.field_value->field->pack_length(); + default: + break; + } + + return 0; + }; + + Field * get_field() { return value.field_value->field; }; + + int get_field_no() { return value.field_value->column_no; }; + + int argument_count() + { + return ((Item_func *) value.item)->argument_count(); + }; + + const char* get_val() + { + switch(type) { + case(NDB_VALUE): + if(qualification.value_type == Item::STRING_ITEM) + return value.item->str_value.ptr(); + break; + case(NDB_FIELD): + return value.field_value->field->ptr; + default: + break; + } + + return NULL; + }; + + void save_in_field(Ndb_item *field_item) + { + Field *field = field_item->value.field_value->field; + const Item *item= value.item; + + if (item && field) + ((Item *)item)->save_in_field(field, false); + }; + + static Item_func::Functype negate(Item_func::Functype fun) + { + uint i; + for (i=0; + fun != neg_map[i].pos_fun && + neg_map[i].pos_fun != Item_func::UNKNOWN_FUNC; + i++); + return neg_map[i].neg_fun; + }; + + NDB_ITEM_TYPE type; + NDB_ITEM_QUALIFICATION qualification; + private: + NDB_ITEM_VALUE value; +}; + +/* + This class implements a linked list used for storing a + serialization of the Item tree for condition pushdown. + */ +class Ndb_cond +{ + public: + Ndb_cond() : ndb_item(NULL), next(NULL), prev(NULL) {}; + ~Ndb_cond() + { + if (ndb_item) delete ndb_item; + ndb_item= NULL; + if (next) delete next; + next= prev= NULL; + }; + Ndb_item *ndb_item; + Ndb_cond *next; + Ndb_cond *prev; +}; + +/* + This class implements a stack for storing several conditions + for pushdown (represented as serialized Item trees using Ndb_cond). + The current implementation only pushes one condition, but is + prepared for handling several (C1 AND C2 ...) if the logic for + pushing conditions is extended in sql_select. 
+*/ +class Ndb_cond_stack +{ + public: + Ndb_cond_stack() : ndb_cond(NULL), next(NULL) {}; + ~Ndb_cond_stack() + { + if (ndb_cond) delete ndb_cond; + ndb_cond= NULL; + next= NULL; + }; + Ndb_cond *ndb_cond; + Ndb_cond_stack *next; +}; + +/* + This class is used for storing the context when traversing + the Item tree. It stores a reference to the table the condition + is defined on, the serialized representation being generated, + if the condition found is supported, and information what is + expected next in the tree inorder for the condition to be supported. +*/ +class Ndb_cond_traverse_context +{ + public: + Ndb_cond_traverse_context(TABLE *tab, void* ndb_tab, Ndb_cond_stack* stack) + : table(tab), ndb_table(ndb_tab), + supported(TRUE), stack_ptr(stack), cond_ptr(NULL), + expect_mask(0), expect_field_result_mask(0), skip(0), collation(NULL) + { + if (stack) + cond_ptr= stack->ndb_cond; + }; + void expect(Item::Type type) + { + expect_mask|= (1 << type); + }; + void dont_expect(Item::Type type) + { + expect_mask&= ~(1 << type); + }; + bool expecting(Item::Type type) + { + return (expect_mask & (1 << type)); + }; + void expect_nothing() + { + expect_mask= 0; + }; + void expect_only(Item::Type type) + { + expect_mask= 0; + expect(type); + }; + + void expect_field_result(Item_result result) + { + expect_field_result_mask|= (1 << result); + }; + bool expecting_field_result(Item_result result) + { + return (expect_field_result_mask & (1 << result)); + }; + void expect_no_field_result() + { + expect_field_result_mask= 0; + }; + void expect_only_field_result(Item_result result) + { + expect_field_result_mask= 0; + expect_field_result(result); + }; + void expect_collation(CHARSET_INFO* col) + { + collation= col; + }; + bool expecting_collation(CHARSET_INFO* col) + { + bool matching= (!collation) ? 
true : (collation == col); + collation= NULL; + + return matching; + }; + + TABLE* table; + void* ndb_table; + bool supported; + Ndb_cond_stack* stack_ptr; + Ndb_cond* cond_ptr; + uint expect_mask; + uint expect_field_result_mask; + uint skip; + CHARSET_INFO* collation; + +}; + /* Place holder for ha_ndbcluster thread specific data */ -class Thd_ndb { +class Thd_ndb +{ public: Thd_ndb(); ~Thd_ndb(); @@ -94,9 +391,9 @@ class ha_ndbcluster: public handler int index_init(uint index); int index_end(); int index_read(byte *buf, const byte *key, uint key_len, - enum ha_rkey_function find_flag); + enum ha_rkey_function find_flag); int index_read_idx(byte *buf, uint index, const byte *key, uint key_len, - enum ha_rkey_function find_flag); + enum ha_rkey_function find_flag); int index_next(byte *buf); int index_prev(byte *buf); int index_first(byte *buf); @@ -108,27 +405,26 @@ class ha_ndbcluster: public handler int rnd_pos(byte *buf, byte *pos); void position(const byte *record); int read_range_first(const key_range *start_key, - const key_range *end_key, - bool eq_range, bool sorted); + const key_range *end_key, + bool eq_range, bool sorted); int read_range_first_to_buf(const key_range *start_key, - const key_range *end_key, - bool eq_range, bool sorted, - byte* buf); + const key_range *end_key, + bool eq_range, bool sorted, + byte* buf); int read_range_next(); /** * Multi range stuff */ int read_multi_range_first(KEY_MULTI_RANGE **found_range_p, - KEY_MULTI_RANGE*ranges, uint range_count, - bool sorted, HANDLER_BUFFER *buffer); + KEY_MULTI_RANGE*ranges, uint range_count, + bool sorted, HANDLER_BUFFER *buffer); int read_multi_range_next(KEY_MULTI_RANGE **found_range_p); bool get_error_message(int error, String *buf); void info(uint); int extra(enum ha_extra_function operation); int extra_opt(enum ha_extra_function operation, ulong cache_size); - int reset(); int external_lock(THD *thd, int lock_type); int start_stmt(THD *thd); const char * table_type() const; @@ -144,8 
+440,8 @@ class ha_ndbcluster: public handler int delete_table(const char *name); int create(const char *name, TABLE *form, HA_CREATE_INFO *info); THR_LOCK_DATA **store_lock(THD *thd, - THR_LOCK_DATA **to, - enum thr_lock_type lock_type); + THR_LOCK_DATA **to, + enum thr_lock_type lock_type); bool low_byte_first() const; bool has_transactions(); @@ -158,11 +454,51 @@ class ha_ndbcluster: public handler static Thd_ndb* seize_thd_ndb(); static void release_thd_ndb(Thd_ndb* thd_ndb); + + /* + Condition pushdown + */ + + /* + Push condition down to the table handler. + SYNOPSIS + cond_push() + cond Condition to be pushed. The condition tree must not be + modified by the by the caller. + RETURN + The 'remainder' condition that caller must use to filter out records. + NULL means the handler will not return rows that do not match the + passed condition. + NOTES + The pushed conditions form a stack (from which one can remove the + last pushed condition using cond_pop). + The table handler filters out rows using (pushed_cond1 AND pushed_cond2 + AND ... AND pushed_condN) + or less restrictive condition, depending on handler's capabilities. + + handler->extra(HA_EXTRA_RESET) call empties the condition stack. + Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the + condition stack. + The current implementation supports arbitrary AND/OR nested conditions + with comparisons between columns and constants (including constant + expressions and function calls) and the following comparison operators: + =, !=, >, >=, <, <=, like, "not like", "is null", and "is not null". + Negated conditions are supported by NOT which generate NAND/NOR groups. + */ + const COND *cond_push(const COND *cond); + /* + Pop the top condition from the condition stack of the handler instance. 
+ SYNOPSIS + cond_pop() + Pops the top if condition stack, if stack is not empty + */ + void cond_pop(); + uint8 table_cache_type(); my_bool register_query_cache_table(THD *thd, char *table_key, - uint key_length, - qc_engine_callback *engine_callback, - ulonglong *engine_data); + uint key_length, + qc_engine_callback *engine_callback, + ulonglong *engine_data); private: int alter_table_name(const char *to); int drop_table(); @@ -182,17 +518,17 @@ private: int complemented_pk_read(const byte *old_data, byte *new_data); int peek_row(); int unique_index_read(const byte *key, uint key_len, - byte *buf); + byte *buf); int ordered_index_scan(const key_range *start_key, - const key_range *end_key, - bool sorted, bool descending, byte* buf); + const key_range *end_key, + bool sorted, bool descending, byte* buf); int full_table_scan(byte * buf); int fetch_next(NdbScanOperation* op); int next_result(byte *buf); int define_read_attrs(byte* buf, NdbOperation* op); int filtered_scan(const byte *key, uint key_len, - byte *buf, - enum ha_rkey_function find_flag); + byte *buf, + enum ha_rkey_function find_flag); int close_scan(); void unpack_record(byte *buf); int get_ndb_lock_type(enum thr_lock_type type); @@ -202,9 +538,9 @@ private: void set_tabname(const char *pathname, char *tabname); bool set_hidden_key(NdbOperation*, - uint fieldnr, const byte* field_ptr); + uint fieldnr, const byte* field_ptr); int set_ndb_key(NdbOperation*, Field *field, - uint fieldnr, const byte* field_ptr); + uint fieldnr, const byte* field_ptr); int set_ndb_value(NdbOperation*, Field *field, uint fieldnr, bool *set_blob_value= 0); int get_ndb_value(NdbOperation*, Field *field, uint fieldnr, byte*); friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg); @@ -221,13 +557,37 @@ private: int ndb_err(NdbTransaction*); bool uses_blob_value(bool all_fields); - int write_ndb_file(); - char *update_table_comment(const char * comment); - private: + int write_ndb_file(); + int check_ndb_connection(); + 
void set_rec_per_key(); + void records_update(); + void no_uncommitted_rows_execute_failure(); + void no_uncommitted_rows_update(int); + void no_uncommitted_rows_init(THD *); + void no_uncommitted_rows_reset(THD *); + + /* + Condition pushdown + */ + void cond_clear(); + bool serialize_cond(const COND *cond, Ndb_cond_stack *ndb_cond); + int build_scan_filter_predicate(Ndb_cond* &cond, + NdbScanFilter* filter, + bool negated= false); + int build_scan_filter_group(Ndb_cond* &cond, + NdbScanFilter* filter); + int build_scan_filter(Ndb_cond* &cond, NdbScanFilter* filter); + int generate_scan_filter(Ndb_cond_stack* cond_stack, + NdbScanOperation* op); + + friend int execute_commit(ha_ndbcluster*, NdbTransaction*); + friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*); + friend int execute_no_commit_ie(ha_ndbcluster*, NdbTransaction*); + NdbTransaction *m_active_trans; NdbScanOperation *m_active_cursor; void *m_table; @@ -263,7 +623,7 @@ private: bool m_force_send; ha_rows m_autoincrement_prefetch; bool m_transaction_on; - + Ndb_cond_stack *m_cond_stack; bool m_disable_multi_read; byte *m_multi_range_result_ptr; KEY_MULTI_RANGE *m_multi_ranges; @@ -272,18 +632,7 @@ private: NdbIndexScanOperation *m_multi_cursor; byte *m_multi_range_cursor_result_ptr; int setup_recattr(const NdbRecAttr*); - Ndb *get_ndb(); - void set_rec_per_key(); - void records_update(); - void no_uncommitted_rows_execute_failure(); - void no_uncommitted_rows_update(int); - void no_uncommitted_rows_init(THD *); - void no_uncommitted_rows_reset(THD *); - - friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*); - friend int execute_commit(ha_ndbcluster*, NdbTransaction*); - friend int execute_no_commit_ie(ha_ndbcluster*, NdbTransaction*); }; extern struct show_var_st ndb_status_variables[]; @@ -292,9 +641,9 @@ handlerton *ndbcluster_init(void); bool ndbcluster_end(void); int ndbcluster_discover(THD* thd, const char* dbname, const char* name, - const void** frmblob, uint* frmlen); + const 
void** frmblob, uint* frmlen); int ndbcluster_find_files(THD *thd,const char *db,const char *path, - const char *wild, bool dir, List<char> *files); + const char *wild, bool dir, List<char> *files); int ndbcluster_table_exists(THD* thd, const char *db, const char *name); int ndbcluster_drop_database(const char* path); diff --git a/sql/handler.h b/sql/handler.h index 633252a24f1..d4e24bbb411 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -374,6 +374,9 @@ typedef struct st_foreign_key_info FOREIGN_KEY_INFO; typedef struct st_savepoint SAVEPOINT; extern ulong savepoint_alloc_size; +/* Forward declaration for condition pushdown to storage engine */ +typedef struct Item COND; + typedef struct st_ha_check_opt { ulong sort_buffer_size; @@ -456,7 +459,7 @@ public: enum {NONE=0, INDEX, RND} inited; bool auto_increment_column_changed; bool implicit_emptied; /* Can be !=0 only if HEAP */ - + const COND *pushed_cond; handler(TABLE *table_arg) :table(table_arg), ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0), @@ -465,7 +468,8 @@ public: create_time(0), check_time(0), update_time(0), key_used_on_scan(MAX_KEY), active_index(MAX_KEY), ref_length(sizeof(my_off_t)), block_size(0), - raid_type(0), ft_handler(0), inited(NONE), implicit_emptied(0) + raid_type(0), ft_handler(0), inited(NONE), implicit_emptied(0), + pushed_cond(NULL) {} virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ } int ha_open(const char *name, int mode, int test_if_locked); @@ -713,7 +717,6 @@ public: *engine_callback= 0; return 1; } - /* RETURN true Primary key (if there is one) is clustered key covering all fields @@ -725,6 +728,40 @@ public: { return memcmp(ref1, ref2, ref_length); } + + /* + Condition pushdown to storage engines + */ + + /* + Push condition down to the table handler. + SYNOPSIS + cond_push() + cond Condition to be pushed. The condition tree must not be + modified by the by the caller. 
+ RETURN + The 'remainder' condition that caller must use to filter out records. + NULL means the handler will not return rows that do not match the + passed condition. + NOTES + The pushed conditions form a stack (from which one can remove the + last pushed condition using cond_pop). + The table handler filters out rows using (pushed_cond1 AND pushed_cond2 + AND ... AND pushed_condN) + or less restrictive condition, depending on handler's capabilities. + + handler->extra(HA_EXTRA_RESET) call empties the condition stack. + Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the + condition stack. + */ + virtual const COND *cond_push(const COND *cond) { return cond; }; + /* + Pop the top condition from the condition stack of the handler instance. + SYNOPSIS + cond_pop() + Pops the top if condition stack, if stack is not empty + */ + virtual void cond_pop() { return; }; }; /* Some extern variables used with handlers */ diff --git a/sql/item.h b/sql/item.h index 2b719840357..4082e77efad 100644 --- a/sql/item.h +++ b/sql/item.h @@ -113,6 +113,8 @@ public: typedef bool (Item::*Item_processor)(byte *arg); typedef Item* (Item::*Item_transformer) (byte *arg); +typedef void (*Item_cond_traverser) (const Item *item, void *arg); + class Item { Item(const Item &); /* Prevent use of these */ void operator=(Item &); @@ -123,7 +125,7 @@ public: static void operator delete(void *ptr,size_t size) { TRASH(ptr, size); } static void operator delete(void *ptr, MEM_ROOT *mem_root) {} - enum Type {FIELD_ITEM, FUNC_ITEM, SUM_FUNC_ITEM, STRING_ITEM, + enum Type {FIELD_ITEM= 0, FUNC_ITEM, SUM_FUNC_ITEM, STRING_ITEM, INT_ITEM, REAL_ITEM, NULL_ITEM, VARBIN_ITEM, COPY_STR_ITEM, FIELD_AVG_ITEM, DEFAULT_VALUE_ITEM, PROC_ITEM,COND_ITEM, REF_ITEM, FIELD_STD_ITEM, @@ -132,6 +134,8 @@ public: PARAM_ITEM, TRIGGER_FIELD_ITEM, DECIMAL_ITEM}; enum cond_result { COND_UNDEF,COND_OK,COND_TRUE,COND_FALSE }; + + enum traverse_order { POSTFIX, PREFIX }; /* str_values's main purpose is to be used 
to cache the value in @@ -393,6 +397,13 @@ public: { return (this->*transformer)(arg); } + + virtual void traverse_cond(Item_cond_traverser traverser, + void *arg, + traverse_order order = POSTFIX) + { + (*traverser)(this, arg); + } virtual bool remove_dependence_processor(byte * arg) { return 0; } virtual bool remove_fixed(byte * arg) { fixed= 0; return 0; } diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 3884cce8451..5d764a734bc 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -2363,6 +2363,30 @@ Item *Item_cond::transform(Item_transformer transformer, byte *arg) return Item_func::transform(transformer, arg); } +void Item_cond::traverse_cond(Item_cond_traverser traverser, + void *arg, + traverse_order order) +{ + List_iterator<Item> li(list); + Item *item; + + switch(order) { + case(PREFIX): + (*traverser)(this, arg); + while ((item= li++)) + { + item->traverse_cond(traverser, arg, order); + } + (*traverser)(NULL, arg); + break; + case(POSTFIX): + while ((item= li++)) + { + item->traverse_cond(traverser, arg, order); + } + (*traverser)(this, arg); + } +} /* Move SUM items out from item tree and replace with reference diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 717bcbca7d5..3ec7131aac1 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1028,6 +1028,9 @@ public: void copy_andor_arguments(THD *thd, Item_cond *item); bool walk(Item_processor processor, byte *arg); Item *transform(Item_transformer transformer, byte *arg); + void traverse_cond(Item_cond_traverser, + void *arg, + traverse_order order = POSTFIX); void neg_arguments(THD *thd); }; diff --git a/sql/item_func.cc b/sql/item_func.cc index 3742a13e0bc..19228108bfc 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -360,6 +360,32 @@ bool Item_func::walk (Item_processor processor, byte *argument) return (this->*processor)(argument); } +void Item_func::traverse_cond(Item_cond_traverser traverser, + void *argument, + traverse_order order) +{ + if (arg_count) + 
{ + Item **arg,**arg_end; + + switch (order) { + case(PREFIX): + (*traverser)(this, argument); + for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++) + { + (*arg)->traverse_cond(traverser, argument, order); + } + break; + case (POSTFIX): + for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++) + { + (*arg)->traverse_cond(traverser, argument, order); + } + (*traverser)(this, argument); + } + } +} + /* diff --git a/sql/item_func.h b/sql/item_func.h index 48fc278ccde..b3fa73bb15b 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -163,6 +163,9 @@ public: uint flags= 0); bool walk(Item_processor processor, byte *arg); Item *transform(Item_transformer transformer, byte *arg); + void traverse_cond(Item_cond_traverser traverser, + void * arg, + traverse_order order = POSTFIX); }; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 687b1a4d36b..401dcad8395 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -296,8 +296,13 @@ my_bool opt_log_slave_updates= 0; my_bool opt_console= 0, opt_bdb, opt_innodb, opt_isam, opt_ndbcluster; #ifdef HAVE_NDBCLUSTER_DB const char *opt_ndbcluster_connectstring= 0; +const char *opt_ndb_connectstring= 0; +char opt_ndb_constrbuf[1024]; +unsigned opt_ndb_constrbuf_len= 0; my_bool opt_ndb_shm, opt_ndb_optimized_node_selection; -ulong opt_ndb_cache_check_time= 0; +ulong opt_ndb_cache_check_time; +const char *opt_ndb_mgmd; +ulong opt_ndb_nodeid; #endif my_bool opt_readonly, use_temp_pool, relay_log_purge; my_bool opt_sync_bdb_logs, opt_sync_frm; @@ -4127,9 +4132,11 @@ enum options_mysqld OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, OPT_SAFE_SHOW_DB, OPT_INNODB_SAFE_BINLOG, OPT_INNODB, OPT_ISAM, + OPT_ENGINE_CONDITION_PUSHDOWN, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME, + OPT_NDB_MGMD, OPT_NDB_NODEID, OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE, 
OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, @@ -4386,6 +4393,12 @@ Disable with --skip-bdb (will save memory).", {"enable-pstack", OPT_DO_PSTACK, "Print a symbolic stack trace on failure.", (gptr*) &opt_do_pstack, (gptr*) &opt_do_pstack, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"engine-condition-pushdown", + OPT_ENGINE_CONDITION_PUSHDOWN, + "Push supported query conditions to the storage engine.", + (gptr*) &global_system_variables.engine_condition_pushdown, + (gptr*) &global_system_variables.engine_condition_pushdown, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"exit-info", 'T', "Used for debugging; Use at your own risk!", 0, 0, 0, GET_LONG, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"external-locking", OPT_USE_LOCKING, "Use system (external) locking. With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running.", @@ -4629,9 +4642,19 @@ Disable with --skip-ndbcluster (will save memory).", #ifdef HAVE_NDBCLUSTER_DB {"ndb-connectstring", OPT_NDB_CONNECTSTRING, "Connect string for ndbcluster.", - (gptr*) &opt_ndbcluster_connectstring, - (gptr*) &opt_ndbcluster_connectstring, + (gptr*) &opt_ndb_connectstring, + (gptr*) &opt_ndb_connectstring, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"ndb-mgmd-host", OPT_NDB_MGMD, + "Set host and port for ndb_mgmd. 
Syntax: hostname[:port]", + (gptr*) &opt_ndb_mgmd, + (gptr*) &opt_ndb_mgmd, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"ndb-nodeid", OPT_NDB_NODEID, + "Nodeid for this mysqlserver in the cluster.", + (gptr*) &opt_ndb_nodeid, + (gptr*) &opt_ndb_nodeid, + 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ndb-autoincrement-prefetch-sz", OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, "Specify number of autoincrement values that are prefetched.", (gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz, @@ -6411,6 +6434,31 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), have_ndbcluster= SHOW_OPTION_DISABLED; #endif break; +#ifdef HAVE_NDBCLUSTER_DB + case OPT_NDB_MGMD: + case OPT_NDB_NODEID: + { + int len= my_snprintf(opt_ndb_constrbuf+opt_ndb_constrbuf_len, + sizeof(opt_ndb_constrbuf)-opt_ndb_constrbuf_len, + "%s%s%s",opt_ndb_constrbuf_len > 0 ? ",":"", + optid == OPT_NDB_NODEID ? "nodeid=" : "", + argument); + opt_ndb_constrbuf_len+= len; + } + /* fall through to add the connectstring to the end + * and set opt_ndbcluster_connectstring + */ + case OPT_NDB_CONNECTSTRING: + if (opt_ndb_connectstring && opt_ndb_connectstring[0]) + my_snprintf(opt_ndb_constrbuf+opt_ndb_constrbuf_len, + sizeof(opt_ndb_constrbuf)-opt_ndb_constrbuf_len, + "%s%s", opt_ndb_constrbuf_len > 0 ? 
",":"", + opt_ndb_connectstring); + else + opt_ndb_constrbuf[opt_ndb_constrbuf_len]= 0; + opt_ndbcluster_connectstring= opt_ndb_constrbuf; + break; +#endif case OPT_INNODB: #ifdef HAVE_INNOBASE_DB if (opt_innodb) diff --git a/sql/set_var.cc b/sql/set_var.cc index 6ea1747ec07..fc0802bf185 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -402,6 +402,11 @@ sys_var_long_ptr sys_innodb_thread_concurrency("innodb_thread_concurrency", &srv_thread_concurrency); #endif +/* Condition pushdown to storage engine */ +sys_var_thd_bool +sys_engine_condition_pushdown("engine_condition_pushdown", + &SV::engine_condition_pushdown); + #ifdef HAVE_NDBCLUSTER_DB /* ndb thread specific variable settings */ sys_var_thd_ulong @@ -688,6 +693,7 @@ sys_var *sys_variables[]= &sys_innodb_thread_sleep_delay, &sys_innodb_thread_concurrency, #endif + &sys_engine_condition_pushdown, #ifdef HAVE_NDBCLUSTER_DB &sys_ndb_autoincrement_prefetch_sz, &sys_ndb_force_send, @@ -868,6 +874,8 @@ struct show_var_st init_vars[]= { #ifdef __NT__ {"named_pipe", (char*) &opt_enable_named_pipe, SHOW_MY_BOOL}, #endif + {sys_engine_condition_pushdown.name, + (char*) &sys_engine_condition_pushdown, SHOW_SYS}, #ifdef HAVE_NDBCLUSTER_DB {sys_ndb_autoincrement_prefetch_sz.name, (char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS}, diff --git a/sql/sql_class.h b/sql/sql_class.h index 6c1280366f4..2cd525f54bc 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -566,6 +566,7 @@ struct system_variables my_bool low_priority_updates; my_bool new_mode; my_bool query_cache_wlock_invalidate; + my_bool engine_condition_pushdown; #ifdef HAVE_REPLICATION ulong sync_replication; ulong sync_replication_slave_id; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index db6c5ce530d..2f38f96c976 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1863,7 +1863,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd, mysqld_list_fields(thd,&table_list,fields); thd->lex->unit.cleanup(); thd->cleanup_after_query(); 
+#ifdef HAVE_INNOBASE_DB release_local_lock(thd, locked_tables, old_innodb_table_locks); +#else + release_local_lock(thd, locked_tables, false); +#endif break; } #endif @@ -4602,8 +4606,11 @@ cleanup: if (thd->lock == thd->locked_tables) thd->lock= 0; } - +#ifdef HAVE_INNOBASE_DB release_local_lock(thd, locked_tables, old_innodb_table_locks); +#else + release_local_lock(thd, locked_tables, false); +#endif DBUG_RETURN(res || thd->net.report_error); } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 2c1d5f03aa2..499d3cff3f1 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -5303,6 +5303,13 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) if (!(tmp= add_found_match_trig_cond(first_inner_tab, tmp, 0))) DBUG_RETURN(1); tab->select_cond=sel->cond=tmp; + if (current_thd->variables.engine_condition_pushdown) + { + tab->table->file->pushed_cond= NULL; + /* Push condition to handler */ + if (!tab->table->file->cond_push(tmp)) + tab->table->file->pushed_cond= tmp; + } } else tab->select_cond= sel->cond= NULL; @@ -5424,6 +5431,13 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) join->thd->memdup((gptr) sel, sizeof(SQL_SELECT)); tab->cache.select->cond=tmp; tab->cache.select->read_tables=join->const_table_map; + if (current_thd->variables.engine_condition_pushdown && + (!tab->table->file->pushed_cond)) + { + /* Push condition to handler */ + if (!tab->table->file->cond_push(tmp)) + tab->table->file->pushed_cond= tmp; + } } } } |