author      wax@mysql.com <>    2005-02-08 18:51:42 +0500
committer   wax@mysql.com <>    2005-02-08 18:51:42 +0500
commit      7d1af16c2aa3436f714fda2045af46ae5776d60f (patch)
tree        a0d98abfc1c068202b5a8081ba0c2ee7529e5171
parent      ca2f539273279f4881db88e9aaadc6f0f680c5b6 (diff)
parent      bd50066cfe1c8020e15c503baff5b47bf27704d3 (diff)
download    mariadb-git-7d1af16c2aa3436f714fda2045af46ae5776d60f.tar.gz
Merge mysql.com:/home/wax/mysql/mysql-4.1
into mysql.com:/home/wax/mysql/mysql-4.1test2
-rwxr-xr-x  BUILD/compile-solaris-sparc-purify | 8
-rw-r--r--  BitKeeper/etc/logging_ok | 2
-rwxr-xr-x  Build-tools/Bootstrap | 2
-rwxr-xr-x  Build-tools/Do-compile | 26
-rw-r--r--  Build-tools/Do-solaris-pkg | 183
-rw-r--r--  acinclude.m4 | 23
-rw-r--r--  client/mysqladmin.cc | 1
-rw-r--r--  client/mysqlcheck.c | 2
-rw-r--r--  client/mysqldump.c | 36
-rw-r--r--  configure.in | 29
-rw-r--r--  extra/perror.c | 14
-rw-r--r--  include/ft_global.h | 2
-rw-r--r--  include/my_global.h | 6
-rw-r--r--  include/mysql.h | 11
-rw-r--r--  innobase/include/univ.i | 4
-rw-r--r--  myisam/ft_boolean_search.c | 7
-rw-r--r--  myisam/ft_static.c | 5
-rw-r--r--  myisam/ftdefs.h | 2
-rw-r--r--  mysql-test/include/have_ndb.inc | 1
-rw-r--r--  mysql-test/lib/mtr_cases.pl | 270
-rw-r--r--  mysql-test/lib/mtr_io.pl | 3
-rw-r--r--  mysql-test/lib/mtr_process.pl | 672
-rw-r--r--  mysql-test/lib/mtr_report.pl | 25
-rwxr-xr-x  mysql-test/mysql-test-run.pl | 607
-rw-r--r--  mysql-test/mysql-test-run.sh | 26
-rw-r--r--  mysql-test/r/compare.result | 3
-rw-r--r--  mysql-test/r/drop_temp_table.result | 16
-rw-r--r--  mysql-test/r/func_group.result | 12
-rw-r--r--  mysql-test/r/func_misc.result | 21
-rw-r--r--  mysql-test/r/func_str.result | 19
-rw-r--r--  mysql-test/r/group_by.result | 45
-rw-r--r--  mysql-test/r/insert_select.result.es | 12
-rw-r--r--  mysql-test/r/limit.result | 9
-rw-r--r--  mysql-test/r/ndb_basic.result | 34
-rw-r--r--  mysql-test/r/subselect.result | 12
-rw-r--r--  mysql-test/r/subselect_innodb.result | 12
-rw-r--r--  mysql-test/r/type_date.result | 8
-rw-r--r--  mysql-test/r/type_float.result.es | 9
-rw-r--r--  mysql-test/r/union.result | 36
-rw-r--r--  mysql-test/r/update.result | 7
-rw-r--r--  mysql-test/r/user_var.result | 4
-rw-r--r--  mysql-test/t/compare.test | 2
-rw-r--r--  mysql-test/t/drop_temp_table.test | 3
-rw-r--r--  mysql-test/t/func_group.test | 14
-rw-r--r--  mysql-test/t/func_misc.test | 15
-rw-r--r--  mysql-test/t/func_str.test | 18
-rw-r--r--  mysql-test/t/group_by.test | 28
-rw-r--r--  mysql-test/t/limit.test | 10
-rw-r--r--  mysql-test/t/ndb_autodiscover.test | 10
-rw-r--r--  mysql-test/t/ndb_basic.test | 38
-rw-r--r--  mysql-test/t/ndb_restore.test | 6
-rw-r--r--  mysql-test/t/subselect.test | 14
-rw-r--r--  mysql-test/t/subselect_innodb.test | 14
-rw-r--r--  mysql-test/t/type_date.test | 7
-rw-r--r--  mysql-test/t/union.test | 35
-rw-r--r--  mysql-test/t/update.test | 9
-rw-r--r--  mysys/my_handler.c | 6
-rw-r--r--  ndb/include/ndbapi/NdbConnection.hpp | 2
-rw-r--r--  ndb/include/ndbapi/NdbDictionary.hpp | 6
-rw-r--r--  ndb/include/portlib/NdbThread.h | 2
-rw-r--r--  ndb/src/common/portlib/NdbPortLibTest.cpp | 16
-rw-r--r--  ndb/src/common/portlib/NdbThread.c | 40
-rw-r--r--  ndb/src/common/transporter/TransporterRegistry.cpp | 5
-rw-r--r--  ndb/src/common/util/NdbSqlUtil.cpp | 28
-rw-r--r--  ndb/src/common/util/SocketServer.cpp | 9
-rw-r--r--  ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 3
-rw-r--r--  ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 2
-rw-r--r--  ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 19
-rw-r--r--  ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 4
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp | 3
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp | 4
-rw-r--r--  ndb/src/kernel/vm/FastScheduler.cpp | 33
-rw-r--r--  ndb/src/kernel/vm/ThreadConfig.cpp | 3
-rw-r--r--  ndb/src/kernel/vm/WatchDog.cpp | 3
-rw-r--r--  ndb/src/mgmclient/CommandInterpreter.cpp | 6
-rw-r--r--  ndb/src/mgmsrv/ConfigInfo.cpp | 14
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.cpp | 10
-rw-r--r--  ndb/src/ndbapi/ClusterMgr.cpp | 4
-rw-r--r--  ndb/src/ndbapi/NdbDictionaryImpl.cpp | 2
-rw-r--r--  ndb/src/ndbapi/TransporterFacade.cpp | 10
-rw-r--r--  ndb/src/ndbapi/ndb_cluster_connection.cpp | 3
-rw-r--r--  ndb/test/include/HugoOperations.hpp | 2
-rw-r--r--  ndb/test/include/NDBT_Test.hpp | 4
-rw-r--r--  ndb/test/ndbapi/benchronja.cpp | 4
-rw-r--r--  ndb/test/ndbapi/flexAsynch.cpp | 3
-rw-r--r--  ndb/test/ndbapi/flexBench.cpp | 5
-rw-r--r--  ndb/test/ndbapi/flexHammer.cpp | 5
-rw-r--r--  ndb/test/ndbapi/flexScan.cpp | 3
-rw-r--r--  ndb/test/ndbapi/flexTT.cpp | 3
-rw-r--r--  ndb/test/ndbapi/flexTimedAsynch.cpp | 3
-rw-r--r--  ndb/test/ndbapi/flex_bench_mysql.cpp | 29
-rw-r--r--  ndb/test/ndbapi/mainAsyncGenerator.cpp | 2
-rw-r--r--  ndb/test/ndbapi/testOperations.cpp | 369
-rw-r--r--  ndb/test/ndbapi/testScan.cpp | 30
-rw-r--r--  ndb/test/run-test/daily-basic-tests.txt | 4
-rw-r--r--  ndb/test/src/HugoOperations.cpp | 11
-rw-r--r--  ndb/test/src/HugoTransactions.cpp | 310
-rw-r--r--  ndb/test/src/NDBT_Test.cpp | 13
-rw-r--r--  ndb/test/tools/transproxy.cpp | 2
-rw-r--r--  scripts/Makefile.am | 3
-rw-r--r--  scripts/make_binary_distribution.sh | 14
-rw-r--r--  scripts/mysql_config.sh | 7
-rw-r--r--  sql/field.cc | 47
-rw-r--r--  sql/field.h | 1
-rw-r--r--  sql/ha_innodb.cc | 23
-rw-r--r--  sql/ha_myisam.h | 8
-rw-r--r--  sql/ha_ndbcluster.cc | 50
-rw-r--r--  sql/handler.h | 3
-rw-r--r--  sql/item.cc | 65
-rw-r--r--  sql/item.h | 4
-rw-r--r--  sql/item_cmpfunc.cc | 50
-rw-r--r--  sql/item_func.cc | 24
-rw-r--r--  sql/item_row.cc | 11
-rw-r--r--  sql/item_strfunc.cc | 24
-rw-r--r--  sql/item_strfunc.h | 1
-rw-r--r--  sql/sql_base.cc | 66
-rw-r--r--  sql/sql_parse.cc | 3
-rw-r--r--  sql/sql_string.h | 10
-rw-r--r--  sql/sql_table.cc | 9
-rw-r--r--  sql/sql_union.cc | 14
-rw-r--r--  sql/sql_update.cc | 3
-rw-r--r--  sql/sql_yacc.yy | 9
-rw-r--r--  sql/unireg.h | 2
-rw-r--r--  strings/ctype-big5.c | 4
-rw-r--r--  strings/ctype-bin.c | 4
-rw-r--r--  strings/ctype-gbk.c | 4
-rw-r--r--  strings/ctype-latin1.c | 4
-rw-r--r--  strings/ctype-mb.c | 4
-rw-r--r--  strings/ctype-simple.c | 6
-rw-r--r--  strings/ctype-sjis.c | 4
-rw-r--r--  strings/ctype-tis620.c | 4
-rw-r--r--  strings/ctype-ucs2.c | 4
-rw-r--r--  strings/ctype-utf8.c | 4
-rw-r--r--  support-files/Makefile.am | 9
-rw-r--r--  support-files/mysql.spec.sh | 93
-rw-r--r--  support-files/ndb-config-2-node.ini.sh | 43
-rw-r--r--  vio/viosocket.c | 2
-rw-r--r--  vio/viossl.c | 2
-rw-r--r--  zlib/Makefile.am | 2
139 files changed, 3023 insertions(+), 1109 deletions(-)
diff --git a/BUILD/compile-solaris-sparc-purify b/BUILD/compile-solaris-sparc-purify
index 0e530f75b60..5f5ba81396f 100755
--- a/BUILD/compile-solaris-sparc-purify
+++ b/BUILD/compile-solaris-sparc-purify
@@ -61,7 +61,7 @@ purifying_binaries ()
if [ -n "$cxxfilt" ] ; then
opts="$opts -demangle-program=$cxxfilt"
fi
- opts="$opts -best-effort g++"
+ opts="$opts -best-effort"
back=`pwd`
cd $dir
@@ -76,17 +76,17 @@ purifying_binaries ()
fi
if [ -n "$mode" -a $mode = purify ] ; then
- gmake CXXLD="purify $opts" $target
+ gmake CCLD="purify $opts gcc" CXXLD="purify $opts g++" $target
mv $binary $binary-purify
fi
if [ -n "$mode" -a $mode = quantify ] ; then
- gmake CXXLD="quantify $opts" $target
+ gmake CCLD="quantify $opts gcc" CXXLD="quantify $opts g++" $target
mv $binary $binary-quantify
fi
if [ -n "$mode" -a $mode = purecov ] ; then
- gmake CXXLD="purecov $opts" $target
+ gmake CCLD="purecov $opts gcc" CXXLD="purecov $opts g++" $target
mv $binary $binary-purecov
fi
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index 6ccc886e161..59882283d5d 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -119,6 +119,7 @@ miguel@light.
miguel@light.local
miguel@sartre.local
mikael@mc04.(none)
+mikael@orca.ndb.mysql.com
mikron@c-fb0ae253.1238-1-64736c10.cust.bredbandsbolaget.se
mikron@mikael-ronstr-ms-dator.local
mleich@mysql.com
@@ -180,6 +181,7 @@ ram@gw.mysql.r18.ru
ram@gw.udmsearch.izhnet.ru
ram@mysql.r18.ru
ram@ram.(none)
+ramil@mysql.com
ranger@regul.home.lan
rburnett@build.mysql.com
reggie@bob.(none)
diff --git a/Build-tools/Bootstrap b/Build-tools/Bootstrap
index 827eb4022d7..8b769dca3c4 100755
--- a/Build-tools/Bootstrap
+++ b/Build-tools/Bootstrap
@@ -214,7 +214,7 @@ if (-d $target_dir)
@stat= stat("$target_dir/configure.in");
my $mtime= $stat[9];
my ($sec,$min,$hour,$mday,$mon,$year) = localtime($mtime);
- my $mtime= sprintf("%04d%-02d-%02d-%02d:%02d", $year+1900, $mon+1, $mday, $hour, $min);
+ my $mtime= sprintf("%04d-%02d-%02d-%02d:%02d", $year+1900, $mon+1, $mday, $hour, $min);
&logger("Renaming $target_dir to $target_dir-$mtime");
$command= "mv ";
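The Bootstrap fix above corrects two problems in one format string: the hyphen between year and month was missing, and "%-02d" (left-justify, width 2, no zero fill) was used where "%02d" (zero-padded) was intended. The conversion specifiers behave the same way in C's printf family, so a small standalone C sketch (not part of the patch) shows the corrected timestamp format:

    #include <stdio.h>
    #include <time.h>

    /* Illustration only: build the same YYYY-MM-DD-HH:MM stamp as the
       corrected Perl sprintf.  With "%-02d" a month of 2 would print as
       "2 " instead of "02". */
    int main(void)
    {
        time_t now = time(NULL);
        struct tm *t = localtime(&now);
        char buf[32];

        snprintf(buf, sizeof(buf), "%04d-%02d-%02d-%02d:%02d",
                 t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
                 t->tm_hour, t->tm_min);
        printf("%s\n", buf);   /* e.g. 2005-02-08-18:51 */
        return 0;
    }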
diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile
index 4034533f2eb..1c3ff01796f 100755
--- a/Build-tools/Do-compile
+++ b/Build-tools/Do-compile
@@ -7,7 +7,7 @@ use Sys::Hostname;
@config_options= ();
@make_options= ();
-$opt_distribution=$opt_user=$opt_config_env=$opt_config_extra_env="";
+$opt_comment=$opt_distribution=$opt_user=$opt_config_env=$opt_config_extra_env="";
$opt_dbd_options=$opt_perl_options=$opt_config_options=$opt_make_options=$opt_suffix="";
$opt_tmp=$opt_version_suffix="";
$opt_bundled_zlib=$opt_help=$opt_delete=$opt_debug=$opt_stage=$opt_no_test=$opt_no_perl=$opt_one_error=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=$opt_with_archive=$opt_with_cluster=$opt_with_csv=$opt_with_example=$opt_with_debug=$opt_no_benchmark=$opt_no_mysqltest=$opt_without_embedded=$opt_readline=0;
@@ -17,6 +17,7 @@ GetOptions(
"bdb",
"build-thread=i",
"bundled-zlib",
+ "comment=s",
"config-env=s" => \@config_env,
"config-extra-env=s" => \@config_extra_env,
"config-options=s" => \@config_options,
@@ -110,6 +111,7 @@ $log="$pwd/Logs/$host-$major.$minor$opt_version_suffix.log";
$opt_distribution =~ /(mysql[^\/]*)\.tar/;
$ver=$1;
$gcc_version=which("gcc");
+$opt_comment= "Official MySQL$opt_version_suffix binary" unless $opt_comment;
if (defined($gcc_version) && ! $opt_config_env)
{
$tmp=`$gcc_version -v 2>&1`;
@@ -303,7 +305,7 @@ if ($opt_stage <= 1)
}
$prefix="/usr/local/mysql";
- check_system("$opt_config_env ./configure --prefix=$prefix --localstatedir=$prefix/data --libexecdir=$prefix/bin --with-comment=\"Official MySQL$opt_version_suffix binary\" --with-extra-charsets=complex --with-server-suffix=\"$opt_version_suffix\" --enable-thread-safe-client --enable-local-infile $opt_config_options","Thank you for choosing MySQL");
+ check_system("$opt_config_env ./configure --prefix=$prefix --localstatedir=$prefix/data --libexecdir=$prefix/bin --with-comment=\"$opt_comment\" --with-extra-charsets=complex --with-server-suffix=\"$opt_version_suffix\" --enable-thread-safe-client --enable-local-infile $opt_config_options","Thank you for choosing MySQL");
if (-d "$pwd/$host/include-mysql")
{
safe_system("cp -r $pwd/$host/include-mysql/* $pwd/$host/$ver/include");
@@ -530,6 +532,10 @@ When running several Do-compile runs in parallel, each build
should have its own thread ID, so running the test suites
does not cause conflicts with duplicate TCP port numbers.
+--comment=<comment>
+Replace the default compilation comment that is embedded into
+the mysqld binary.
+
--config-env=<environment for configure>
To set up the environment, like 'CC=cc CXX=gcc CXXFLAGS=-O3'
@@ -684,16 +690,20 @@ sub abort
if ($opt_user)
{
- $mail_header_file="$opt_tmp/do-command.$$";
- open(TMP,">$mail_header_file");
+ # Take the last 40 lines of the build log
+ open(LOG, "$log") or die $!;
+ my @log= <LOG>;
+ close LOG;
+ splice @log => 0, -40;
+ my $mail_file="$opt_tmp/do-command.$$";
+ open(TMP,">$mail_file") or die $!;
print TMP "From: mysqldev\@$full_host_name\n";
print TMP "To: $email\n";
print TMP "Subject: $host($uname): $ver$opt_version_suffix compilation failed\n\n";
+ print TMP @log;
close TMP;
- system("tail -n 40 $log > $log.mail");
- system("cat $mail_header_file $log.mail | $sendmail -t -f $email");
- unlink($mail_header_file);
- unlink("$log.mail");
+ system("$sendmail -t -f $email < $mail_file");
+ unlink($mail_file);
}
exit 1;
}
diff --git a/Build-tools/Do-solaris-pkg b/Build-tools/Do-solaris-pkg
new file mode 100644
index 00000000000..685a1f0923b
--- /dev/null
+++ b/Build-tools/Do-solaris-pkg
@@ -0,0 +1,183 @@
+#!/usr/bin/perl
+#
+# Script to create Solaris packages
+#
+$INTERACTIVE= 0;
+chomp ($hostname= `hostname`);
+$find = "/usr/bin/find";
+$pkgproto = "/usr/bin/pkgproto";
+$pkgmk = "/usr/bin/pkgmk -o";
+$pkgtrans = "/usr/bin/pkgtrans";
+$temp = "/tmp/prototype$$";
+$prototype = "prototype";
+$pkginfo = "pkginfo";
+($gid ,$pkg ,$uid ,$userInfo ,$email ,$quota ,$group ,$passwd
+,$category ,$userHome ,$vendor ,$loginShell ,$pstamp ,$basedir)=();
+
+$fullname = shift @ARGV;
+$fullname or die "No package name was specified";
+-d $fullname or die "That directory is not present!";
+
+$fullname =~ s,/+$,,; # Remove ending slash if any
+
+$pkgdir= `cd ../$hostname; pwd`;
+$pwd = `pwd`;
+if ($pwd =~ '\/usr\/local') {
+ $pwd = $`;
+}
+die "Wrong location, please cd to <PKGBASE>/usr/local/ and run again.\n"
+ if ($pwd eq "");
+
+system ("$find . -print | $pkgproto > $temp");
+open (PREPROTO,"<$temp") or die "Unable to read prototype information ($!)\n";
+open (PROTO,">$prototype") or die "Unable to write file prototype ($!)\n";
+print PROTO "i pkginfo=./$pkginfo\n";
+while (<PREPROTO>) {
+ # Read the prototype information from /tmp/prototype$$
+ chomp;
+ $thisline = $_;
+ if ($thisline =~ " prototype "
+ or $thisline =~ " pkginfo ") {
+ # We don't need that line
+ } elsif ($thisline =~ "^[fd] ") {
+ # Change the ownership for files and directories
+ ($dir, $none, $file, $mode, $user, $group) = split / /,$thisline;
+ print PROTO "$dir $none $file $mode bin bin\n";
+ } else {
+ # Symlinks and other stuff should be printed as well ofcourse
+ print PROTO "$thisline\n";
+ }
+}
+close PROTO;
+close PREPROTO;
+
+# Clean up
+unlink $temp or warn "Unable to remove tempfile ($!)\n";
+
+# Now we can start building the package
+#
+# First get some info
+
+$fullname =~ /^((mysql)-.+)-([\d\.]+)-.+$/
+ or die "This name is not what I expected - \"$fullname\"";
+
+$default{"name"}= $2;
+$default{"version"}= $3;
+$default{"pkg"}= $1;
+$default{"arch"} = `uname -m`;
+chomp $default{"arch"};
+$default{"category"}= "application";
+$default{"vendor"}= "MySQL AB";
+$default{"email"}= "build\@mysql.com";
+$default{"pstamp"}= "MySQL AB Build Engineers";
+$os = `uname -r`;
+$os =~ '\.';
+$os = "sol$'";
+chomp $os;
+$default{"basedir"}= "/usr/local";
+$default{"packagename"}= "${fullname}.pkg";
+
+# Check for correctness of guessed values by userinput
+
+%questions = (
+ pkg => "Please give the name for this package",
+ name => "Now enter the real name for this package",
+ arch => "What architecture did you build the package on?",
+ version => "Enter the version number of the package",
+ category => "What category does this package belong to?",
+ vendor => "Who is the vendor of this package?",
+ email => "Enter the email adress for contact",
+ pstamp => "Enter your own name",
+ basedir => "What is the basedir this package will install into?",
+ packagename => "How should I call the packagefile?",
+);
+
+@vars = qw(pkg name arch version category vendor email pstamp basedir
+ packagename);
+foreach $varname (@vars) {
+ getvar_noq($varname);
+}
+
+if ($INTERACTIVE) {
+ while (!&chkvar()) {
+ print "\n";
+ foreach $varname (@vars) {
+ getvar($varname);
+ }
+ @vars = qw(pkg name arch version category vendor email pstamp basedir
+ packagename);
+ }
+}
+$classes = "none";
+
+# Create the pkginfo file
+
+print "\nNow creating $pkginfo file\n";
+open (PKGINFO,">$pkginfo") || die "Unable to open $pkginfo for writing ($!)\n";
+print PKGINFO "PKG=\"$pkg\"\n";
+print PKGINFO "NAME=\"$name\"\n";
+print PKGINFO "ARCH=\"$arch\"\n";
+print PKGINFO "VERSION=\"$version\"\n";
+print PKGINFO "CATEGORY=\"$category\"\n";
+print PKGINFO "VENDOR=\"$vendor\"\n";
+print PKGINFO "EMAIL=\"$email\"\n";
+print PKGINFO "PSTAMP=\"$pstamp\"\n";
+print PKGINFO "BASEDIR=\"$basedir\"\n";
+print PKGINFO "CLASSES=\"$classes\"\n";
+close PKGINFO;
+print "Done.\n";
+
+# Build and zip the package
+
+print "Building package\n";
+system ("$pkgmk -r `pwd`");
+system ("(cd /var/spool/pkg; $pkgtrans -s -o `pwd` /tmp/$packagename $pkg)");
+system ("gzip /tmp/$packagename");
+
+# Clean-up the spool area
+system ("(cd /var/spool/pkg; rm -rf $pkg)");
+# Clean-up the ~/packaging/ area
+system ("(rm -rf mysql*)");
+unlink $pkginfo;
+unlink $prototype;
+system ("mv /tmp/${packagename}.gz $pkgdir");
+print "Done. (~/$hostname/$packagename.gz)\n";
+# The subroutines
+sub chkvar {
+ print "\n";
+
+ print "PKG=$pkg\n";
+ print "NAME=$name\n";
+ print "ARCH=$arch\n";
+ print "VERSION=$version\n";
+ print "CATEGORY=$category\n";
+ print "VENDOR=$vendor\n";
+ print "EMAIL=$email\n";
+ print "PSTAMP=$pstamp\n";
+ print "BASEDIR=$basedir\n";
+ print "PACKAGENAME=$packagename\n";
+
+
+ print "\nIs this information correct? [Y/n]: ";
+ my $answer= <STDIN>;
+ chomp $answer;
+ $answer= 'Y' if ($answer eq "");
+ $answer= uc $answer;
+ my $res= ($answer eq 'Y')? 1 : 0;
+ return($res);
+}
+
+sub getvar_noq {
+ my $questionname = "@_";
+ $$questionname = $default{$questionname};
+}
+
+sub getvar {
+ my $questionname = "@_";
+ my $ucquestionname= uc $questionname;
+ print "$ucquestionname: $questions{$questionname} [$default{\"$questionname\"}]: ";
+ my $answer = <STDIN>;
+ chomp $answer;
+ $$questionname = $answer;
+ $$questionname = $default{$questionname} if ($$questionname eq "");
+}
diff --git a/acinclude.m4 b/acinclude.m4
index 4f2ad8daf91..5ddd8952c42 100644
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -194,6 +194,8 @@ dnl Define zlib paths to point at bundled zlib
AC_DEFUN([MYSQL_USE_BUNDLED_ZLIB], [
ZLIB_INCLUDES="-I\$(top_srcdir)/zlib"
ZLIB_LIBS="\$(top_builddir)/zlib/libz.la"
+dnl Omit -L$pkglibdir as it's always in the list of mysql_config deps.
+ZLIB_DEPS="-lz"
zlib_dir="zlib"
AC_SUBST([zlib_dir])
mysql_cv_compress="yes"
@@ -235,8 +237,13 @@ dnl $prefix/lib. If zlib headers or binaries weren't found at $prefix, the
dnl macro bails out with error.
dnl
dnl If the library was found, this function #defines HAVE_COMPRESS
-dnl and configure variables ZLIB_INCLUDES (i.e. -I/path/to/zlib/include) and
-dnl ZLIB_LIBS (i. e. -L/path/to/zlib/lib -lz).
+dnl and configure variables ZLIB_INCLUDES (i.e. -I/path/to/zlib/include),
+dnl ZLIB_LIBS (i. e. -L/path/to/zlib/lib -lz) and ZLIB_DEPS which is
+dnl used in mysql_config and is always the same as ZLIB_LIBS except to
+dnl when we use the bundled zlib. In the latter case ZLIB_LIBS points to the
+dnl build dir ($top_builddir/zlib), while mysql_config must point to the
+dnl installation dir ($pkglibdir), so ZLIB_DEPS is set to point to
+dnl $pkglibdir.
AC_DEFUN([MYSQL_CHECK_ZLIB_WITH_COMPRESS], [
AC_MSG_CHECKING([for zlib compression library])
@@ -285,7 +292,11 @@ case $SYSTEM_TYPE in
;;
esac
if test "$mysql_cv_compress" = "yes"; then
+ if test "x$ZLIB_DEPS" = "x"; then
+ ZLIB_DEPS="$ZLIB_LIBS"
+ fi
AC_SUBST([ZLIB_LIBS])
+ AC_SUBST([ZLIB_DEPS])
AC_SUBST([ZLIB_INCLUDES])
AC_DEFINE([HAVE_COMPRESS], [1], [Define to enable compression support])
fi
@@ -1039,7 +1050,6 @@ AC_MSG_CHECKING(for OpenSSL)
echo "You can't use the --all-static link option when using openssl."
exit 1
fi
- NON_THREADED_CLIENT_LIBS="$NON_THREADED_CLIENT_LIBS $openssl_libs"
else
AC_MSG_RESULT(no)
if test ! -z "$openssl_includes"
@@ -1624,7 +1634,12 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [
--without-ndb-debug Disable special ndb debug features],
[ndb_debug="$withval"],
[ndb_debug="default"])
-
+ AC_ARG_WITH([ndb-ccflags],
+ [
+ --with-ndb-ccflags Extra CC options for ndb compile],
+ [ndb_cxxflags_fix="$ndb_cxxflags_fix $withval"],
+ [ndb_cxxflags_fix=$ndb_cxxflags_fix])
+
AC_MSG_CHECKING([for NDB Cluster options])
AC_MSG_RESULT([])
diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc
index d390a152fc7..0da7d5b3acf 100644
--- a/client/mysqladmin.cc
+++ b/client/mysqladmin.cc
@@ -1008,6 +1008,7 @@ static void usage(void)
print_defaults("my",load_default_groups);
puts("\nWhere command is a one or more of: (Commands may be shortened)\n\
create databasename Create a new database\n\
+ debug Instruct server to write debug information to log\n\
drop databasename Delete a database and all its tables\n\
extended-status Gives an extended status message from the server\n\
flush-hosts Flush all cached hosts\n\
diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c
index c670b84db44..babf4de0c3d 100644
--- a/client/mysqlcheck.c
+++ b/client/mysqlcheck.c
@@ -195,7 +195,7 @@ static void usage(void)
puts("and you are welcome to modify and redistribute it under the GPL license.\n");
puts("This program can be used to CHECK (-c,-m,-C), REPAIR (-r), ANALYZE (-a)");
puts("or OPTIMIZE (-o) tables. Some of the options (like -e or -q) can be");
- puts("used at the same time. It works on MyISAM and in some cases on BDB tables.");
+ puts("used at the same time. Not all options are supported by all storage engines.");
puts("Please consult the MySQL manual for latest information about the");
puts("above. The options -c,-r,-a and -o are exclusive to each other, which");
puts("means that the last option will be used, if several was specified.\n");
diff --git a/client/mysqldump.c b/client/mysqldump.c
index afaa2dc5a6d..52255ccb896 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -2091,27 +2091,27 @@ static int dump_all_tables_in_db(char *database)
RETURN
void
*/
-static void get_actual_table_name( const char *old_table_name,
- char *new_table_name,
- int buf_size )
-{
- MYSQL_RES *tableRes;
- MYSQL_ROW row;
- char query[ NAME_LEN + 50 ];
- DBUG_ENTER("get_actual_table_name");
+static void get_actual_table_name(const char *old_table_name,
+ char *new_table_name,
+ int buf_size)
+{
+ MYSQL_RES *tableRes;
+ MYSQL_ROW row;
+ char query[ NAME_LEN + 50 ];
+ DBUG_ENTER("get_actual_table_name");
- sprintf( query, "SHOW TABLES LIKE '%s'", old_table_name );
- if (mysql_query_with_error_report(sock, 0, query))
- {
- safe_exit(EX_MYSQLERR);
- }
+ sprintf( query, "SHOW TABLES LIKE '%s'", old_table_name);
+ if (mysql_query_with_error_report(sock, 0, query))
+ {
+ safe_exit(EX_MYSQLERR);
+ }
- tableRes = mysql_store_result( sock );
- row = mysql_fetch_row( tableRes );
- strncpy( new_table_name, row[0], buf_size );
- mysql_free_result(tableRes);
-} /* get_actual_table_name */
+ tableRes= mysql_store_result( sock );
+ row= mysql_fetch_row( tableRes );
+ strmake(new_table_name, row[0], buf_size-1);
+ mysql_free_result(tableRes);
+}
static int dump_selected_tables(char *db, char **table_names, int tables)
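The strncpy-to-strmake change above matters because strncpy() leaves the destination unterminated when the source is buf_size characters or longer. The following C sketch shows a strmake-style bounded copy; its semantics are assumed from how the patch calls it (copy at most `length` characters and always add the terminating NUL), not taken verbatim from MySQL's strings library:

    #include <stddef.h>

    /* Sketch of a strmake-like helper: never writes more than length+1
       bytes and always NUL-terminates, unlike strncpy(). */
    static char *my_strmake(char *dst, const char *src, size_t length)
    {
        while (length-- && (*dst = *src++) != '\0')
            dst++;
        *dst = '\0';
        return dst;              /* points at the terminating NUL */
    }

    /* Usage mirroring the patch:
       my_strmake(new_table_name, row[0], buf_size - 1);            */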
diff --git a/configure.in b/configure.in
index e57cdabad25..665029accb3 100644
--- a/configure.in
+++ b/configure.in
@@ -924,9 +924,11 @@ if test "$ac_cv_header_termio_h" = "no" -a "$ac_cv_header_termios_h" = "no"
then
AC_CHECK_FUNC(gtty, , AC_CHECK_LIB(compat, gtty))
fi
-# We make a special variable for client library's to avoid including
-# thread libs in the client.
-NON_THREADED_CLIENT_LIBS="$LIBS $ZLIB_LIBS"
+
+# We make a special variable for non-threaded version of LIBS to avoid
+# including thread libs into non-threaded version of MySQL client library.
+# Later in this script LIBS will be augmented with a threads library.
+NON_THREADED_LIBS="$LIBS"
AC_MSG_CHECKING([for int8])
case $SYSTEM_TYPE in
@@ -1502,7 +1504,7 @@ then
if test -f /usr/lib/libxnet.so -a "$SYSTEM_TYPE" = "sni-sysv4"
then
LIBS="-lxnet $LIBS"
- NON_THREADED_CLIENT_LIBS="$NON_THREADED_CLIENT_LIBS -lxnet"
+ NON_THREADED_LIBS="-lxnet $NON_THREADED_LIBS"
with_named_thread="-Kthread $LDFLAGS -lxnet"
LD_FLAGS=""
CFLAGS="-Kthread $CFLAGS"
@@ -1677,8 +1679,8 @@ then
elif test "$with_debug" = "full"
then
# Full debug. Very slow in some cases
- CFLAGS="$DEBUG_CFLAGS -DDBUG_ON -DSAFE_MUTEX -DSAFEMALLOC $CFLAGS"
- CXXFLAGS="$DEBUG_CXXFLAGS -DDBUG_ON -DSAFE_MUTEX -DSAFEMALLOC $CXXFLAGS"
+ CFLAGS="$DEBUG_CFLAGS -DDBUG_ON -DSAFE_MUTEX -DSAFEMALLOC -DUNIV_DEBUG $CFLAGS"
+ CXXFLAGS="$DEBUG_CXXFLAGS -DDBUG_ON -DSAFE_MUTEX -DSAFEMALLOC -DUNIV_DEBUG $CXXFLAGS"
else
# Optimized version. No debug
CFLAGS="$OPTIMIZE_CFLAGS -DDBUG_OFF $CFLAGS"
@@ -2826,7 +2828,7 @@ dnl This probably should be cleaned up more - for now the threaded
dnl client is just using plain-old libs.
sql_client_dirs="libmysql strings regex client"
linked_client_targets="linked_libmysql_sources"
-CLIENT_LIBS=$NON_THREADED_CLIENT_LIBS
+
if test "$THREAD_SAFE_CLIENT" != "no"
then
sql_client_dirs="libmysql_r $sql_client_dirs"
@@ -2834,9 +2836,11 @@ then
AC_DEFINE([THREAD_SAFE_CLIENT], [1], [Should be client be thread safe])
fi
-CLIENT_LIBS="$CLIENT_LIBS $STATIC_NSS_FLAGS"
+CLIENT_LIBS="$NON_THREADED_LIBS $openssl_libs $ZLIB_LIBS $STATIC_NSS_FLAGS"
AC_SUBST(CLIENT_LIBS)
+AC_SUBST(NON_THREADED_LIBS)
+AC_SUBST(STATIC_NSS_FLAGS)
AC_SUBST(sql_client_dirs)
AC_SUBST(linked_client_targets)
@@ -2954,6 +2958,15 @@ EOF
AC_CONFIG_SUBDIRS(innobase)
fi
+case $SYSTEM_TYPE-$MACHINE_TYPE-$ac_cv_prog_gcc-$have_ndbcluster in
+ *solaris*-i?86-no-yes)
+ # ndb fail for whatever strange reason to link Sun Forte/x86
+ # unless using incremental linker
+ CXXFLAGS="$CXXFLAGS -xildon"
+ ;;
+ *) ;;
+esac
+
if test X"$have_ndbcluster" = Xyes
then
if test X"$mysql_cv_compress" != Xyes
diff --git a/extra/perror.c b/extra/perror.c
index fc10d8eaecc..27027520cbe 100644
--- a/extra/perror.c
+++ b/extra/perror.c
@@ -213,12 +213,14 @@ int main(int argc,char *argv[])
string 'Unknown Error'. To avoid printing it we try to find the
error string by asking for an impossible big error message.
*/
- msg = strerror(10000);
+ msg= strerror(10000);
- /* allocate a buffer for unknown_error since strerror always returns the same pointer
- on some platforms such as Windows */
- unknown_error = malloc( strlen(msg)+1 );
- strcpy( unknown_error, msg );
+ /*
+ Allocate a buffer for unknown_error since strerror always returns
+ the same pointer on some platforms such as Windows
+ */
+ unknown_error= malloc(strlen(msg)+1);
+ strmov(unknown_error, msg);
for ( ; argc-- > 0 ; argv++)
{
@@ -271,7 +273,7 @@ int main(int argc,char *argv[])
/* if we allocated a buffer for unknown_error, free it now */
if (unknown_error)
- free(unknown_error);
+ free(unknown_error);
exit(error);
return error;
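The comment added to perror.c is worth spelling out: on some platforms strerror() returns a pointer into a static buffer that later calls overwrite, so the "Unknown error" text must be copied before strerror() is called again. A standalone C sketch of the same pattern (using plain strcpy() where the patch uses MySQL's strmov()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        /* Ask for an impossibly large errno to get the "unknown" text */
        const char *msg = strerror(10000);
        char *unknown_error;

        if (!msg)
            return 1;
        unknown_error = malloc(strlen(msg) + 1);
        if (!unknown_error)
            return 1;
        strcpy(unknown_error, msg);            /* keep a private copy */

        printf("saved: %s\n", unknown_error);
        printf("now:   %s\n", strerror(1));    /* may reuse the same buffer */

        free(unknown_error);
        return 0;
    }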
diff --git a/include/ft_global.h b/include/ft_global.h
index 94f6ad9ef51..c3f60d13a7a 100644
--- a/include/ft_global.h
+++ b/include/ft_global.h
@@ -62,7 +62,7 @@ void ft_free_stopwords(void);
#define FT_SORTED 2
#define FT_EXPAND 4 /* query expansion */
-FT_INFO *ft_init_search(uint,void *, uint, byte *, uint, byte *);
+FT_INFO *ft_init_search(uint,void *, uint, byte *, uint,CHARSET_INFO *, byte *);
my_bool ft_boolean_check_syntax_string(const byte *);
#ifdef __cplusplus
diff --git a/include/my_global.h b/include/my_global.h
index 3263d079853..7ca3d5e1e58 100644
--- a/include/my_global.h
+++ b/include/my_global.h
@@ -135,7 +135,13 @@
#ifdef HAVE_UNIXWARE7_THREADS
#include <thread.h>
#else
+#if defined(HPUX10) || defined(HPUX11)
+C_MODE_START /* HPUX needs this, signal.h bug */
+#include <pthread.h>
+C_MODE_END
+#else
#include <pthread.h> /* AIX must have this included first */
+#endif
#endif /* HAVE_UNIXWARE7_THREADS */
#endif /* HAVE_mit_thread */
#if !defined(SCO) && !defined(_REENTRANT)
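C_MODE_START/C_MODE_END are MySQL's wrappers for forcing C linkage around a header when compiling as C++. The definitions below are a sketch of what they are assumed to expand to (see include/my_global.h for the real ones), which is presumably how wrapping <pthread.h> sidesteps the HP-UX signal.h problem mentioned in the comment:

    /* Assumed expansion of the C_MODE_* macros (sketch, not the patch) */
    #ifdef __cplusplus
    #define C_MODE_START extern "C" {
    #define C_MODE_END   }
    #else
    #define C_MODE_START
    #define C_MODE_END
    #endif

    C_MODE_START                 /* force C linkage for the declarations */
    #include <pthread.h>
    C_MODE_END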
diff --git a/include/mysql.h b/include/mysql.h
index 2c0197e2300..d8a56126756 100644
--- a/include/mysql.h
+++ b/include/mysql.h
@@ -334,6 +334,17 @@ typedef struct st_mysql_parameters
*/
int STDCALL mysql_server_init(int argc, char **argv, char **groups);
void STDCALL mysql_server_end(void);
+/*
+ mysql_server_init/end need to be called when using libmysqld or
+ libmysqlclient (exactly, mysql_server_init() is called by mysql_init() so
+ you don't need to call it explicitely; but you need to call
+ mysql_server_end() to free memory). The names are a bit misleading
+ (mysql_SERVER* to be used when using libmysqlCLIENT). So we add more general
+ names which suit well whether you're using libmysqld or libmysqlclient. We
+ intend to promote these aliases over the mysql_server* ones.
+*/
+#define mysql_library_init mysql_server_init
+#define mysql_library_end mysql_server_end
MYSQL_PARAMETERS *STDCALL mysql_get_parameters(void);
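Since mysql_library_init/mysql_library_end are plain #define aliases for mysql_server_init/mysql_server_end, existing clients can switch to the new names with no other change. A minimal usage sketch (connection details omitted):

    #include <stdio.h>
    #include <mysql.h>

    int main(void)
    {
        MYSQL *con;

        if (mysql_library_init(0, NULL, NULL))   /* same as mysql_server_init() */
        {
            fprintf(stderr, "could not initialize MySQL client library\n");
            return 1;
        }

        con = mysql_init(NULL);
        /* ... mysql_real_connect(con, ...), run queries ... */
        mysql_close(con);

        mysql_library_end();                     /* same as mysql_server_end() */
        return 0;
    }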
diff --git a/innobase/include/univ.i b/innobase/include/univ.i
index 6ae4fe1c2ce..625978ffc38 100644
--- a/innobase/include/univ.i
+++ b/innobase/include/univ.i
@@ -80,10 +80,6 @@ memory is read outside the allocated blocks. */
/* Make a non-inline debug version */
-#ifdef DBUG_ON
-#define UNIV_DEBUG
-#endif /* DBUG_ON */
-
/*
#define UNIV_DEBUG
#define UNIV_MEM_DEBUG
diff --git a/myisam/ft_boolean_search.c b/myisam/ft_boolean_search.c
index aab3854dd34..4253b5ff96f 100644
--- a/myisam/ft_boolean_search.c
+++ b/myisam/ft_boolean_search.c
@@ -365,6 +365,7 @@ static void _ftb_init_index_search(FT_INFO *ftb)
reset_tree(& ftb->no_dupes);
}
+ ftbw->off=0; /* in case of reinit */
if (_ft2_search(ftb, ftbw, 1))
return;
}
@@ -373,7 +374,7 @@ static void _ftb_init_index_search(FT_INFO *ftb)
FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query,
- uint query_len)
+ uint query_len, CHARSET_INFO *cs)
{
FTB *ftb;
FTB_EXPR *ftbe;
@@ -385,8 +386,8 @@ FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query,
ftb->state=UNINITIALIZED;
ftb->info=info;
ftb->keynr=keynr;
- ftb->charset= ((keynr==NO_SUCH_KEY) ?
- default_charset_info : info->s->keyinfo[keynr].seg->charset);
+ ftb->charset=cs;
+ DBUG_ASSERT(keynr==NO_SUCH_KEY || cs == info->s->keyinfo[keynr].seg->charset);
ftb->with_scan=0;
ftb->lastpos=HA_OFFSET_ERROR;
bzero(& ftb->no_dupes, sizeof(TREE));
diff --git a/myisam/ft_static.c b/myisam/ft_static.c
index 7168406d027..994a94d0c49 100644
--- a/myisam/ft_static.c
+++ b/myisam/ft_static.c
@@ -55,11 +55,12 @@ const struct _ft_vft _ft_vft_boolean = {
FT_INFO *ft_init_search(uint flags, void *info, uint keynr,
- byte *query, uint query_len, byte *record)
+ byte *query, uint query_len, CHARSET_INFO *cs,
+ byte *record)
{
FT_INFO *res;
if (flags & FT_BOOL)
- res= ft_init_boolean_search((MI_INFO *)info, keynr, query, query_len);
+ res= ft_init_boolean_search((MI_INFO *)info, keynr, query, query_len,cs);
else
res= ft_init_nlq_search((MI_INFO *)info, keynr, query, query_len, flags,
record);
diff --git a/myisam/ftdefs.h b/myisam/ftdefs.h
index e7a0829e140..ddb9fbfead2 100644
--- a/myisam/ftdefs.h
+++ b/myisam/ftdefs.h
@@ -131,7 +131,7 @@ FT_WORD * _mi_ft_parserecord(MI_INFO *, uint, const byte *);
uint _mi_ft_parse(TREE *, MI_INFO *, uint, const byte *, my_bool);
FT_INFO *ft_init_nlq_search(MI_INFO *, uint, byte *, uint, uint, byte *);
-FT_INFO *ft_init_boolean_search(MI_INFO *, uint, byte *, uint);
+FT_INFO *ft_init_boolean_search(MI_INFO *, uint, byte *, uint, CHARSET_INFO *);
extern const struct _ft_vft _ft_vft_nlq;
int ft_nlq_read_next(FT_INFO *, char *);
diff --git a/mysql-test/include/have_ndb.inc b/mysql-test/include/have_ndb.inc
index 84e60657876..9b85197abe8 100644
--- a/mysql-test/include/have_ndb.inc
+++ b/mysql-test/include/have_ndb.inc
@@ -1,3 +1,4 @@
+--exec test x$NDB_STATUS_OK = x1
-- require r/have_ndb.require
disable_query_log;
show variables like "have_ndbcluster";
diff --git a/mysql-test/lib/mtr_cases.pl b/mysql-test/lib/mtr_cases.pl
new file mode 100644
index 00000000000..5977bb380cf
--- /dev/null
+++ b/mysql-test/lib/mtr_cases.pl
@@ -0,0 +1,270 @@
+# -*- cperl -*-
+
+# This is a library file used by the Perl version of mysql-test-run,
+# and is part of the translation of the Bourne shell script with the
+# same name.
+
+use strict;
+
+sub collect_test_cases ($);
+sub collect_one_test_case ($$$$$);
+
+##############################################################################
+#
+# Collect information about test cases we are to run
+#
+##############################################################################
+
+sub collect_test_cases ($) {
+ my $suite= shift; # Test suite name
+
+ my $testdir;
+ my $resdir;
+
+ if ( $suite eq "main" )
+ {
+ $testdir= "$::glob_mysql_test_dir/t";
+ $resdir= "$::glob_mysql_test_dir/r";
+ }
+ else
+ {
+ $testdir= "$::glob_mysql_test_dir/suite/$suite/t";
+ $resdir= "$::glob_mysql_test_dir/suite/$suite/r";
+ }
+
+ my $cases = []; # Array of hash, will be array of C struct
+
+ opendir(TESTDIR, $testdir) or mtr_error("Can't open dir \"$testdir\": $!");
+
+ if ( @::opt_cases )
+ {
+ foreach my $tname ( @::opt_cases ) { # Run in specified order, no sort
+ my $elem= "$tname.test";
+ if ( ! -f "$testdir/$elem")
+ {
+ mtr_error("Test case $tname ($testdir/$elem) is not found");
+ }
+ collect_one_test_case($testdir,$resdir,$tname,$elem,$cases);
+ }
+ closedir TESTDIR;
+ }
+ else
+ {
+ foreach my $elem ( sort readdir(TESTDIR) ) {
+ my $tname= mtr_match_extension($elem,"test");
+ next if ! defined $tname;
+ next if $::opt_do_test and ! defined mtr_match_prefix($elem,$::opt_do_test);
+
+ collect_one_test_case($testdir,$resdir,$tname,$elem,$cases);
+ }
+ closedir TESTDIR;
+ }
+
+ # To speed things up, we sort first in if the test require a restart
+ # or not, second in alphanumeric order.
+
+# @$cases = sort {
+# if ( $a->{'master_restart'} and $b->{'master_restart'} or
+# ! $a->{'master_restart'} and ! $b->{'master_restart'} )
+# {
+# return $a->{'name'} cmp $b->{'name'};
+# }
+# if ( $a->{'master_restart'} )
+# {
+# return 1; # Is greater
+# }
+# else
+# {
+# return -1; # Is less
+# }
+# } @$cases;
+
+ return $cases;
+}
+
+
+##############################################################################
+#
+# Collect information about a single test case
+#
+##############################################################################
+
+
+sub collect_one_test_case($$$$$) {
+ my $testdir= shift;
+ my $resdir= shift;
+ my $tname= shift;
+ my $elem= shift;
+ my $cases= shift;
+
+ my $path= "$testdir/$elem";
+
+ # ----------------------------------------------------------------------
+ # Skip some tests silently
+ # ----------------------------------------------------------------------
+
+ if ( $::opt_start_from and $tname lt $::opt_start_from )
+ {
+ return;
+ }
+
+ # ----------------------------------------------------------------------
+ # Skip some tests but include in list, just mark them to skip
+ # ----------------------------------------------------------------------
+
+ my $tinfo= {};
+ $tinfo->{'name'}= $tname;
+ $tinfo->{'result_file'}= "$resdir/$tname.result";
+ push(@$cases, $tinfo);
+
+ if ( $::opt_skip_test and defined mtr_match_prefix($tname,$::opt_skip_test) )
+ {
+ $tinfo->{'skip'}= 1;
+ return;
+ }
+
+ # FIXME temporary solution, we have a hard coded list of test cases to
+ # skip if we are using the embedded server
+
+ if ( $::glob_use_embedded_server and
+ mtr_match_any_exact($tname,\@::skip_if_embedded_server) )
+ {
+ $tinfo->{'skip'}= 1;
+ return;
+ }
+
+ # ----------------------------------------------------------------------
+ # Collect information about test case
+ # ----------------------------------------------------------------------
+
+ $tinfo->{'path'}= $path;
+ $tinfo->{'timezone'}= "GMT-3"; # for UNIX_TIMESTAMP tests to work
+
+ if ( defined mtr_match_prefix($tname,"rpl") )
+ {
+ if ( $::opt_skip_rpl )
+ {
+ $tinfo->{'skip'}= 1;
+ return;
+ }
+
+ $tinfo->{'slave_num'}= 1; # Default, use one slave
+
+ # FIXME currently we always restart slaves
+ $tinfo->{'slave_restart'}= 1;
+
+ if ( $tname eq 'rpl_failsafe' or $tname eq 'rpl_chain_temp_table' )
+ {
+# $tinfo->{'slave_num'}= 3; # Not 3 ? Check old code, strange
+ }
+ }
+
+ # FIXME what about embedded_server + ndbcluster, skip ?!
+
+ my $master_opt_file= "$testdir/$tname-master.opt";
+ my $slave_opt_file= "$testdir/$tname-slave.opt";
+ my $slave_mi_file= "$testdir/$tname.slave-mi";
+ my $master_sh= "$testdir/$tname-master.sh";
+ my $slave_sh= "$testdir/$tname-slave.sh";
+ my $disabled= "$testdir/$tname.disabled";
+
+ $tinfo->{'master_opt'}= [];
+ $tinfo->{'slave_opt'}= [];
+ $tinfo->{'slave_mi'}= [];
+
+ if ( -f $master_opt_file )
+ {
+ $tinfo->{'master_restart'}= 1; # We think so for now
+ # This is a dirty hack from old mysql-test-run, we use the opt file
+ # to flag other things as well, it is not a opt list at all
+ my $extra_master_opt= mtr_get_opts_from_file($master_opt_file);
+
+ foreach my $opt (@$extra_master_opt)
+ {
+ my $value;
+
+ $value= mtr_match_prefix($opt, "--timezone=");
+
+ if ( defined $value )
+ {
+ $tinfo->{'timezone'}= $value;
+ $extra_master_opt= [];
+ $tinfo->{'master_restart'}= 0;
+ last;
+ }
+
+ $value= mtr_match_prefix($opt, "--result-file=");
+
+ if ( defined $value )
+ {
+ $tinfo->{'result_file'}= "r/$value.result";
+ if ( $::opt_result_ext and $::opt_record or
+ -f "$tinfo->{'result_file'}$::opt_result_ext")
+ {
+ $tinfo->{'result_file'}.= $::opt_result_ext;
+ }
+ $extra_master_opt= [];
+ $tinfo->{'master_restart'}= 0;
+ last;
+ }
+ }
+
+ $tinfo->{'master_opt'}= $extra_master_opt;
+ }
+
+ if ( -f $slave_opt_file )
+ {
+ $tinfo->{'slave_opt'}= mtr_get_opts_from_file($slave_opt_file);
+ $tinfo->{'slave_restart'}= 1;
+ }
+
+ if ( -f $slave_mi_file )
+ {
+ $tinfo->{'slave_mi'}= mtr_get_opts_from_file($slave_mi_file);
+ $tinfo->{'slave_restart'}= 1;
+ }
+
+ if ( -f $master_sh )
+ {
+ if ( $::glob_win32_perl )
+ {
+ $tinfo->{'skip'}= 1;
+ }
+ else
+ {
+ $tinfo->{'master_sh'}= $master_sh;
+ $tinfo->{'master_restart'}= 1;
+ }
+ }
+
+ if ( -f $slave_sh )
+ {
+ if ( $::glob_win32_perl )
+ {
+ $tinfo->{'skip'}= 1;
+ }
+ else
+ {
+ $tinfo->{'slave_sh'}= $slave_sh;
+ $tinfo->{'slave_restart'}= 1;
+ }
+ }
+
+ if ( -f $disabled )
+ {
+ $tinfo->{'skip'}= 1;
+ $tinfo->{'disable'}= 1; # Sub type of 'skip'
+ $tinfo->{'comment'}= mtr_fromfile($disabled);
+ }
+
+ # We can't restart a running server that may be in use
+
+ if ( $::glob_use_running_server and
+ ( $tinfo->{'master_restart'} or $tinfo->{'slave_restart'} ) )
+ {
+ $tinfo->{'skip'}= 1;
+ }
+}
+
+
+1;
diff --git a/mysql-test/lib/mtr_io.pl b/mysql-test/lib/mtr_io.pl
index 017ba11645b..b3da6d97664 100644
--- a/mysql-test/lib/mtr_io.pl
+++ b/mysql-test/lib/mtr_io.pl
@@ -8,6 +8,7 @@ use strict;
sub mtr_get_pid_from_file ($);
sub mtr_get_opts_from_file ($);
+sub mtr_fromfile ($);
sub mtr_tofile ($@);
sub mtr_tonewfile($@);
@@ -107,6 +108,8 @@ sub mtr_fromfile ($) {
open(FILE,"<",$file) or mtr_error("can't open file \"$file\": $!");
my $text= join('', <FILE>);
close FILE;
+ $text =~ s/^\s+//; # Remove starting space, incl newlines
+ $text =~ s/\s+$//; # Remove ending space, incl newlines
return $text;
}
diff --git a/mysql-test/lib/mtr_process.pl b/mysql-test/lib/mtr_process.pl
index e832468d0cb..e1461a9730c 100644
--- a/mysql-test/lib/mtr_process.pl
+++ b/mysql-test/lib/mtr_process.pl
@@ -5,14 +5,19 @@
# same name.
#use Carp qw(cluck);
+use Socket;
+use Errno;
use strict;
-use POSIX ":sys_wait_h";
+#use POSIX ":sys_wait_h";
+use POSIX 'WNOHANG';
sub mtr_run ($$$$$$);
sub mtr_spawn ($$$$$$);
-sub mtr_stop_mysqld_servers ($$);
+sub mtr_stop_mysqld_servers ($);
sub mtr_kill_leftovers ();
+sub mtr_record_dead_children ();
+sub sleep_until_file_created ($$$);
# static in C
sub spawn_impl ($$$$$$$);
@@ -34,7 +39,18 @@ sub mtr_run ($$$$$$) {
my $error= shift;
my $pid_file= shift;
- return spawn_impl($path,$arg_list_t,1,$input,$output,$error,$pid_file);
+ return spawn_impl($path,$arg_list_t,'run',$input,$output,$error,$pid_file);
+}
+
+sub mtr_run_test ($$$$$$) {
+ my $path= shift;
+ my $arg_list_t= shift;
+ my $input= shift;
+ my $output= shift;
+ my $error= shift;
+ my $pid_file= shift;
+
+ return spawn_impl($path,$arg_list_t,'test',$input,$output,$error,$pid_file);
}
sub mtr_spawn ($$$$$$) {
@@ -45,7 +61,7 @@ sub mtr_spawn ($$$$$$) {
my $error= shift;
my $pid_file= shift;
- return spawn_impl($path,$arg_list_t,0,$input,$output,$error,$pid_file);
+ return spawn_impl($path,$arg_list_t,'spawn',$input,$output,$error,$pid_file);
}
@@ -58,7 +74,7 @@ sub mtr_spawn ($$$$$$) {
sub spawn_impl ($$$$$$$) {
my $path= shift;
my $arg_list_t= shift;
- my $join= shift;
+ my $mode= shift;
my $input= shift;
my $output= shift;
my $error= shift;
@@ -71,107 +87,203 @@ sub spawn_impl ($$$$$$$) {
print STDERR "#### ", "STDIN $input\n" if $input;
print STDERR "#### ", "STDOUT $output\n" if $output;
print STDERR "#### ", "STDERR $error\n" if $error;
- if ( $join )
- {
- print STDERR "#### ", "RUN ";
- }
- else
- {
- print STDERR "#### ", "SPAWN ";
- }
- print STDERR "$path ", join(" ",@$arg_list_t), "\n";
+ print STDERR "#### ", "$mode : $path ", join(" ",@$arg_list_t), "\n";
print STDERR "#### ", "-" x 78, "\n";
}
- my $pid= fork();
- if ( ! defined $pid )
+ FORK:
{
- mtr_error("$path ($pid) can't be forked");
- }
+ my $pid= fork();
- if ( $pid )
- {
- # Parent, i.e. the main script
- if ( $join )
+ if ( ! defined $pid )
{
- # We run a command and wait for the result
- # FIXME this need to be improved
- my $res= waitpid($pid,0);
-
- if ( $res == -1 )
+ if ( $! == $!{EAGAIN} ) # See "perldoc Errno"
{
- mtr_error("$path ($pid) got lost somehow");
+ mtr_debug("Got EAGAIN from fork(), sleep 1 second and redo");
+ sleep(1);
+ redo FORK;
}
- my $exit_value= $? >> 8;
- my $signal_num= $? & 127;
- my $dumped_core= $? & 128;
- if ( $signal_num )
- {
- mtr_error("$path ($pid) got signal $signal_num");
- }
- if ( $dumped_core )
+ else
{
- mtr_error("$path ($pid) dumped core");
+ mtr_error("$path ($pid) can't be forked");
}
- return $exit_value;
+ }
+
+ if ( $pid )
+ {
+ spawn_parent_impl($pid,$mode,$path);
}
else
{
- # We spawned a process we don't wait for
- return $pid;
+ # Child, redirect output and exec
+ # FIXME I tried POSIX::setsid() here to detach and, I hoped,
+ # avoid zombies. But everything went wild, somehow the parent
+ # became a deamon as well, and was hard to kill ;-)
+ # Need to catch SIGCHLD and do waitpid or something instead......
+
+ $SIG{INT}= 'DEFAULT'; # Parent do some stuff, we don't
+
+ if ( $output )
+ {
+ if ( ! open(STDOUT,">",$output) )
+ {
+ mtr_error("can't redirect STDOUT to \"$output\": $!");
+ }
+ }
+ if ( $error )
+ {
+ if ( $output eq $error )
+ {
+ if ( ! open(STDERR,">&STDOUT") )
+ {
+ mtr_error("can't dup STDOUT: $!");
+ }
+ }
+ else
+ {
+ if ( ! open(STDERR,">",$error) )
+ {
+ mtr_error("can't redirect STDERR to \"$output\": $!");
+ }
+ }
+ }
+ if ( $input )
+ {
+ if ( ! open(STDIN,"<",$input) )
+ {
+ mtr_error("can't redirect STDIN to \"$input\": $!");
+ }
+ }
+ exec($path,@$arg_list_t);
}
}
- else
- {
- # Child, redirect output and exec
- # FIXME I tried POSIX::setsid() here to detach and, I hoped,
- # avoid zombies. But everything went wild, somehow the parent
- # became a deamon as well, and was hard to kill ;-)
- # Need to catch SIGCHLD and do waitpid or something instead......
+}
+
+
+sub spawn_parent_impl {
+ my $pid= shift;
+ my $mode= shift;
+ my $path= shift;
- $SIG{INT}= 'DEFAULT'; # Parent do some stuff, we don't
+ if ( $mode eq 'run' or $mode eq 'test' )
+ {
+ my $exit_value= -1;
+ my $signal_num= 0;
+ my $dumped_core= 0;
- if ( $output )
+ if ( $mode eq 'run' )
{
- if ( ! open(STDOUT,">",$output) )
+ # Simple run of command, we wait for it to return
+ my $ret_pid= waitpid($pid,0);
+
+ if ( $ret_pid <= 0 )
{
- mtr_error("can't redirect STDOUT to \"$output\": $!");
+ mtr_error("$path ($pid) got lost somehow");
}
+
+ $exit_value= $? >> 8;
+ $signal_num= $? & 127;
+ $dumped_core= $? & 128;
+
+ return $exit_value;
}
- if ( $error )
+ else
{
- if ( $output eq $error )
+ # We run mysqltest and wait for it to return. But we try to
+ # catch dying mysqld processes as well.
+ #
+ # We do blocking waitpid() until we get the return from the
+ # "mysqltest" call. But if a mysqld process dies that we
+ # started, we take this as an error, and kill mysqltest.
+ #
+ # FIXME is this as it should be? Can't mysqld terminate
+ # normally from running a test case?
+
+ my $ret_pid; # What waitpid() returns
+
+ while ( ($ret_pid= waitpid(-1,0)) != -1 )
{
- if ( ! open(STDERR,">&STDOUT") )
+ # Someone terminated, don't know who. Collect
+ # status info first before $? is lost,
+ # but not $exit_value, this is flagged from
+ #
+
+ if ( $ret_pid == $pid )
{
- mtr_error("can't dup STDOUT: $!");
+ # We got termination of mysqltest, we are done
+ $exit_value= $? >> 8;
+ $signal_num= $? & 127;
+ $dumped_core= $? & 128;
+ last;
}
- }
- else
- {
- if ( ! open(STDERR,">",$error) )
+
+ # If one of the mysqld processes died, we want to
+ # mark this, and kill the mysqltest process.
+
+ foreach my $idx (0..1)
{
- mtr_error("can't redirect STDERR to \"$output\": $!");
+ if ( $::master->[$idx]->{'pid'} eq $ret_pid )
+ {
+ mtr_debug("child $ret_pid was master[$idx], " .
+ "exit during mysqltest run");
+ $::master->[$idx]->{'pid'}= 0;
+ last;
+ }
}
+
+ foreach my $idx (0..2)
+ {
+ if ( $::slave->[$idx]->{'pid'} eq $ret_pid )
+ {
+ mtr_debug("child $ret_pid was slave[$idx], " .
+ "exit during mysqltest run");
+ $::slave->[$idx]->{'pid'}= 0;
+ last;
+ }
+ }
+
+ mtr_debug("waitpid() catched exit of unknown child $ret_pid, " .
+ "exit during mysqltest run");
}
- }
- if ( $input )
- {
- if ( ! open(STDIN,"<",$input) )
+
+ if ( $ret_pid != $pid )
{
- mtr_error("can't redirect STDIN to \"$input\": $!");
+ # We terminated the waiting because a "mysqld" process died.
+ # Kill the mysqltest process.
+
+ kill(9,$pid);
+
+ $ret_pid= waitpid($pid,0);
+
+ if ( $ret_pid == -1 )
+ {
+ mtr_error("$path ($pid) got lost somehow");
+ }
}
+
+ return $exit_value;
}
- exec($path,@$arg_list_t);
+ }
+ else
+ {
+ # We spawned a process we don't wait for
+ return $pid;
}
}
+
+
##############################################################################
#
# Kill processes left from previous runs
#
##############################################################################
+# We just "ping" on the ports, and if we can't do a socket connect
+# we assume the server is dead. So we don't *really* know a server
+# is dead, we just hope that it after letting the listen port go,
+# it is dead enough for us to start a new server.
+
sub mtr_kill_leftovers () {
# First, kill all masters and slaves that would conflict with
@@ -199,10 +311,23 @@ sub mtr_kill_leftovers () {
});
}
- mtr_stop_mysqld_servers(\@args, 1);
+ mtr_mysqladmin_shutdown(\@args);
+
+ # We now have tried to terminate nice. We have waited for the listen
+ # port to be free, but can't really tell if the mysqld process died
+ # or not. We now try to find the process PID from the PID file, and
+ # send a kill to that process. Note that Perl let kill(0,@pids) be
+ # a way to just return the numer of processes the kernel can send
+ # signals to. So this can be used (except on Cygwin) to determine
+ # if there are processes left running that we cound out might exists.
+ #
+ # But still after all this work, all we know is that we have
+ # the ports free.
# We scan the "var/run/" directory for other process id's to kill
- my $rundir= "$::glob_mysql_test_dir/var/run"; # FIXME $path_run_dir or something
+
+ # FIXME $path_run_dir or something
+ my $rundir= "$::glob_mysql_test_dir/var/run";
if ( -d $rundir )
{
@@ -218,193 +343,157 @@ sub mtr_kill_leftovers () {
if ( -f $pidfile )
{
my $pid= mtr_get_pid_from_file($pidfile);
- if ( ! unlink($pidfile) )
+
+ # Race, could have been removed between I tested with -f
+ # and the unlink() below, so I better check again with -f
+
+ if ( ! unlink($pidfile) and -f $pidfile )
{
mtr_error("can't remove $pidfile");
}
- push(@pids, $pid);
+
+ if ( $::glob_cygwin_perl or kill(0, $pid) )
+ {
+ push(@pids, $pid); # We know (cygwin guess) it exists
+ }
}
}
closedir(RUNDIR);
- start_reap_all();
-
- if ( $::glob_cygwin_perl )
+ if ( @pids )
{
- # We have no (easy) way of knowing the Cygwin controlling
- # process, in the PID file we only have the Windows process id.
- system("kill -f " . join(" ",@pids)); # Hope for the best....
- }
- else
- {
- my $retries= 10; # 10 seconds
- do
+ if ( $::glob_cygwin_perl )
{
- kill(9, @pids);
- } while ( $retries-- and kill(0, @pids) );
-
- if ( kill(0, @pids) )
+ # We have no (easy) way of knowing the Cygwin controlling
+ # process, in the PID file we only have the Windows process id.
+ system("kill -f " . join(" ",@pids)); # Hope for the best....
+ mtr_debug("Sleep 5 seconds waiting for processes to die");
+ sleep(5);
+ }
+ else
{
- mtr_error("can't kill processes " . join(" ", @pids));
+ my $retries= 10; # 10 seconds
+ do
+ {
+ kill(9, @pids);
+ mtr_debug("Sleep 1 second waiting for processes to die");
+ sleep(1) # Wait one second
+ } while ( $retries-- and kill(0, @pids) );
+
+ if ( kill(0, @pids) ) # Check if some left
+ {
+ # FIXME maybe just mtr_warning() ?
+ mtr_error("can't kill process(es) " . join(" ", @pids));
+ }
}
}
+ }
+
+ # We may have failed everything, bug we now check again if we have
+ # the listen ports free to use, and if they are free, just go for it.
- stop_reap_all();
+ foreach my $srv ( @args )
+ {
+ if ( mtr_ping_mysqld_server($srv->{'port'}, $srv->{'sockfile'}) )
+ {
+ mtr_error("can't kill old mysqld holding port $srv->{'port'}");
+ }
}
}
##############################################################################
#
-# Shut down mysqld servers
+# Shut down mysqld servers we have started from this run of this script
#
##############################################################################
-# To speed things we kill servers in parallel.
-# The argument is a list of 'pidfiles' and 'socketfiles'.
-# We use the pidfiles and socketfiles to try to terminate the servers.
-# This is not perfect, there could still be other server processes
-# left.
-
-# Force flag is to be set only for killing mysqld servers this script
-# didn't create in this run, i.e. initial cleanup before we start working.
-# If force flag is set, we try to kill all with mysqladmin, and
-# give up if we have no PIDs.
+# To speed things we kill servers in parallel. The argument is a list
+# of 'ports', 'pids', 'pidfiles' and 'socketfiles'.
-# FIXME On some operating systems, $srv->{'pid'} and $srv->{'pidfile'}
-# will not be the same PID. We need to try to kill both I think.
+# FIXME On Cygwin, and maybe some other platforms, $srv->{'pid'} and
+# $srv->{'pidfile'} will not be the same PID. We need to try to kill
+# both I think.
-sub mtr_stop_mysqld_servers ($$) {
+sub mtr_stop_mysqld_servers ($) {
my $spec= shift;
- my $force= shift;
# ----------------------------------------------------------------------
- # If the process was not started from this file, we got no PID,
- # we try to find it in the PID file.
+ # First try nice normal shutdown using 'mysqladmin'
# ----------------------------------------------------------------------
- my $any_pid= 0; # If we have any PIDs
+ mtr_mysqladmin_shutdown($spec);
+
+ # ----------------------------------------------------------------------
+ # We loop with waitpid() nonblocking to see how many of the ones we
+ # are to kill, actually got killed by mtr_mysqladmin_shutdown().
+ # Note that we don't rely on this, the mysqld server might have stop
+ # listening to the port, but still be alive. But it is a start.
+ # ----------------------------------------------------------------------
foreach my $srv ( @$spec )
{
- if ( ! $srv->{'pid'} and -f $srv->{'pidfile'} )
- {
- $srv->{'pid'}= mtr_get_pid_from_file($srv->{'pidfile'});
- }
- if ( $srv->{'pid'} )
+ if ( $srv->{'pid'} and (waitpid($srv->{'pid'},&WNOHANG) == $srv->{'pid'}) )
{
- $any_pid= 1;
+ $srv->{'pid'}= 0;
}
}
- # If the processes where started from this script, and we know
- # no PIDs, then we don't have to do anything.
-
- if ( ! $any_pid and ! $force )
- {
- # cluck "This is how we got here!";
- return;
- }
-
# ----------------------------------------------------------------------
- # First try nice normal shutdown using 'mysqladmin'
+ # We know the process was started from this file, so there is a PID
+ # saved, or else we have nothing to do.
+ # Might be that is is recorded to be missing, but we failed to
+ # take away the PID file earlier, then we do it now.
# ----------------------------------------------------------------------
- start_reap_all(); # Don't require waitpid() of children
+ my %mysqld_pids;
foreach my $srv ( @$spec )
{
- if ( -e $srv->{'sockfile'} or $srv->{'port'} )
+ if ( $srv->{'pid'} )
{
- # FIXME wrong log.....
- # FIXME, stderr.....
- # Shutdown time must be high as slave may be in reconnect
- my $args;
-
- mtr_init_args(\$args);
-
- mtr_add_arg($args, "--no-defaults");
- mtr_add_arg($args, "--user=%s", $::opt_user);
- mtr_add_arg($args, "--password=");
- if ( -e $srv->{'sockfile'} )
- {
- mtr_add_arg($args, "--socket=%s", $srv->{'sockfile'});
- }
- if ( $srv->{'port'} )
- {
- mtr_add_arg($args, "--port=%s", $srv->{'port'});
- }
- mtr_add_arg($args, "--connect_timeout=5");
- mtr_add_arg($args, "--shutdown_timeout=20");
- mtr_add_arg($args, "--protocol=tcp"); # FIXME new thing, will it help?!
- mtr_add_arg($args, "shutdown");
- # We don't wait for termination of mysqladmin
- mtr_spawn($::exe_mysqladmin, $args,
- "", $::path_manager_log, $::path_manager_log, "");
+ $mysqld_pids{$srv->{'pid'}}= 1;
}
- }
-
- # Wait for them all to remove their pid and socket file
-
- PIDSOCKFILEREMOVED:
- for (my $loop= $::opt_sleep_time_for_delete; $loop; $loop--)
- {
- my $pidsockfiles_left= 0;
- foreach my $srv ( @$spec )
+ else
{
- if ( -e $srv->{'sockfile'} or -f $srv->{'pidfile'} )
+ # Race, could have been removed between I tested with -f
+ # and the unlink() below, so I better check again with -f
+
+ if ( -f $srv->{'pidfile'} and ! unlink($srv->{'pidfile'}) and
+ -f $srv->{'pidfile'} )
{
- $pidsockfiles_left++; # Could be that pidfile is left
+ mtr_error("can't remove $srv->{'pidfile'}");
}
}
- if ( ! $pidsockfiles_left )
- {
- last PIDSOCKFILEREMOVED;
- }
- if ( $loop % 20 == 1 )
- {
- mtr_warning("Still processes alive after 10 seconds, retrying for $loop seconds...");
- }
- mtr_debug("Sleep for 1 second waiting for pid and socket file removal");
- sleep(1); # One second
}
# ----------------------------------------------------------------------
- # If no known PIDs, we have nothing more to try
+ # If the processes where started from this script, and we had no PIDS
+ # then we don't have to do anything.
# ----------------------------------------------------------------------
- if ( ! $any_pid )
+ if ( ! keys %mysqld_pids )
{
- stop_reap_all();
+ # cluck "This is how we got here!";
return;
}
# ----------------------------------------------------------------------
- # We may have killed all that left a socket, but we are not sure we got
- # them all killed. If we suspect it lives, try nice kill with SIG_TERM.
- # Note that for true Win32 processes, kill(0,$pid) will not return 1.
+ # In mtr_mysqladmin_shutdown() we only waited for the mysqld servers
+ # not to listen to the port. But we are not sure we got them all
+ # killed. If we suspect it lives, try nice kill with SIG_TERM. Note
+ # that for true Win32 processes, kill(0,$pid) will not return 1.
# ----------------------------------------------------------------------
SIGNAL:
foreach my $sig (15,9)
{
- my $process_left= 0;
- foreach my $srv ( @$spec )
+ my $retries= 10; # 10 seconds
+ kill($sig, keys %mysqld_pids);
+ while ( $retries-- and kill(0, keys %mysqld_pids) )
{
- if ( $srv->{'pid'} and
- ( -f $srv->{'pidfile'} or kill(0,$srv->{'pid'}) ) )
- {
- $process_left++;
- mtr_warning("process $srv->{'pid'} not cooperating, " .
- "will send signal $sig to process");
- kill($sig,$srv->{'pid'}); # SIG_TERM
- }
- if ( ! $process_left )
- {
- last SIGNAL;
- }
+ mtr_debug("Sleep 1 second waiting for processes to die");
+ sleep(1) # Wait one second
}
- mtr_debug("Sleep for 5 seconds waiting for processes to die");
- sleep(5); # We wait longer than usual
}
# ----------------------------------------------------------------------
@@ -437,8 +526,8 @@ sub mtr_stop_mysqld_servers ($$) {
foreach my $file ($srv->{'pidfile'}, $srv->{'sockfile'})
{
- unlink($file);
- if ( -e $file )
+ # Know it is dead so should be no race, careful anyway
+ if ( -f $file and ! unlink($file) and -f $file )
{
$errors++;
mtr_warning("couldn't delete $file");
@@ -454,9 +543,147 @@ sub mtr_stop_mysqld_servers ($$) {
}
}
- stop_reap_all();
+ # FIXME We just assume they are all dead, for Cygwin we are not
+ # really sure
+
+}
+
+
+##############################################################################
+#
+# Shut down mysqld servers using "mysqladmin ... shutdown".
+# To speed this up, we start them in parallel and use waitpid() to
+# catch their termination. Note that this doesn't say the servers
+# are terminated, just that 'mysqladmin' is terminated.
+#
+# Note that mysqladmin will ask the server about what PID file it uses,
+# and mysqladmin will wait for it to be removed before it terminates
+# (unless passes timeout).
+#
+# This function will take at most about 20 seconds, and we still are not
+# sure we killed them all. If none is responding to ping, we return 1,
+# else we return 0.
+#
+##############################################################################
+
+sub mtr_mysqladmin_shutdown () {
+ my $spec= shift;
+
+ my @mysql_admin_pids;
+ my @to_kill_specs;
+
+ foreach my $srv ( @$spec )
+ {
+ if ( mtr_ping_mysqld_server($srv->{'port'}, $srv->{'sockfile'}) )
+ {
+ push(@to_kill_specs, $srv);
+ }
+ }
+
+
+ foreach my $srv ( @to_kill_specs )
+ {
+ # FIXME wrong log.....
+ # FIXME, stderr.....
+ # Shutdown time must be high as slave may be in reconnect
+ my $args;
+
+ mtr_init_args(\$args);
+
+ mtr_add_arg($args, "--no-defaults");
+ mtr_add_arg($args, "--user=%s", $::opt_user);
+ mtr_add_arg($args, "--password=");
+ if ( -e $srv->{'sockfile'} )
+ {
+ mtr_add_arg($args, "--socket=%s", $srv->{'sockfile'});
+ }
+ if ( $srv->{'port'} )
+ {
+ mtr_add_arg($args, "--port=%s", $srv->{'port'});
+ }
+ if ( $srv->{'port'} and ! -e $srv->{'sockfile'} )
+ {
+ mtr_add_arg($args, "--protocol=tcp"); # Needed if no --socket
+ }
+ mtr_add_arg($args, "--connect_timeout=5");
+ mtr_add_arg($args, "--shutdown_timeout=20");
+ mtr_add_arg($args, "shutdown");
+ # We don't wait for termination of mysqladmin
+ my $pid= mtr_spawn($::exe_mysqladmin, $args,
+ "", $::path_manager_log, $::path_manager_log, "");
+ push(@mysql_admin_pids, $pid);
+ }
+
+ # We wait blocking; we have to wait for the slowest one anyway
+ foreach my $pid (@mysql_admin_pids)
+ {
+ waitpid($pid,0); # FIXME no need to check -1 or 0?
+ }
- # FIXME We just assume they are all dead, we don't know....
+ # If we trusted "mysqladmin --shutdown_timeout= ..." we could just
+ # terminate now, but we don't (FIXME should be debugged).
+ # So we ping again and wait at least the same amount of time
+ # mysqladmin would have waited for them all to die.
+
+ my $timeout= 20; # 20 seconds max
+ my $res= 1; # If we just fall through, we are done
+
+ TIME:
+ while ( $timeout-- )
+ {
+ foreach my $srv ( @to_kill_specs )
+ {
+ $res= 1; # We are optimistic
+ if ( mtr_ping_mysqld_server($srv->{'port'}, $srv->{'sockfile'}) )
+ {
+ mtr_debug("Sleep 1 second waiting for processes to stop using port");
+ sleep(1); # One second
+ $res= 0;
+ next TIME;
+ }
+ }
+ last; # If we got here, we are done
+ }
+
+ return $res;
+}
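
A minimal usage sketch of mtr_mysqladmin_shutdown(): the 'port' and 'sockfile' keys match what the function reads from each spec above, while the concrete values and the surrounding logging are only illustrative.

  my @spec=
    ({ port => 9306, sockfile => "$::glob_mysql_test_dir/var/tmp/master.sock" },
     { port => 9308, sockfile => "$::glob_mysql_test_dir/var/tmp/slave.sock" });

  if ( mtr_mysqladmin_shutdown(\@spec) )
  {
    mtr_debug("No server answers ping any longer");
  }
  else
  {
    mtr_warning("Some server still answers ping, falling back to kill");
  }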
+
+##############################################################################
+#
+# The operating system keeps information about dead children. We read
+# that information here, and if our records say such a process is still
+# alive, we mark it as dead.
+#
+##############################################################################
+
+sub mtr_record_dead_children () {
+
+ my $ret_pid;
+
+ # FIXME the man page says waitpid(-1, ...) waits for any child to
+ # terminate, but on OS X we get '0' all the time...
+ while ( ($ret_pid= waitpid(-1,&WNOHANG)) > 0 )
+ {
+ mtr_debug("waitpid() catched exit of child $ret_pid");
+ foreach my $idx (0..1)
+ {
+ if ( $::master->[$idx]->{'pid'} eq $ret_pid )
+ {
+ mtr_debug("child $ret_pid was master[$idx]");
+ $::master->[$idx]->{'pid'}= 0;
+ }
+ }
+
+ foreach my $idx (0..2)
+ {
+ if ( $::slave->[$idx]->{'pid'} eq $ret_pid )
+ {
+ mtr_debug("child $ret_pid was slave[$idx]");
+ $::slave->[$idx]->{'pid'}= 0;
+ last;
+ }
+ }
+ }
}
sub start_reap_all {
@@ -467,6 +694,32 @@ sub stop_reap_all {
$SIG{CHLD}= 'DEFAULT';
}
+sub mtr_ping_mysqld_server () {
+ my $port= shift;
+
+ my $remote= "localhost";
+ my $iaddr= inet_aton($remote);
+ if ( ! $iaddr )
+ {
+ mtr_error("can't find IP number for $remote");
+ }
+ my $paddr= sockaddr_in($port, $iaddr);
+ my $proto= getprotobyname('tcp');
+ if ( ! socket(SOCK, PF_INET, SOCK_STREAM, $proto) )
+ {
+ mtr_error("can't create socket: $!");
+ }
+ if ( connect(SOCK, $paddr) )
+ {
+ close(SOCK); # FIXME check error?
+ return 1;
+ }
+ else
+ {
+ return 0;
+ }
+}
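
A small sketch of the wait loop this ping enables; the port value is hypothetical, and note that only the port argument is used, the socket file the callers above pass along is currently ignored.

  my $port= 9306;
  while ( mtr_ping_mysqld_server($port) )
  {
    mtr_debug("Port $port is still answering, sleeping");
    sleep(1);
  }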
+
##############################################################################
#
# Wait for a file to be created
@@ -474,33 +727,38 @@ sub stop_reap_all {
##############################################################################
-sub sleep_until_file_created ($$) {
+sub sleep_until_file_created ($$$) {
my $pidfile= shift;
my $timeout= shift;
+ my $pid= shift;
- my $loop= $timeout;
- while ( $loop-- )
+ for ( my $loop= 1; $loop <= $timeout; $loop++ )
{
if ( -r $pidfile )
{
- return;
+ return 1;
}
- mtr_debug("Sleep for 1 second waiting for creation of $pidfile");
- if ( $loop % 20 == 1 )
+ # Check if it died after the fork() was successful
+ if ( waitpid($pid,&WNOHANG) == $pid )
{
- mtr_warning("Waiting for $pidfile to be created, still trying for $loop seconds...");
+ return 0;
+ }
+
+ mtr_debug("Sleep 1 second waiting for creation of $pidfile");
+
+ if ( $loop % 60 == 0 )
+ {
+ my $left= $timeout - $loop;
+ mtr_warning("Waited $loop seconds for $pidfile to be created, " .
+ "still waiting for $left seconds...");
}
sleep(1);
}
- if ( ! -r $pidfile )
- {
- mtr_error("No $pidfile was created");
- }
+ return 0;
}
-
1;
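
A minimal sketch of the new sleep_until_file_created() contract as mysqld_start() uses it below: 1 when the pid file shows up, 0 when the child dies first or the timeout runs out. Here $args, $path_myerr and $path_mypid stand in for the values mysqld_start() computes for the server in question.

  my $pid= mtr_spawn($::exe_mysqld, $args, "", $path_myerr, $path_myerr, "");
  if ( ! sleep_until_file_created($path_mypid, 400, $pid) )
  {
    mtr_warning("mysqld did not create $path_mypid, it probably died at startup");
  }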
diff --git a/mysql-test/lib/mtr_report.pl b/mysql-test/lib/mtr_report.pl
index 0f75fc1341a..c45bb1601ce 100644
--- a/mysql-test/lib/mtr_report.pl
+++ b/mysql-test/lib/mtr_report.pl
@@ -10,6 +10,7 @@ sub mtr_report_test_name($);
sub mtr_report_test_passed($);
sub mtr_report_test_failed($);
sub mtr_report_test_skipped($);
+sub mtr_report_test_disabled($);
sub mtr_show_failed_diff ($);
sub mtr_report_stats ($);
@@ -72,7 +73,14 @@ sub mtr_report_test_skipped ($) {
my $tinfo= shift;
$tinfo->{'result'}= 'MTR_RES_SKIPPED';
- print "[ skipped ]\n";
+ if ( $tinfo->{'disable'} )
+ {
+ print "[ disabled ] $tinfo->{'comment'}\n";
+ }
+ else
+ {
+ print "[ skipped ]\n";
+ }
}
sub mtr_report_test_passed ($) {
@@ -95,9 +103,18 @@ sub mtr_report_test_failed ($) {
$tinfo->{'result'}= 'MTR_RES_FAILED';
print "[ fail ]\n";
- print "Errors are (from $::path_timefile) :\n";
- print mtr_fromfile($::path_timefile); # FIXME print_file() instead
- print "\n(the last lines may be the most important ones)\n";
+ # FIXME Instead of this test, and the meaningless error message in 'else',
+ # we should write into $::path_timefile when the error occurs.
+ if ( -f $::path_timefile )
+ {
+ print "Errors are (from $::path_timefile) :\n";
+ print mtr_fromfile($::path_timefile); # FIXME print_file() instead
+ print "\n(the last lines may be the most important ones)\n";
+ }
+ else
+ {
+ print "Unexpected termination, probably when starting mysqld\n";
+ }
}
sub mtr_report_stats ($) {
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 3bbdb48d98a..f204fee50ed 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -84,10 +84,11 @@ use Sys::Hostname;
#use Carp;
use IO::Socket;
use IO::Socket::INET;
-use Data::Dumper;
+#use Data::Dumper;
use strict;
#use diagnostics;
+require "lib/mtr_cases.pl";
require "lib/mtr_process.pl";
require "lib/mtr_io.pl";
require "lib/mtr_gcov.pl";
@@ -165,14 +166,12 @@ our $glob_user= 'test';
our $glob_use_embedded_server= 0;
our $glob_basedir;
-our $glob_do_test;
# The total result
our $path_charsetsdir;
our $path_client_bindir;
our $path_language;
-our $path_tests_bindir;
our $path_timefile;
our $path_manager_log; # Used by mysqldadmin
our $path_slave_load_tmpdir; # What is this?!
@@ -192,8 +191,10 @@ our $exe_master_mysqld;
our $exe_mysql;
our $exe_mysqladmin;
our $exe_mysqlbinlog;
+our $exe_mysql_client_test;
our $exe_mysqld;
our $exe_mysqldump; # Called from test case
+our $exe_mysql_fix_system_tables;
our $exe_mysqltest;
our $exe_slave_mysqld;
@@ -208,6 +209,7 @@ our $opt_current_test;
our $opt_ddd;
our $opt_debug;
our $opt_do_test;
+our @opt_cases; # The test case names from argv
our $opt_embedded_server;
our $opt_extern;
our $opt_fast;
@@ -232,8 +234,6 @@ our $opt_local_master;
our $master; # Will be struct in C
our $slave;
-our $opt_master_myport;
-our $opt_slave_myport;
our $opt_ndbcluster_port;
our $opt_ndbconnectstring;
@@ -284,6 +284,11 @@ our $opt_warnings;
our $opt_with_ndbcluster;
our $opt_with_openssl;
+our $exe_ndb_mgm;
+our $path_ndb_tools_dir;
+our $path_ndb_backup_dir;
+our $file_ndb_testrun_log;
+our $flag_ndb_status_ok= 1;
######################################################################
#
@@ -297,8 +302,7 @@ sub command_line_setup ();
sub executable_setup ();
sub environment_setup ();
sub kill_and_cleanup ();
-sub collect_test_cases ($);
-sub sleep_until_file_created ($$);
+sub ndbcluster_install ();
sub ndbcluster_start ();
sub ndbcluster_stop ();
sub run_benchmarks ($);
@@ -306,6 +310,7 @@ sub run_tests ();
sub mysql_install_db ();
sub install_db ($$);
sub run_testcase ($);
+sub report_failure_and_restart ($);
sub do_before_start_master ($$);
sub do_before_start_slave ($$);
sub mysqld_start ($$$$);
@@ -347,18 +352,20 @@ sub main () {
kill_and_cleanup();
mysql_install_db();
- if ( $opt_with_ndbcluster and ! $glob_use_running_ndbcluster )
- {
- ndbcluster_start(); # We start the cluster storage engine
- }
-
# mysql_loadstd(); FIXME copying from "std_data" .frm and
# .MGR but there are none?!
}
if ( $opt_start_and_exit )
{
- mtr_report("Servers started, exiting");
+ if ( mysqld_start('master',0,[],[]) )
+ {
+ mtr_report("Servers started, exiting");
+ }
+ else
+ {
+ mtr_error("Can't start the mysqld server");
+ }
}
else
{
@@ -447,8 +454,8 @@ sub command_line_setup () {
$path_manager_log= "$glob_mysql_test_dir/var/log/manager.log";
$opt_current_test= "$glob_mysql_test_dir/var/log/current_test";
- $opt_master_myport= 9306;
- $opt_slave_myport= 9308;
+ my $opt_master_myport= 9306;
+ my $opt_slave_myport= 9308;
$opt_ndbcluster_port= 9350;
# Read the command line
@@ -532,6 +539,8 @@ sub command_line_setup () {
usage("");
}
+ @opt_cases= @ARGV;
+
# Put this into a hash, will be a C struct
$master->[0]->{'path_myddir'}= "$glob_mysql_test_dir/var/master-data";
@@ -542,6 +551,8 @@ sub command_line_setup () {
$master->[0]->{'path_myport'}= $opt_master_myport;
$master->[0]->{'start_timeout'}= 400; # enough time create innodb tables
+ $master->[0]->{'ndbcluster'}= 1; # ndbcluster not started
+
$master->[1]->{'path_myddir'}= "$glob_mysql_test_dir/var/master1-data";
$master->[1]->{'path_myerr'}= "$glob_mysql_test_dir/var/log/master1.err";
$master->[1]->{'path_mylog'}= "$glob_mysql_test_dir/var/log/master1.log";
@@ -598,7 +609,7 @@ sub command_line_setup () {
# Look at the command line options and set script flags
# --------------------------------------------------------------------------
- if ( $opt_record and ! @ARGV)
+ if ( $opt_record and ! @opt_cases )
{
mtr_error("Will not run in record mode without a specific test case");
}
@@ -672,6 +683,10 @@ sub command_line_setup () {
$glob_use_running_ndbcluster= 1;
$opt_with_ndbcluster= 1;
}
+ else
+ {
+ $opt_ndbconnectstring= "host=localhost:$opt_ndbcluster_port";
+ }
# FIXME
@@ -733,7 +748,8 @@ sub executable_setup () {
{
mtr_error("Can't find embedded server 'mysqltest'");
}
- $path_tests_bindir= "$glob_basedir/libmysqld/examples";
+ $exe_mysql_client_test=
+ "$glob_basedir/libmysqld/examples/mysql_client_test_embedded";
}
else
{
@@ -749,7 +765,8 @@ sub executable_setup () {
{
$exe_mysqltest= "$glob_basedir/client/mysqltest";
}
- $path_tests_bindir= "$glob_basedir/tests";
+ $exe_mysql_client_test=
+ "$glob_basedir/tests/mysql_client_test";
}
if ( -f "$glob_basedir/client/.libs/mysqldump" )
{
@@ -768,22 +785,29 @@ sub executable_setup () {
$exe_mysqlbinlog= "$glob_basedir/client/mysqlbinlog";
}
- $exe_mysqld= "$glob_basedir/sql/mysqld";
- $path_client_bindir= "$glob_basedir/client";
- $exe_mysqladmin= "$path_client_bindir/mysqladmin";
- $exe_mysql= "$path_client_bindir/mysql";
- $path_language= "$glob_basedir/sql/share/english/";
- $path_charsetsdir= "$glob_basedir/sql/share/charsets";
+ $path_client_bindir= "$glob_basedir/client";
+ $exe_mysqld= "$glob_basedir/sql/mysqld";
+ $exe_mysqladmin= "$path_client_bindir/mysqladmin";
+ $exe_mysql= "$path_client_bindir/mysql";
+ $exe_mysql_fix_system_tables= "$glob_basedir/scripts/mysql_fix_privilege_tables";
+ $path_language= "$glob_basedir/sql/share/english/";
+ $path_charsetsdir= "$glob_basedir/sql/share/charsets";
+
+ $path_ndb_tools_dir= "$glob_basedir/ndb/tools";
+ $exe_ndb_mgm= "$glob_basedir/ndb/src/mgmclient/ndb_mgm";
}
else
{
- $path_client_bindir= "$glob_basedir/bin";
- $path_tests_bindir= "$glob_basedir/tests";
- $exe_mysqltest= "$path_client_bindir/mysqltest";
- $exe_mysqldump= "$path_client_bindir/mysqldump";
- $exe_mysqlbinlog= "$path_client_bindir/mysqlbinlog";
- $exe_mysqladmin= "$path_client_bindir/mysqladmin";
- $exe_mysql= "$path_client_bindir/mysql";
+ my $path_tests_bindir= "$glob_basedir/tests";
+
+ $path_client_bindir= "$glob_basedir/bin";
+ $exe_mysqltest= "$path_client_bindir/mysqltest";
+ $exe_mysqldump= "$path_client_bindir/mysqldump";
+ $exe_mysqlbinlog= "$path_client_bindir/mysqlbinlog";
+ $exe_mysqladmin= "$path_client_bindir/mysqladmin";
+ $exe_mysql= "$path_client_bindir/mysql";
+ $exe_mysql_fix_system_tables= "$path_client_bindir/scripts/mysql_fix_privilege_tables";
+
if ( -d "$glob_basedir/share/mysql/english" )
{
$path_language ="$glob_basedir/share/mysql/english/";
@@ -804,6 +828,36 @@ sub executable_setup () {
$exe_mysqld= "$glob_basedir/bin/mysqld";
}
+ if ( $glob_use_embedded_server )
+ {
+ if ( -f "$path_client_bindir/mysqltest_embedded" )
+ {
+ # FIXME valgrind?
+ $exe_mysqltest="$path_client_bindir/mysqltest_embedded";
+ }
+ else
+ {
+ error("Cannot find embedded server 'mysqltest_embedded'");
+ }
+ if ( -d "$path_tests_bindir/mysql_client_test_embedded" )
+ {
+ $exe_mysql_client_test=
+ "$path_tests_bindir/mysql_client_test_embedded";
+ }
+ else
+ {
+ $exe_mysql_client_test=
+ "$path_client_bindir/mysql_client_test_embedded";
+ }
+ }
+ else
+ {
+ $exe_mysqltest="$path_client_bindir/mysqltest";
+ $exe_mysql_client_test="$path_client_bindir/mysql_client_test";
+ }
+
+ $path_ndb_tools_dir= "$glob_basedir/bin";
+ $exe_ndb_mgm= "$glob_basedir/bin/ndb_mgm";
}
# FIXME special $exe_master_mysqld and $exe_slave_mysqld
@@ -818,6 +872,10 @@ sub executable_setup () {
{
$exe_slave_mysqld= $exe_mysqld;
}
+
+ $path_ndb_backup_dir=
+ "$glob_mysql_test_dir/var/ndbcluster-$opt_ndbcluster_port";
+ $file_ndb_testrun_log= "$glob_mysql_test_dir/var/log/ndb_testrun.log";
}
@@ -846,13 +904,18 @@ sub environment_setup () {
# Also command lines in .opt files may contain env vars
# --------------------------------------------------------------------------
- $ENV{'LC_COLLATE'}= "C";
- $ENV{'MYSQL_TEST_DIR'}= $glob_mysql_test_dir;
- $ENV{'MASTER_MYPORT'}= $opt_master_myport;
- $ENV{'SLAVE_MYPORT'}= $opt_slave_myport;
-# $ENV{'MYSQL_TCP_PORT'}= '@MYSQL_TCP_PORT@'; # FIXME
- $ENV{'MYSQL_TCP_PORT'}= 3306;
- $ENV{'MASTER_MYSOCK'}= $master->[0]->{'path_mysock'};
+ $ENV{'UMASK'}= "0660"; # The octal *string*
+ $ENV{'UMASK_DIR'}= "0770"; # The octal *string*
+ $ENV{'LC_COLLATE'}= "C";
+ $ENV{'USE_RUNNING_SERVER'}= $glob_use_running_server;
+ $ENV{'MYSQL_TEST_DIR'}= $glob_mysql_test_dir;
+ $ENV{'MASTER_MYSOCK'}= $master->[0]->{'path_mysock'};
+ $ENV{'MASTER_MYSOCK1'}= $master->[1]->{'path_mysock'};
+ $ENV{'MASTER_MYPORT'}= $master->[0]->{'path_myport'};
+ $ENV{'MASTER_MYPORT1'}= $master->[1]->{'path_myport'};
+ $ENV{'SLAVE_MYPORT'}= $slave->[0]->{'path_myport'};
+# $ENV{'MYSQL_TCP_PORT'}= '@MYSQL_TCP_PORT@'; # FIXME
+ $ENV{'MYSQL_TCP_PORT'}= 3306;
}
@@ -877,203 +940,6 @@ sub handle_int_signal () {
##############################################################################
#
-# Collect information about test cases we are to run
-#
-##############################################################################
-
-sub collect_test_cases ($) {
- my $suite= shift; # Test suite name
-
- my $testdir;
- my $resdir;
-
- if ( $suite eq "main" )
- {
- $testdir= "$glob_mysql_test_dir/t";
- $resdir= "$glob_mysql_test_dir/r";
- }
- else
- {
- $testdir= "$glob_mysql_test_dir/suite/$suite/t";
- $resdir= "$glob_mysql_test_dir/suite/$suite/r";
- }
-
- my @tests; # Array of hash, will be array of C struct
-
- opendir(TESTDIR, $testdir) or mtr_error("Can't open dir \"$testdir\": $!");
-
- foreach my $elem ( sort readdir(TESTDIR) ) {
- my $tname= mtr_match_extension($elem,"test");
- next if ! defined $tname;
- next if $opt_do_test and ! defined mtr_match_prefix($elem,$opt_do_test);
- my $path= "$testdir/$elem";
-
- # ----------------------------------------------------------------------
- # Skip some tests silently
- # ----------------------------------------------------------------------
-
- if ( $opt_start_from and $tname lt $opt_start_from )
- {
- next;
- }
-
- # ----------------------------------------------------------------------
- # Skip some tests but include in list, just mark them to skip
- # ----------------------------------------------------------------------
-
- my $tinfo= {};
- $tinfo->{'name'}= $tname;
- $tinfo->{'result_file'}= "$resdir/$tname.result";
- push(@tests, $tinfo);
-
- if ( $opt_skip_test and defined mtr_match_prefix($tname,$opt_skip_test) )
- {
- $tinfo->{'skip'}= 1;
- next;
- }
-
- # FIXME temporary solution, we have a hard coded list of test cases to
- # skip if we are using the embedded server
-
- if ( $glob_use_embedded_server and
- mtr_match_any_exact($tname,\@skip_if_embedded_server) )
- {
- $tinfo->{'skip'}= 1;
- next;
- }
-
- # ----------------------------------------------------------------------
- # Collect information about test case
- # ----------------------------------------------------------------------
-
- $tinfo->{'path'}= $path;
- $tinfo->{'timezone'}= "GMT-3"; # for UNIX_TIMESTAMP tests to work
-
- if ( defined mtr_match_prefix($tname,"rpl") )
- {
- if ( $opt_skip_rpl )
- {
- $tinfo->{'skip'}= 1;
- next;
- }
-
- # FIXME currently we always restart slaves
- $tinfo->{'slave_restart'}= 1;
-
- if ( $tname eq 'rpl_failsafe' or $tname eq 'rpl_chain_temp_table' )
- {
- $tinfo->{'slave_num'}= 3;
- }
- else
- {
- $tinfo->{'slave_num'}= 1;
- }
- }
-
- # FIXME what about embedded_server + ndbcluster, skip ?!
-
- my $master_opt_file= "$testdir/$tname-master.opt";
- my $slave_opt_file= "$testdir/$tname-slave.opt";
- my $slave_mi_file= "$testdir/$tname.slave-mi";
- my $master_sh= "$testdir/$tname-master.sh";
- my $slave_sh= "$testdir/$tname-slave.sh";
-
- if ( -f $master_opt_file )
- {
- $tinfo->{'master_restart'}= 1; # We think so for now
- # This is a dirty hack from old mysql-test-run, we use the opt file
- # to flag other things as well, it is not a opt list at all
- my $extra_master_opt= mtr_get_opts_from_file($master_opt_file);
-
- foreach my $opt (@$extra_master_opt)
- {
- my $value;
-
- $value= mtr_match_prefix($opt, "--timezone=");
-
- if ( defined $value )
- {
- $tinfo->{'timezone'}= $value;
- $extra_master_opt= [];
- $tinfo->{'master_restart'}= 0;
- last;
- }
-
- $value= mtr_match_prefix($opt, "--result-file=");
-
- if ( defined $value )
- {
- $tinfo->{'result_file'}= "r/$value.result";
- if ( $opt_result_ext and $opt_record or
- -f "$tinfo->{'result_file'}$opt_result_ext")
- {
- $tinfo->{'result_file'}.= $opt_result_ext;
- }
- $extra_master_opt= [];
- $tinfo->{'master_restart'}= 0;
- last;
- }
- }
-
- $tinfo->{'master_opt'}= $extra_master_opt;
- }
-
- if ( -f $slave_opt_file )
- {
- $tinfo->{'slave_opt'}= mtr_get_opts_from_file($slave_opt_file);
- $tinfo->{'slave_restart'}= 1;
- }
-
- if ( -f $slave_mi_file )
- {
- $tinfo->{'slave_mi'}= mtr_get_opts_from_file($slave_mi_file);
- $tinfo->{'slave_restart'}= 1;
- }
-
- if ( -f $master_sh )
- {
- if ( $glob_win32_perl )
- {
- $tinfo->{'skip'}= 1;
- }
- else
- {
- $tinfo->{'master_sh'}= $master_sh;
- $tinfo->{'master_restart'}= 1;
- }
- }
-
- if ( -f $slave_sh )
- {
- if ( $glob_win32_perl )
- {
- $tinfo->{'skip'}= 1;
- }
- else
- {
- $tinfo->{'slave_sh'}= $slave_sh;
- $tinfo->{'slave_restart'}= 1;
- }
- }
-
- # We can't restart a running server that may be in use
-
- if ( $glob_use_running_server and
- ( $tinfo->{'master_restart'} or $tinfo->{'slave_restart'} ) )
- {
- $tinfo->{'skip'}= 1;
- }
-
- }
-
- closedir TESTDIR;
-
- return \@tests;
-}
-
-
-##############################################################################
-#
# Handle left overs from previous runs
#
##############################################################################
@@ -1099,22 +965,19 @@ sub kill_and_cleanup () {
mtr_report("Killing Possible Leftover Processes");
mkpath("$glob_mysql_test_dir/var/log"); # Needed for mysqladmin log
mtr_kill_leftovers();
- }
- if ( $opt_with_ndbcluster and ! $glob_use_running_ndbcluster )
- {
ndbcluster_stop();
+ $master->[0]->{'ndbcluster'}= 1;
}
mtr_report("Removing Stale Files");
rmtree("$glob_mysql_test_dir/var/log");
- rmtree("$glob_mysql_test_dir/var/ndbcluster");
+ rmtree("$glob_mysql_test_dir/var/ndbcluster-$opt_ndbcluster_port");
rmtree("$glob_mysql_test_dir/var/run");
rmtree("$glob_mysql_test_dir/var/tmp");
mkpath("$glob_mysql_test_dir/var/log");
- mkpath("$glob_mysql_test_dir/var/ndbcluster");
mkpath("$glob_mysql_test_dir/var/run");
mkpath("$glob_mysql_test_dir/var/tmp");
mkpath($opt_tmpdir);
@@ -1152,26 +1015,67 @@ sub kill_and_cleanup () {
# FIXME why is there a different start below?!
-sub ndbcluster_start () {
+sub ndbcluster_install () {
- mtr_report("Starting ndbcluster");
+ if ( ! $opt_with_ndbcluster or $glob_use_running_ndbcluster )
+ {
+ return 0;
+ }
+ mtr_report("Install ndbcluster");
my $ndbcluster_opts= $opt_bench ? "" : "--small";
- # FIXME check result code?!
- mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
- ["--port-base=$opt_ndbcluster_port",
- $ndbcluster_opts,
- "--diskless",
- "--initial",
- "--data-dir=$glob_mysql_test_dir/var"],
- "", "", "", "");
+ my $ndbcluster_port_base= $opt_ndbcluster_port + 2;
+ if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
+ ["--port=$opt_ndbcluster_port",
+ "--port-base=$ndbcluster_port_base",
+ "--data-dir=$glob_mysql_test_dir/var",
+ $ndbcluster_opts,
+ "--initial"],
+ "", "", "", "") )
+ {
+ mtr_error("Error ndbcluster_install");
+ return 1;
+ }
+
+ ndbcluster_stop();
+ $master->[0]->{'ndbcluster'}= 1;
+
+ return 0;
+}
+
+sub ndbcluster_start () {
+
+ if ( ! $opt_with_ndbcluster or $glob_use_running_ndbcluster )
+ {
+ return 0;
+ }
+ # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null
+ if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
+ ["--port=$opt_ndbcluster_port",
+ "--data-dir=$glob_mysql_test_dir/var"],
+ "", "/dev/null", "", "") )
+ {
+ mtr_error("Error ndbcluster_install");
+ return 1;
+ }
+
+ return 0;
}
sub ndbcluster_stop () {
+
+ if ( ! $opt_with_ndbcluster or $glob_use_running_ndbcluster )
+ {
+ return;
+ }
+ my $ndbcluster_port_base= $opt_ndbcluster_port + 2;
+ # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null
mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
- ["--data-dir=$glob_mysql_test_dir/var",
- "--port-base=$opt_ndbcluster_port",
+ ["--port=$opt_ndbcluster_port",
+ "--data-dir=$glob_mysql_test_dir/var",
"--stop"],
- "", "", "", "");
+ "", "/dev/null", "", "");
+
+ return;
}
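
A short sketch of the cluster lifecycle these three helpers imply; error handling as done by the real callers is omitted, and the flag convention is the one used throughout: 1 means "not started".

  ndbcluster_install();                  # runs "ndbcluster --initial", then stops it again
  # ...later, lazily, before the first test case that needs the cluster:
  $master->[0]->{'ndbcluster'}= ndbcluster_start();  # 0 on success, i.e. started
  # ...and at cleanup time:
  ndbcluster_stop();
  $master->[0]->{'ndbcluster'}= 1;       # back to "not started"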
@@ -1189,6 +1093,10 @@ sub run_benchmarks ($) {
if ( ! $glob_use_embedded_server and ! $opt_local_master )
{
$master->[0]->{'pid'}= mysqld_start('master',0,[],[]);
+ if ( ! $master->[0]->{'pid'} )
+ {
+ mtr_error("Can't start the mysqld server");
+ }
}
mtr_init_args(\$args);
@@ -1254,7 +1162,7 @@ sub run_suite () {
mtr_print_thick_line();
- mtr_report("Finding Tests in the '$suite' suite");
+ mtr_report("Finding Tests in the '$suite' suite");
my $tests= collect_test_cases($suite);
@@ -1275,11 +1183,6 @@ sub run_suite () {
stop_masters_slaves();
}
- if ( $opt_with_ndbcluster and ! $glob_use_running_ndbcluster )
- {
- ndbcluster_stop();
- }
-
if ( $opt_gcov )
{
gcov_collect(); # collect coverage information
@@ -1301,10 +1204,19 @@ sub run_suite () {
sub mysql_install_db () {
- mtr_report("Installing Test Databases");
-
+ # FIXME not exactly true I think, needs improvements
install_db('master', $master->[0]->{'path_myddir'});
+ install_db('master', $master->[1]->{'path_myddir'});
install_db('slave', $slave->[0]->{'path_myddir'});
+ install_db('slave', $slave->[1]->{'path_myddir'});
+ install_db('slave', $slave->[2]->{'path_myddir'});
+
+ if ( ndbcluster_install() )
+ {
+ # failed to install; disable usage but flag that it's not ok
+ $opt_with_ndbcluster= 0;
+ $flag_ndb_status_ok= 0;
+ }
return 0;
}
@@ -1368,6 +1280,9 @@ sub run_testcase ($) {
mtr_tonewfile($opt_current_test,"$tname\n"); # Always tell where we are
+ # output current test to ndbcluster log file to enable diagnostics
+ mtr_tofile($file_ndb_testrun_log,"CURRENT TEST $tname\n");
+
# ----------------------------------------------------------------------
# If marked to skip, just print out and return.
# Note that a test case not marked as 'skip' can still be
@@ -1423,6 +1338,12 @@ sub run_testcase ($) {
do_before_start_master($tname,$tinfo->{'master_sh'});
# ----------------------------------------------------------------------
+ # If any mysqld servers running died, we have to know
+ # ----------------------------------------------------------------------
+
+ mtr_record_dead_children();
+
+ # ----------------------------------------------------------------------
# Start masters
# ----------------------------------------------------------------------
@@ -1435,18 +1356,37 @@ sub run_testcase ($) {
if ( ! $opt_local_master )
{
+ if ( $master->[0]->{'ndbcluster'} )
+ {
+ $master->[0]->{'ndbcluster'}= ndbcluster_start();
+ if ( $master->[0]->{'ndbcluster'} )
+ {
+ report_failure_and_restart($tinfo);
+ return;
+ }
+ }
if ( ! $master->[0]->{'pid'} )
{
$master->[0]->{'pid'}=
mysqld_start('master',0,$tinfo->{'master_opt'},[]);
+ if ( ! $master->[0]->{'pid'} )
+ {
+ report_failure_and_restart($tinfo);
+ return;
+ }
}
if ( $opt_with_ndbcluster and ! $master->[1]->{'pid'} )
{
$master->[1]->{'pid'}=
mysqld_start('master',1,$tinfo->{'master_opt'},[]);
+ if ( ! $master->[1]->{'pid'} )
+ {
+ report_failure_and_restart($tinfo);
+ return;
+ }
}
- if ( $tinfo->{'master_opt'} )
+ if ( @{$tinfo->{'master_opt'}} )
{
$master->[0]->{'uses_special_flags'}= 1;
}
@@ -1469,6 +1409,11 @@ sub run_testcase ($) {
$slave->[$idx]->{'pid'}=
mysqld_start('slave',$idx,
$tinfo->{'slave_opt'}, $tinfo->{'slave_mi'});
+ if ( ! $slave->[$idx]->{'pid'} )
+ {
+ report_failure_and_restart($tinfo);
+ return;
+ }
}
}
}
@@ -1502,30 +1447,37 @@ sub run_testcase ($) {
"mysqltest returned unexpected code $res, " .
"it has probably crashed");
}
- mtr_report_test_failed($tinfo);
- mtr_show_failed_diff($tname);
- print "\n";
- if ( ! $opt_force )
- {
- print "Aborting: $tname failed. To continue, re-run with '--force'.";
- print "\n";
- if ( ! $opt_gdb and ! $glob_use_running_server and
- ! $opt_ddd and ! $glob_use_embedded_server )
- {
- stop_masters_slaves();
- }
- exit(1);
- }
+ report_failure_and_restart($tinfo);
+ }
+ }
+}
- # FIXME always terminate on failure?!
- if ( ! $opt_gdb and ! $glob_use_running_server and
- ! $opt_ddd and ! $glob_use_embedded_server )
- {
- stop_masters_slaves();
- }
- print "Resuming Tests\n\n";
+
+sub report_failure_and_restart ($) {
+ my $tinfo= shift;
+
+ mtr_report_test_failed($tinfo);
+ mtr_show_failed_diff($tinfo->{'name'});
+ print "\n";
+ if ( ! $opt_force )
+ {
+ print "Aborting: $tinfo->{'name'} failed. To continue, re-run with '--force'.";
+ print "\n";
+ if ( ! $opt_gdb and ! $glob_use_running_server and
+ ! $opt_ddd and ! $glob_use_embedded_server )
+ {
+ stop_masters_slaves();
}
+ exit(1);
+ }
+
+ # FIXME always terminate on failure?!
+ if ( ! $opt_gdb and ! $glob_use_running_server and
+ ! $opt_ddd and ! $glob_use_embedded_server )
+ {
+ stop_masters_slaves();
}
+ print "Resuming Tests\n\n";
}
@@ -1603,11 +1555,13 @@ sub do_before_start_slave ($$) {
}
sub mysqld_arguments ($$$$$) {
- my $args= shift;
- my $type= shift; # master/slave/bootstrap
- my $idx= shift;
- my $extra_opt= shift;
- my $slave_master_info= shift;
+ my $args= shift;
+ my $type= shift; # master/slave/bootstrap
+ my $idx= shift;
+ my $extra_opt= shift;
+ my $slave_master_info= shift;
+
+# print STDERR Dumper($extra_opt);
my $sidx= ""; # Index as string, 0 is empty string
if ( $idx > 0 )
@@ -1728,17 +1682,8 @@ sub mysqld_arguments ($$$$$) {
if ( $opt_with_ndbcluster )
{
mtr_add_arg($args, "%s--ndbcluster", $prefix);
-
- if ( $glob_use_running_ndbcluster )
- {
- mtr_add_arg($args,"--ndb-connectstring=%s", $prefix,
- $opt_ndbconnectstring);
- }
- else
- {
- mtr_add_arg($args,"--ndb-connectstring=host=localhost:%d",
- $prefix, $opt_ndbcluster_port);
- }
+ mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
+ $opt_ndbconnectstring);
}
# FIXME always set nowdays??? SMALL_SERVER
@@ -1835,10 +1780,10 @@ sub mysqld_arguments ($$$$$) {
##############################################################################
sub mysqld_start ($$$$) {
- my $type= shift; # master/slave/bootstrap
- my $idx= shift;
- my $extra_opt= shift;
- my $slave_master_info= shift;
+ my $type= shift; # master/slave/bootstrap
+ my $idx= shift;
+ my $extra_opt= shift;
+ my $slave_master_info= shift;
my $args; # Arg vector
my $exe;
@@ -1893,9 +1838,8 @@ sub mysqld_start ($$$$) {
$master->[$idx]->{'path_myerr'},
$master->[$idx]->{'path_myerr'}, "") )
{
- sleep_until_file_created($master->[$idx]->{'path_mypid'},
- $master->[$idx]->{'start_timeout'});
- return $pid;
+ return sleep_until_file_created($master->[$idx]->{'path_mypid'},
+ $master->[$idx]->{'start_timeout'}, $pid);
}
}
@@ -1905,13 +1849,12 @@ sub mysqld_start ($$$$) {
$slave->[$idx]->{'path_myerr'},
$slave->[$idx]->{'path_myerr'}, "") )
{
- sleep_until_file_created($slave->[$idx]->{'path_mypid'},
- $master->[$idx]->{'start_timeout'});
- return $pid;
+ return sleep_until_file_created($slave->[$idx]->{'path_mypid'},
+ $master->[$idx]->{'start_timeout'}, $pid);
}
}
- mtr_error("Can't start mysqld FIXME");
+ return 0;
}
sub stop_masters_slaves () {
@@ -1944,7 +1887,13 @@ sub stop_masters () {
}
}
- mtr_stop_mysqld_servers(\@args, 0);
+ if ( ! $master->[0]->{'ndbcluster'} )
+ {
+ ndbcluster_stop();
+ $master->[0]->{'ndbcluster'}= 1;
+ }
+
+ mtr_stop_mysqld_servers(\@args);
}
sub stop_slaves () {
@@ -1966,7 +1915,7 @@ sub stop_slaves () {
}
}
- mtr_stop_mysqld_servers(\@args, 0);
+ mtr_stop_mysqld_servers(\@args);
}
@@ -1992,17 +1941,39 @@ sub run_mysqltest ($$) {
}
my $cmdline_mysql=
- "$exe_mysql --host=localhost --port=$master->[0]->{'path_myport'} " .
- "--socket=$master->[0]->{'path_mysock'} --user=root --password=";
+ "$exe_mysql --host=localhost --user=root --password= " .
+ "--port=$master->[0]->{'path_myport'} " .
+ "--socket=$master->[0]->{'path_mysock'}";
+
+ my $cmdline_mysql_client_test=
+ "$exe_mysql_client_test --no-defaults --testcase --user=root --silent " .
+ "--port=$master->[0]->{'path_myport'} " .
+ "--socket=$master->[0]->{'path_mysock'}";
+
+ my $cmdline_mysql_fix_system_tables=
+ "$exe_mysql_fix_system_tables --no-defaults --host=localhost --user=root --password= " .
+ "--basedir=$glob_basedir --bindir=$path_client_bindir --verbose " .
+ "--port=$master->[0]->{'path_myport'} " .
+ "--socket=$master->[0]->{'path_mysock'}";
+
+
# FIXME really needing a PATH???
# $ENV{'PATH'}= "/bin:/usr/bin:/usr/local/bin:/usr/bsd:/usr/X11R6/bin:/usr/openwin/bin:/usr/bin/X11:$ENV{'PATH'}";
- $ENV{'MYSQL'}= $exe_mysql;
+ $ENV{'MYSQL'}= $cmdline_mysql;
$ENV{'MYSQL_DUMP'}= $cmdline_mysqldump;
- $ENV{'MYSQL_BINLOG'}= $exe_mysqlbinlog;
- $ENV{'CLIENT_BINDIR'}= $path_client_bindir;
- $ENV{'TESTS_BINDIR'}= $path_tests_bindir;
+ $ENV{'MYSQL_BINLOG'}= $cmdline_mysqlbinlog;
+ $ENV{'MYSQL_FIX_SYSTEM_TABLES'}= $cmdline_mysql_fix_system_tables;
+ $ENV{'MYSQL_CLIENT_TEST'}= $cmdline_mysql_client_test;
+ $ENV{'CHARSETSDIR'}= $path_charsetsdir;
+
+ $ENV{'NDB_STATUS_OK'}= $flag_ndb_status_ok;
+ $ENV{'NDB_MGM'}= $exe_ndb_mgm;
+ $ENV{'NDB_BACKUP_DIR'}= $path_ndb_backup_dir;
+ $ENV{'NDB_TOOLS_DIR'}= $path_ndb_tools_dir;
+ $ENV{'NDB_TOOLS_OUTPUT'}= $file_ndb_testrun_log;
+ $ENV{'NDB_CONNECTSTRING'}= $opt_ndbconnectstring;
my $exe= $exe_mysqltest;
my $args;
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index c2f1cebbe17..39d3f0492c2 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -490,6 +490,7 @@ export MASTER_MYPORT MASTER_MYPORT1 SLAVE_MYPORT MYSQL_TCP_PORT MASTER_MYSOCK MA
NDBCLUSTER_BASE_PORT=`expr $NDBCLUSTER_PORT + 2`
NDBCLUSTER_OPTS="--port=$NDBCLUSTER_PORT --port-base=$NDBCLUSTER_BASE_PORT --data-dir=$MYSQL_TEST_DIR/var --ndb_mgm-extra-opts=$NDB_MGM_EXTRA_OPTS --ndb_mgmd-extra-opts=$NDB_MGMD_EXTRA_OPTS --ndbd-extra-opts=$NDBD_EXTRA_OPTS"
NDB_BACKUP_DIR=$MYSQL_TEST_DIR/var/ndbcluster-$NDBCLUSTER_PORT
+NDB_TOOLS_OUTPUT=$MYSQL_TEST_DIR/var/log/ndb_tools.log
if [ x$SOURCE_DIST = x1 ] ; then
MY_BASEDIR=$MYSQL_TEST_DIR
@@ -690,7 +691,10 @@ export CLIENT_BINDIR MYSQL_CLIENT_TEST CHARSETSDIR
export NDB_TOOLS_DIR
export NDB_MGM
export NDB_BACKUP_DIR
+export NDB_TOOLS_OUTPUT
export PURIFYOPTIONS
+NDB_STATUS_OK=1
+export NDB_STATUS_OK
MYSQL_TEST_ARGS="--no-defaults --socket=$MASTER_MYSOCK --database=$DB \
--user=$DBUSER --password=$DBPASSWD --silent -v --skip-safemalloc \
@@ -1043,6 +1047,7 @@ start_ndbcluster()
{
if [ ! -z "$USE_NDBCLUSTER" ]
then
+ rm -f $NDB_TOOLS_OUTPUT
if [ -z "$USE_RUNNING_NDBCLUSTER" ]
then
echo "Starting ndbcluster"
@@ -1052,7 +1057,15 @@ start_ndbcluster()
else
NDBCLUSTER_EXTRA_OPTS="--small"
fi
- ./ndb/ndbcluster $NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --initial || exit 1
+ ./ndb/ndbcluster $NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --initial || NDB_STATUS_OK=0
+ if [ x$NDB_STATUS_OK != x1 ] ; then
+ if [ x$FORCE != x1 ] ; then
+ exit 1
+ fi
+ USE_NDBCLUSTER=
+ return
+ fi
+
NDB_CONNECTSTRING="host=localhost:$NDBCLUSTER_PORT"
else
NDB_CONNECTSTRING="$USE_RUNNING_NDBCLUSTER"
@@ -1617,6 +1630,12 @@ run_testcase ()
fi
fi
fi
+
+ if [ "x$START_AND_EXIT" = "x1" ] ; then
+ echo "Servers started, exiting"
+ exit
+ fi
+
cd $MYSQL_TEST_DIR
if [ -f $tf ] ; then
@@ -1754,11 +1773,6 @@ then
mysql_loadstd
fi
-if [ "x$START_AND_EXIT" = "x1" ] ; then
- echo "Servers started, exiting"
- exit
-fi
-
$ECHO "Starting Tests"
#
diff --git a/mysql-test/r/compare.result b/mysql-test/r/compare.result
index 49ec2dd85cc..6f667aabac0 100644
--- a/mysql-test/r/compare.result
+++ b/mysql-test/r/compare.result
@@ -39,3 +39,6 @@ DROP TABLE t1;
SELECT CHAR(31) = '', '' = CHAR(31);
CHAR(31) = '' '' = CHAR(31)
0 0
+SELECT CHAR(30) = '', '' = CHAR(30);
+CHAR(30) = '' '' = CHAR(30)
+0 0
diff --git a/mysql-test/r/drop_temp_table.result b/mysql-test/r/drop_temp_table.result
index 266196877c8..a486964feb2 100644
--- a/mysql-test/r/drop_temp_table.result
+++ b/mysql-test/r/drop_temp_table.result
@@ -1,7 +1,9 @@
reset master;
create database `drop-temp+table-test`;
use `drop-temp+table-test`;
+create temporary table shortn1 (a int);
create temporary table `table:name` (a int);
+create temporary table shortn2 (a int);
select get_lock("a",10);
get_lock("a",10)
1
@@ -10,9 +12,13 @@ get_lock("a",10)
1
show binlog events;
Log_name Pos Event_type Server_id Orig_log_pos Info
-master-bin.000001 4 Start 1 4 Server ver: VERSION, Binlog ver: 3
-master-bin.000001 79 Query 1 79 create database `drop-temp+table-test`
-master-bin.000001 168 Query 1 168 use `drop-temp+table-test`; create temporary table `table:name` (a int)
-master-bin.000001 262 Query 1 262 use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`table:name`
-master-bin.000001 391 Query 1 391 use `drop-temp+table-test`; DO RELEASE_LOCK("a")
+master-bin.000001 # Start 1 # Server ver: VERSION, Binlog ver: 3
+master-bin.000001 # Query 1 # create database `drop-temp+table-test`
+master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table shortn1 (a int)
+master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table `table:name` (a int)
+master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table shortn2 (a int)
+master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`shortn2`
+master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`table:name`
+master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`shortn1`
+master-bin.000001 # Query 1 # use `drop-temp+table-test`; DO RELEASE_LOCK("a")
drop database `drop-temp+table-test`;
diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result
index 4bb79a1cb41..fa645700875 100644
--- a/mysql-test/r/func_group.result
+++ b/mysql-test/r/func_group.result
@@ -733,3 +733,15 @@ one 2
two 2
three 1
drop table t1;
+create table t1(f1 datetime);
+insert into t1 values (now());
+create table t2 select f2 from (select max(now()) f2 from t1) a;
+show columns from t2;
+Field Type Null Key Default Extra
+f2 datetime 0000-00-00 00:00:00
+drop table t2;
+create table t2 select f2 from (select now() f2 from t1) a;
+show columns from t2;
+Field Type Null Key Default Extra
+f2 datetime 0000-00-00 00:00:00
+drop table t2, t1;
diff --git a/mysql-test/r/func_misc.result b/mysql-test/r/func_misc.result
index 5a9f0f68228..2d464c891bf 100644
--- a/mysql-test/r/func_misc.result
+++ b/mysql-test/r/func_misc.result
@@ -28,3 +28,24 @@ length(format('nan', 2)) > 0
select concat("$",format(2500,2));
concat("$",format(2500,2))
$2,500.00
+create table t1 ( a timestamp );
+insert into t1 values ( '2004-01-06 12:34' );
+select a from t1 where left(a+0,6) in ( left(20040106,6) );
+a
+2004-01-06 12:34:00
+select a from t1 where left(a+0,6) = ( left(20040106,6) );
+a
+2004-01-06 12:34:00
+select a from t1 where right(a+0,6) in ( right(20040106123400,6) );
+a
+2004-01-06 12:34:00
+select a from t1 where right(a+0,6) = ( right(20040106123400,6) );
+a
+2004-01-06 12:34:00
+select a from t1 where mid(a+0,6,3) in ( mid(20040106123400,6,3) );
+a
+2004-01-06 12:34:00
+select a from t1 where mid(a+0,6,3) = ( mid(20040106123400,6,3) );
+a
+2004-01-06 12:34:00
+drop table t1;
diff --git a/mysql-test/r/func_str.result b/mysql-test/r/func_str.result
index 58d66c7f712..88b1a5ea743 100644
--- a/mysql-test/r/func_str.result
+++ b/mysql-test/r/func_str.result
@@ -325,6 +325,19 @@ trim(trailing 'foo' from 'foo')
select trim(leading 'foo' from 'foo');
trim(leading 'foo' from 'foo')
+select quote(ltrim(concat(' ', 'a')));
+quote(ltrim(concat(' ', 'a')))
+'a'
+select quote(trim(concat(' ', 'a')));
+quote(trim(concat(' ', 'a')))
+'a'
+CREATE TABLE t1 SELECT 1 UNION SELECT 2 UNION SELECT 3;
+SELECT QUOTE('A') FROM t1;
+QUOTE('A')
+'A'
+'A'
+'A'
+DROP TABLE t1;
select 1=_latin1'1';
1=_latin1'1'
1
@@ -691,12 +704,6 @@ select count(*) as total, left(c,10) as reg from t1 group by reg order by reg de
total reg
10 2004-12-10
drop table t1;
-select quote(ltrim(concat(' ', 'a')));
-quote(ltrim(concat(' ', 'a')))
-'a'
-select quote(trim(concat(' ', 'a')));
-quote(trim(concat(' ', 'a')))
-'a'
select trim(null from 'kate') as "must_be_null";
must_be_null
NULL
diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result
index f92b3ea4f4d..17b1bb03d1d 100644
--- a/mysql-test/r/group_by.result
+++ b/mysql-test/r/group_by.result
@@ -629,15 +629,6 @@ explain SELECT i, COUNT(DISTINCT(i)) FROM t1 GROUP BY j ORDER BY NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using filesort
DROP TABLE t1;
-create table t1 ( col1 int, col2 int );
-insert into t1 values (1,1),(1,2),(1,3),(2,1),(2,2);
-select group_concat( distinct col1 ) as alias from t1
-group by col2 having alias like '%';
-alias
-1,2
-1,2
-1
-drop table t1;
create table t1 (a int);
insert into t1 values(null);
select min(a) is null from t1;
@@ -650,3 +641,39 @@ select 1 and min(a) is null from t1;
1 and min(a) is null
1
drop table t1;
+create table t1 ( col1 int, col2 int );
+insert into t1 values (1,1),(1,2),(1,3),(2,1),(2,2);
+select group_concat( distinct col1 ) as alias from t1
+group by col2 having alias like '%';
+alias
+1,2
+1,2
+1
+drop table t1;
+create table t1 (a integer, b integer, c integer);
+insert into t1 (a,b) values (1,2),(1,3),(2,5);
+select a, 0.1*0+1 r2, sum(1) r1 from t1 where a = 1 group by a having r1>1 and r2=1;
+a r2 r1
+1 1.0 2
+select a, rand()*0+1 r2, sum(1) r1 from t1 where a = 1 group by a having r1>1 and r2=1;
+a r2 r1
+1 1 2
+select a,sum(b) from t1 where a=1 group by c;
+a sum(b)
+1 5
+select a*sum(b) from t1 where a=1 group by c;
+a*sum(b)
+5
+select sum(a)*sum(b) from t1 where a=1 group by c;
+sum(a)*sum(b)
+10
+select a,sum(b) from t1 where a=1 group by c having a=1;
+a sum(b)
+1 5
+select a as d,sum(b) from t1 where a=1 group by c having d=1;
+d sum(b)
+1 5
+select sum(a)*sum(b) as d from t1 where a=1 group by c having d > 0;
+d
+10
+drop table t1;
diff --git a/mysql-test/r/insert_select.result.es b/mysql-test/r/insert_select.result.es
index 9e11402733d..9cac6d31b8f 100644
--- a/mysql-test/r/insert_select.result.es
+++ b/mysql-test/r/insert_select.result.es
@@ -633,3 +633,15 @@ No Field Count
0 1 100
0 2 100
drop table t1, t2;
+CREATE TABLE t1 (
+ID int(11) NOT NULL auto_increment,
+NO int(11) NOT NULL default '0',
+SEQ int(11) NOT NULL default '0',
+PRIMARY KEY (ID),
+KEY t1$NO (SEQ,NO)
+) ENGINE=MyISAM;
+INSERT INTO t1 (SEQ, NO) SELECT "1" AS SEQ, IF(MAX(NO) IS NULL, 0, MAX(NO)) + 1 AS NO FROM t1 WHERE (SEQ = 1);
+select SQL_BUFFER_RESULT * from t1 WHERE (SEQ = 1);
+ID NO SEQ
+1 1 1
+drop table t1;
diff --git a/mysql-test/r/limit.result b/mysql-test/r/limit.result
index c82105e6a49..6a3d2bffab0 100644
--- a/mysql-test/r/limit.result
+++ b/mysql-test/r/limit.result
@@ -67,3 +67,12 @@ SELECT * FROM t1;
id id2
3 0
DROP TABLE t1;
+create table t1 (a integer);
+insert into t1 values (1);
+select 1 as a from t1 union all select 1 from dual limit 1;
+a
+1
+(select 1 as a from t1) union all (select 1 from dual) limit 1;
+a
+1
+drop table t1;
diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result
index 6ec5338acbe..a6396080ef0 100644
--- a/mysql-test/r/ndb_basic.result
+++ b/mysql-test/r/ndb_basic.result
@@ -573,3 +573,37 @@ select * from t1 where a12345678901234567890123456789a1234567890=2;
a1234567890123456789012345678901234567890 a12345678901234567890123456789a1234567890
5 2
drop table t1;
+create table t1
+(a bigint, b bigint, c bigint, d bigint,
+primary key (a,b,c,d))
+engine=ndb
+max_rows=200000000;
+Warnings:
+Warning 1105 Ndb might have problems storing the max amount of rows specified
+insert into t1 values
+(1,2,3,4),(2,3,4,5),(3,4,5,6),
+(3,2,3,4),(1,3,4,5),(2,4,5,6),
+(1,2,3,5),(2,3,4,8),(3,4,5,9),
+(3,2,3,5),(1,3,4,8),(2,4,5,9),
+(1,2,3,6),(2,3,4,6),(3,4,5,7),
+(3,2,3,6),(1,3,4,6),(2,4,5,7),
+(1,2,3,7),(2,3,4,7),(3,4,5,8),
+(3,2,3,7),(1,3,4,7),(2,4,5,8),
+(1,3,3,4),(2,4,4,5),(3,5,5,6),
+(3,3,3,4),(1,4,4,5),(2,5,5,6),
+(1,3,3,5),(2,4,4,8),(3,5,5,9),
+(3,3,3,5),(1,4,4,8),(2,5,5,9),
+(1,3,3,6),(2,4,4,6),(3,5,5,7),
+(3,3,3,6),(1,4,4,6),(2,5,5,7),
+(1,3,3,7),(2,4,4,7),(3,5,5,8),
+(3,3,3,7),(1,4,4,7),(2,5,5,8);
+select count(*) from t1;
+count(*)
+48
+drop table t1;
+create table t1
+(a bigint, b bigint, c bigint, d bigint,
+primary key (a))
+engine=ndb
+max_rows=1;
+drop table t1;
diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result
index 437fd624ae1..03dcc23c919 100644
--- a/mysql-test/r/subselect.result
+++ b/mysql-test/r/subselect.result
@@ -2196,15 +2196,3 @@ ERROR 42S22: Reference 'xx' not supported (forward reference in item list)
select 1 = ALL (select 1 from t1 where 1 = xx ), 1 as xx from DUAL;
ERROR 42S22: Reference 'xx' not supported (forward reference in item list)
drop table t1;
-CREATE TABLE `t1` ( `a` char(3) NOT NULL default '', `b` char(3) NOT NULL default '', `c` char(3) NOT NULL default '', PRIMARY KEY (`a`,`b`,`c`)) ENGINE=InnoDB;
-CREATE TABLE t2 LIKE t1;
-INSERT INTO t1 VALUES (1,1,1);
-INSERT INTO t2 VALUES (1,1,1);
-PREPARE my_stmt FROM "SELECT t1.b, count(*) FROM t1 group by t1.b having
-count(*) > ALL (SELECT COUNT(*) FROM t2 WHERE t2.a=1 GROUP By t2.b)";
-EXECUTE my_stmt;
-b count(*)
-EXECUTE my_stmt;
-b count(*)
-deallocate prepare my_stmt;
-drop table t1,t2;
diff --git a/mysql-test/r/subselect_innodb.result b/mysql-test/r/subselect_innodb.result
index 0b813a07a1d..0666fd76661 100644
--- a/mysql-test/r/subselect_innodb.result
+++ b/mysql-test/r/subselect_innodb.result
@@ -140,3 +140,15 @@ id date1 coworkerid description sum_used sum_remaining comments
6 2004-01-01 1 test 22 33 comment
7 2004-01-01 1 test 22 33 comment
drop table t1;
+CREATE TABLE `t1` ( `a` char(3) NOT NULL default '', `b` char(3) NOT NULL default '', `c` char(3) NOT NULL default '', PRIMARY KEY (`a`,`b`,`c`)) ENGINE=InnoDB;
+CREATE TABLE t2 LIKE t1;
+INSERT INTO t1 VALUES (1,1,1);
+INSERT INTO t2 VALUES (1,1,1);
+PREPARE my_stmt FROM "SELECT t1.b, count(*) FROM t1 group by t1.b having
+count(*) > ALL (SELECT COUNT(*) FROM t2 WHERE t2.a=1 GROUP By t2.b)";
+EXECUTE my_stmt;
+b count(*)
+EXECUTE my_stmt;
+b count(*)
+deallocate prepare my_stmt;
+drop table t1,t2;
diff --git a/mysql-test/r/type_date.result b/mysql-test/r/type_date.result
index 71d1b9ad381..3428b5969d9 100644
--- a/mysql-test/r/type_date.result
+++ b/mysql-test/r/type_date.result
@@ -96,3 +96,11 @@ f2
19781126
19781126
DROP TABLE t1, t2, t3;
+CREATE TABLE t1 (y YEAR);
+INSERT INTO t1 VALUES ('abc');
+Warnings:
+Warning 1265 Data truncated for column 'y' at row 1
+SELECT * FROM t1;
+y
+0000
+DROP TABLE t1;
diff --git a/mysql-test/r/type_float.result.es b/mysql-test/r/type_float.result.es
index b93539b6bea..5fcf9213f83 100644
--- a/mysql-test/r/type_float.result.es
+++ b/mysql-test/r/type_float.result.es
@@ -143,6 +143,15 @@ drop table t1;
create table t1 (f float(54));
ERROR 42000: Incorrect column specifier for column 'f'
drop table if exists t1;
+create table t1 (d1 double, d2 double unsigned);
+insert into t1 set d1 = -1.0;
+update t1 set d2 = d1;
+Warnings:
+Warning 1264 Data truncated; out of range for column 'd2' at row 1
+select * from t1;
+d1 d2
+-1 0
+drop table t1;
create table t1 (f float(4,3));
insert into t1 values (-11.0),(-11),("-11"),(11.0),(11),("11");
Warnings:
diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result
index f07bdad9021..115ef6a47f9 100644
--- a/mysql-test/r/union.result
+++ b/mysql-test/r/union.result
@@ -1137,3 +1137,39 @@ t1 CREATE TABLE `t1` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
drop table t2;
+create table t1(a1 int, f1 char(10));
+create table t2
+select f2,a1 from (select a1, CAST('2004-12-31' AS DATE) f2 from t1) a
+union
+select f2,a1 from (select a1, CAST('2004-12-31' AS DATE) f2 from t1) a
+order by f2, a1;
+show columns from t2;
+Field Type Null Key Default Extra
+f2 date YES NULL
+a1 int(11) YES NULL
+drop table t1, t2;
+create table t1 (f1 int);
+create table t2 (f1 int, f2 int ,f3 date);
+create table t3 (f1 int, f2 char(10));
+create table t4
+(
+select t2.f3 as sdate
+from t1
+left outer join t2 on (t1.f1 = t2.f1)
+inner join t3 on (t2.f2 = t3.f1)
+order by t1.f1, t3.f1, t2.f3
+)
+union
+(
+select cast('2004-12-31' as date) as sdate
+from t1
+left outer join t2 on (t1.f1 = t2.f1)
+inner join t3 on (t2.f2 = t3.f1)
+group by t1.f1
+order by t1.f1, t3.f1, t2.f3
+)
+order by sdate;
+show columns from t4;
+Field Type Null Key Default Extra
+sdate date YES NULL
+drop table t1, t2, t3, t4;
diff --git a/mysql-test/r/update.result b/mysql-test/r/update.result
index beab6105f79..ac370db9ecc 100644
--- a/mysql-test/r/update.result
+++ b/mysql-test/r/update.result
@@ -212,3 +212,10 @@ insert into t1 values (1, "t1c2-1", 10), (2, "t1c2-2", 20);
update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1";
update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1" where t1.c3 = 10;
drop table t1, t2;
+create table t1 (id int not null auto_increment primary key, id_str varchar(32));
+insert into t1 (id_str) values ("test");
+update t1 set id_str = concat(id_str, id) where id = last_insert_id();
+select * from t1;
+id id_str
+1 test1
+drop table t1;
diff --git a/mysql-test/r/user_var.result b/mysql-test/r/user_var.result
index 81846391795..041d1b836b7 100644
--- a/mysql-test/r/user_var.result
+++ b/mysql-test/r/user_var.result
@@ -109,8 +109,8 @@ select @a:=0;
select @a, @a:=@a+count(*), count(*), @a from t1 group by i;
@a @a:=@a+count(*) count(*) @a
0 1 1 0
-0 2 2 0
-0 3 3 0
+0 3 2 0
+0 6 3 0
select @a:=0;
@a:=0
0
diff --git a/mysql-test/t/compare.test b/mysql-test/t/compare.test
index e3c042e608a..bc20786227b 100644
--- a/mysql-test/t/compare.test
+++ b/mysql-test/t/compare.test
@@ -33,3 +33,5 @@ DROP TABLE t1;
# Bug #8134: Comparison against CHAR(31) at end of string
SELECT CHAR(31) = '', '' = CHAR(31);
+# Extra test
+SELECT CHAR(30) = '', '' = CHAR(30);
diff --git a/mysql-test/t/drop_temp_table.test b/mysql-test/t/drop_temp_table.test
index 1a7d8796bb3..dcd95721179 100644
--- a/mysql-test/t/drop_temp_table.test
+++ b/mysql-test/t/drop_temp_table.test
@@ -4,7 +4,9 @@ connection con1;
reset master;
create database `drop-temp+table-test`;
use `drop-temp+table-test`;
+create temporary table shortn1 (a int);
create temporary table `table:name` (a int);
+create temporary table shortn2 (a int);
select get_lock("a",10);
disconnect con1;
@@ -15,5 +17,6 @@ connection con2;
select get_lock("a",10);
let $VERSION=`select version()`;
--replace_result $VERSION VERSION
+--replace_column 2 # 5 #
show binlog events;
drop database `drop-temp+table-test`;
diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test
index 79d6112e6de..465611a5ebb 100644
--- a/mysql-test/t/func_group.test
+++ b/mysql-test/t/func_group.test
@@ -473,3 +473,17 @@ INSERT INTO t1 VALUES
select val, count(*) from t1 group by val;
drop table t1;
+
+
+#
+# Bug 7833: Wrong datatype of aggregate column is returned
+#
+
+create table t1(f1 datetime);
+insert into t1 values (now());
+create table t2 select f2 from (select max(now()) f2 from t1) a;
+show columns from t2;
+drop table t2;
+create table t2 select f2 from (select now() f2 from t1) a;
+show columns from t2;
+drop table t2, t1;
diff --git a/mysql-test/t/func_misc.test b/mysql-test/t/func_misc.test
index e73f2a1b26c..89aba7ee583 100644
--- a/mysql-test/t/func_misc.test
+++ b/mysql-test/t/func_misc.test
@@ -23,3 +23,18 @@ select length(format('nan', 2)) > 0;
# Test for bug #628
#
select concat("$",format(2500,2));
+
+# Test for BUG#7716
+create table t1 ( a timestamp );
+insert into t1 values ( '2004-01-06 12:34' );
+select a from t1 where left(a+0,6) in ( left(20040106,6) );
+select a from t1 where left(a+0,6) = ( left(20040106,6) );
+
+select a from t1 where right(a+0,6) in ( right(20040106123400,6) );
+select a from t1 where right(a+0,6) = ( right(20040106123400,6) );
+
+select a from t1 where mid(a+0,6,3) in ( mid(20040106123400,6,3) );
+select a from t1 where mid(a+0,6,3) = ( mid(20040106123400,6,3) );
+
+drop table t1;
+
diff --git a/mysql-test/t/func_str.test b/mysql-test/t/func_str.test
index 34bbb2bab0f..6d5974ca5ed 100644
--- a/mysql-test/t/func_str.test
+++ b/mysql-test/t/func_str.test
@@ -195,6 +195,18 @@ select trim(trailing 'foo' from 'foo');
select trim(leading 'foo' from 'foo');
#
+# crashing bug with QUOTE() and LTRIM() or TRIM() fixed
+# Bug #7495
+#
+
+select quote(ltrim(concat(' ', 'a')));
+select quote(trim(concat(' ', 'a')));
+
+# Bad results from QUOTE(). Bug #8248
+CREATE TABLE t1 SELECT 1 UNION SELECT 2 UNION SELECT 3;
+SELECT QUOTE('A') FROM t1;
+DROP TABLE t1;
+
# Test collation and coercibility
#
@@ -429,12 +441,6 @@ create table t1 (a int not null primary key, b varchar(40), c datetime);
insert into t1 (a,b,c) values (1,'Tom','2004-12-10 12:13:14'),(2,'ball games','2004-12-10 12:13:14'), (3,'Basil','2004-12-10 12:13:14'), (4,'Dean','2004-12-10 12:13:14'),(5,'Ellis','2004-12-10 12:13:14'), (6,'Serg','2004-12-10 12:13:14'), (7,'Sergei','2004-12-10 12:13:14'),(8,'Georg','2004-12-10 12:13:14'),(9,'Salle','2004-12-10 12:13:14'),(10,'Sinisa','2004-12-10 12:13:14');
select count(*) as total, left(c,10) as reg from t1 group by reg order by reg desc limit 0,12;
drop table t1;
-# crashing bug with QUOTE() and LTRIM() or TRIM() fixed
-# Bug #7495
-#
-
-select quote(ltrim(concat(' ', 'a')));
-select quote(trim(concat(' ', 'a')));
#
# Bug#7455 unexpected result: TRIM(<NULL> FROM <whatever>) gives NOT NULL
diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test
index c0447b06303..379f668df1a 100644
--- a/mysql-test/t/group_by.test
+++ b/mysql-test/t/group_by.test
@@ -457,6 +457,14 @@ SELECT i, COUNT(DISTINCT(i)) FROM t1 GROUP BY j ORDER BY NULL;
explain SELECT i, COUNT(DISTINCT(i)) FROM t1 GROUP BY j ORDER BY NULL;
DROP TABLE t1;
+#Test for BUG#6976: Aggregate functions have incorrect NULL-ness
+create table t1 (a int);
+insert into t1 values(null);
+select min(a) is null from t1;
+select min(a) is null or null from t1;
+select 1 and min(a) is null from t1;
+drop table t1;
+
# Test for BUG#5400: GROUP_CONCAT returns everything twice.
create table t1 ( col1 int, col2 int );
insert into t1 values (1,1),(1,2),(1,3),(2,1),(2,2);
@@ -465,12 +473,18 @@ select group_concat( distinct col1 ) as alias from t1
drop table t1;
+#
+# Test BUG#8216 when referring in HAVING to an alias which is a rand() function
+#
-#Test for BUG#6976: Aggregate functions have incorrect NULL-ness
-create table t1 (a int);
-insert into t1 values(null);
-select min(a) is null from t1;
-select min(a) is null or null from t1;
-select 1 and min(a) is null from t1;
+create table t1 (a integer, b integer, c integer);
+insert into t1 (a,b) values (1,2),(1,3),(2,5);
+select a, 0.1*0+1 r2, sum(1) r1 from t1 where a = 1 group by a having r1>1 and r2=1;
+select a, rand()*0+1 r2, sum(1) r1 from t1 where a = 1 group by a having r1>1 and r2=1;
+select a,sum(b) from t1 where a=1 group by c;
+select a*sum(b) from t1 where a=1 group by c;
+select sum(a)*sum(b) from t1 where a=1 group by c;
+select a,sum(b) from t1 where a=1 group by c having a=1;
+select a as d,sum(b) from t1 where a=1 group by c having d=1;
+select sum(a)*sum(b) as d from t1 where a=1 group by c having d > 0;
drop table t1;
-
diff --git a/mysql-test/t/limit.test b/mysql-test/t/limit.test
index 61c57c9b468..28b287a5d4a 100644
--- a/mysql-test/t/limit.test
+++ b/mysql-test/t/limit.test
@@ -49,3 +49,13 @@ SELECT * FROM t1;
DELETE FROM t1 WHERE id2 = 0 ORDER BY id desc LIMIT 1;
SELECT * FROM t1;
DROP TABLE t1;
+
+#
+# Bug#8023 - LIMIT on a UNION with FROM DUAL causes a syntax error
+#
+create table t1 (a integer);
+insert into t1 values (1);
+# both queries must return one row
+select 1 as a from t1 union all select 1 from dual limit 1;
+(select 1 as a from t1) union all (select 1 from dual) limit 1;
+drop table t1;
diff --git a/mysql-test/t/ndb_autodiscover.test b/mysql-test/t/ndb_autodiscover.test
index 6551732adba..037115f5e82 100644
--- a/mysql-test/t/ndb_autodiscover.test
+++ b/mysql-test/t/ndb_autodiscover.test
@@ -199,7 +199,7 @@ insert into t4 values (1, "Automatic");
select * from t4;
# Remove the table from NDB
-system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t4 > /dev/null ;
+system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t4 >> $NDB_TOOLS_OUTPUT ;
#
# Test that correct error is returned
@@ -230,7 +230,7 @@ select * from t4;
flush tables;
# Remove the table from NDB
-system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t4 > /dev/null ;
+system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t4 >> $NDB_TOOLS_OUTPUT ;
SHOW TABLES;
@@ -264,8 +264,8 @@ insert into t8 values (8, "myisam table 8");
insert into t9 values (9);
# Remove t3, t5 from NDB
-system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t3 > /dev/null ;
-system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t5 > /dev/null ;
+system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t3 >> $NDB_TOOLS_OUTPUT ;
+system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t5 >> $NDB_TOOLS_OUTPUT ;
# Remove t6, t7 from disk
system rm var/master-data/test/t6.frm > /dev/null ;
system rm var/master-data/test/t7.frm > /dev/null ;
@@ -479,4 +479,4 @@ create table t10 (
insert into t10 values (1, 'kalle');
---exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test `$NDB_TOOLS_DIR/ndb_show_tables --no-defaults | grep BLOB` > /dev/null 2>&1 || true
+--exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test `$NDB_TOOLS_DIR/ndb_show_tables --no-defaults | grep BLOB` >> $NDB_TOOLS_OUTPUT 2>&1 || true
diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test
index 2671223ada8..f460c573a9d 100644
--- a/mysql-test/t/ndb_basic.test
+++ b/mysql-test/t/ndb_basic.test
@@ -539,3 +539,41 @@ insert into t1 values (1,1),(2,1),(3,1),(4,1),(5,2),(6,1),(7,1);
explain select * from t1 where a12345678901234567890123456789a1234567890=2;
select * from t1 where a12345678901234567890123456789a1234567890=2;
drop table t1;
+
+#
+# test fragment creation
+#
+# first a table with _many_ fragments per node group
+# then a table with just one fragment per node group
+#
+create table t1
+ (a bigint, b bigint, c bigint, d bigint,
+ primary key (a,b,c,d))
+ engine=ndb
+ max_rows=200000000;
+insert into t1 values
+ (1,2,3,4),(2,3,4,5),(3,4,5,6),
+ (3,2,3,4),(1,3,4,5),(2,4,5,6),
+ (1,2,3,5),(2,3,4,8),(3,4,5,9),
+ (3,2,3,5),(1,3,4,8),(2,4,5,9),
+ (1,2,3,6),(2,3,4,6),(3,4,5,7),
+ (3,2,3,6),(1,3,4,6),(2,4,5,7),
+ (1,2,3,7),(2,3,4,7),(3,4,5,8),
+ (3,2,3,7),(1,3,4,7),(2,4,5,8),
+ (1,3,3,4),(2,4,4,5),(3,5,5,6),
+ (3,3,3,4),(1,4,4,5),(2,5,5,6),
+ (1,3,3,5),(2,4,4,8),(3,5,5,9),
+ (3,3,3,5),(1,4,4,8),(2,5,5,9),
+ (1,3,3,6),(2,4,4,6),(3,5,5,7),
+ (3,3,3,6),(1,4,4,6),(2,5,5,7),
+ (1,3,3,7),(2,4,4,7),(3,5,5,8),
+ (3,3,3,7),(1,4,4,7),(2,5,5,8);
+select count(*) from t1;
+drop table t1;
+
+create table t1
+ (a bigint, b bigint, c bigint, d bigint,
+ primary key (a))
+ engine=ndb
+ max_rows=1;
+drop table t1;
diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test
index 09939ec119d..d413453fb0e 100644
--- a/mysql-test/t/ndb_restore.test
+++ b/mysql-test/t/ndb_restore.test
@@ -141,10 +141,10 @@ create table t8_c engine=ndbcluster as select * from t8;
create table t9_c engine=ndbcluster as select * from t9;
---exec $NDB_MGM --no-defaults -e "start backup" > /dev/null
+--exec $NDB_MGM --no-defaults -e "start backup" >> $NDB_TOOLS_OUTPUT
drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-1 > /tmp/ndb_restore.out
---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-1 > /tmp/ndb_restore.out
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-1 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-1 >> $NDB_TOOLS_OUTPUT
show tables;
diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test
index cdec080611d..55400dae0be 100644
--- a/mysql-test/t/subselect.test
+++ b/mysql-test/t/subselect.test
@@ -1465,17 +1465,3 @@ select 1 = ALL (select 1 from t1 where 1 = xx ), 1 as xx;
-- error 1247
select 1 = ALL (select 1 from t1 where 1 = xx ), 1 as xx from DUAL;
drop table t1;
-
-#
-# cleaning up of results of subselects (BUG#8125)
-#
-CREATE TABLE `t1` ( `a` char(3) NOT NULL default '', `b` char(3) NOT NULL default '', `c` char(3) NOT NULL default '', PRIMARY KEY (`a`,`b`,`c`)) ENGINE=InnoDB;
-CREATE TABLE t2 LIKE t1;
-INSERT INTO t1 VALUES (1,1,1);
-INSERT INTO t2 VALUES (1,1,1);
-PREPARE my_stmt FROM "SELECT t1.b, count(*) FROM t1 group by t1.b having
-count(*) > ALL (SELECT COUNT(*) FROM t2 WHERE t2.a=1 GROUP By t2.b)";
-EXECUTE my_stmt;
-EXECUTE my_stmt;
-deallocate prepare my_stmt;
-drop table t1,t2;
diff --git a/mysql-test/t/subselect_innodb.test b/mysql-test/t/subselect_innodb.test
index aa7fe138876..5d796988178 100644
--- a/mysql-test/t/subselect_innodb.test
+++ b/mysql-test/t/subselect_innodb.test
@@ -145,3 +145,17 @@ SELECT DISTINCT
FROM t1;
select * from t1;
drop table t1;
+
+#
+# cleaning up of results of subselects (BUG#8125)
+#
+CREATE TABLE `t1` ( `a` char(3) NOT NULL default '', `b` char(3) NOT NULL default '', `c` char(3) NOT NULL default '', PRIMARY KEY (`a`,`b`,`c`)) ENGINE=InnoDB;
+CREATE TABLE t2 LIKE t1;
+INSERT INTO t1 VALUES (1,1,1);
+INSERT INTO t2 VALUES (1,1,1);
+PREPARE my_stmt FROM "SELECT t1.b, count(*) FROM t1 group by t1.b having
+count(*) > ALL (SELECT COUNT(*) FROM t2 WHERE t2.a=1 GROUP By t2.b)";
+EXECUTE my_stmt;
+EXECUTE my_stmt;
+deallocate prepare my_stmt;
+drop table t1,t2;
diff --git a/mysql-test/t/type_date.test b/mysql-test/t/type_date.test
index 64420a85189..304ed19b971 100644
--- a/mysql-test/t/type_date.test
+++ b/mysql-test/t/type_date.test
@@ -107,3 +107,10 @@ SELECT * FROM t2;
SELECT * FROM t3;
DROP TABLE t1, t2, t3;
+
+# Test that setting YEAR to invalid string results in default value, not
+# 2000. (Bug #6067)
+CREATE TABLE t1 (y YEAR);
+INSERT INTO t1 VALUES ('abc');
+SELECT * FROM t1;
+DROP TABLE t1;
diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test
index 8682808f3f3..90b2197603b 100644
--- a/mysql-test/t/union.test
+++ b/mysql-test/t/union.test
@@ -664,3 +664,38 @@ show create table t1;
drop table t1;
drop table t2;
+#
+# Bug 6931: Date type column problem when creating a table from a UNION.
+#
+create table t1(a1 int, f1 char(10));
+create table t2
+select f2,a1 from (select a1, CAST('2004-12-31' AS DATE) f2 from t1) a
+union
+select f2,a1 from (select a1, CAST('2004-12-31' AS DATE) f2 from t1) a
+order by f2, a1;
+show columns from t2;
+drop table t1, t2;
+
+create table t1 (f1 int);
+create table t2 (f1 int, f2 int ,f3 date);
+create table t3 (f1 int, f2 char(10));
+create table t4
+(
+ select t2.f3 as sdate
+ from t1
+ left outer join t2 on (t1.f1 = t2.f1)
+ inner join t3 on (t2.f2 = t3.f1)
+ order by t1.f1, t3.f1, t2.f3
+)
+union
+(
+ select cast('2004-12-31' as date) as sdate
+ from t1
+ left outer join t2 on (t1.f1 = t2.f1)
+ inner join t3 on (t2.f2 = t3.f1)
+ group by t1.f1
+ order by t1.f1, t3.f1, t2.f3
+)
+order by sdate;
+show columns from t4;
+drop table t1, t2, t3, t4;
diff --git a/mysql-test/t/update.test b/mysql-test/t/update.test
index 704263b1216..04192f25ac8 100644
--- a/mysql-test/t/update.test
+++ b/mysql-test/t/update.test
@@ -170,3 +170,12 @@ insert into t1 values (1, "t1c2-1", 10), (2, "t1c2-2", 20);
update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1";
update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1" where t1.c3 = 10;
drop table t1, t2;
+
+#
+# Bug #8057
+#
+create table t1 (id int not null auto_increment primary key, id_str varchar(32));
+insert into t1 (id_str) values ("test");
+update t1 set id_str = concat(id_str, id) where id = last_insert_id();
+select * from t1;
+drop table t1;
diff --git a/mysys/my_handler.c b/mysys/my_handler.c
index df1e9e55e0a..5ee181ca78e 100644
--- a/mysys/my_handler.c
+++ b/mysys/my_handler.c
@@ -43,7 +43,7 @@ static int compare_bin(uchar *a, uint a_length, uchar *b, uint b_length,
return 0;
if (skip_end_space && a_length != b_length)
{
- int swap= 0;
+ int swap= 1;
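+    /*
+      swap is the value to return when the longer key's first non-space
+      tail byte is above ' ', and -swap when it is below; it is negated
+      further down if a and b are exchanged, so the sign always refers
+      to the original arguments.
+    */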
/*
We are using space compression. We have to check if longer key
has next character < ' ', in which case it's less than the shorter
@@ -57,12 +57,12 @@ static int compare_bin(uchar *a, uint a_length, uchar *b, uint b_length,
/* put shorter key in a */
a_length= b_length;
a= b;
- swap= -1 ^ 1; /* swap sign of result */
+ swap= -1; /* swap sign of result */
}
for (end= a + a_length-length; a < end ; a++)
{
if (*a != ' ')
- return ((int) *a - (int) ' ') ^ swap;
+ return (*a < ' ') ? -swap : swap;
}
return 0;
}
diff --git a/ndb/include/ndbapi/NdbConnection.hpp b/ndb/include/ndbapi/NdbConnection.hpp
index 166355cae17..53830dd93c5 100644
--- a/ndb/include/ndbapi/NdbConnection.hpp
+++ b/ndb/include/ndbapi/NdbConnection.hpp
@@ -687,6 +687,8 @@ private:
void remove_list(NdbOperation*& head, NdbOperation*);
void define_scan_op(NdbIndexScanOperation*);
+
+ friend class HugoOperations;
};
inline
diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp
index 0dca1c0f106..49afbd695c9 100644
--- a/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/ndb/include/ndbapi/NdbDictionary.hpp
@@ -141,9 +141,9 @@ public:
enum FragmentType {
FragUndefined = 0, ///< Fragmentation type undefined or default
FragSingle = 1, ///< Only one fragment
- FragAllSmall = 2, ///< One fragment per node group
- FragAllMedium = 3, ///< Default value. Two fragments per node group.
- FragAllLarge = 4 ///< Eight fragments per node group.
+    FragAllSmall = 2,   ///< One fragment per node (default)
+    FragAllMedium = 3,  ///< Two fragments per node
+    FragAllLarge = 4    ///< Four fragments per node
};
};
diff --git a/ndb/include/portlib/NdbThread.h b/ndb/include/portlib/NdbThread.h
index 212f7de9384..e86deee4354 100644
--- a/ndb/include/portlib/NdbThread.h
+++ b/ndb/include/portlib/NdbThread.h
@@ -76,7 +76,7 @@ int NdbThread_WaitFor(struct NdbThread* p_wait_thread, void** status);
*
* * status: exit code
*/
-void NdbThread_Exit(int status);
+void NdbThread_Exit(void *status);
/**
* Set thread concurrency level
diff --git a/ndb/src/common/portlib/NdbPortLibTest.cpp b/ndb/src/common/portlib/NdbPortLibTest.cpp
index 55b9ccec5f2..d7892411851 100644
--- a/ndb/src/common/portlib/NdbPortLibTest.cpp
+++ b/ndb/src/common/portlib/NdbPortLibTest.cpp
@@ -54,10 +54,7 @@ extern "C" void* thread1func(void* arg)
if (arg1 != 7)
fail("TEST1", "Wrong arg");
- NdbThread_Exit(returnvalue);
-
- return NULL;
-
+  return (void*)returnvalue; /* cast the integer result to the void* thread return */
}
// test 2 variables and funcs
@@ -80,10 +77,7 @@ extern "C" void* test2func(void* arg)
fail("TEST2", "Failed to unlock mutex");
int returnvalue = arg1;
- NdbThread_Exit(returnvalue);
-
- return NULL;
-
+  return (void*)returnvalue; /* cast the integer result to the void* thread return */
}
@@ -129,8 +123,7 @@ extern "C" void* testfunc(void* arg)
}
while(tmpVar<100);
- NdbThread_Exit(0);
- return NULL;
+ return 0;
}
extern "C" void* testTryLockfunc(void* arg)
@@ -169,8 +162,7 @@ extern "C" void* testTryLockfunc(void* arg)
}
while(tmpVar<100);
- NdbThread_Exit(0);
- return NULL;
+ return 0;
}
diff --git a/ndb/src/common/portlib/NdbThread.c b/ndb/src/common/portlib/NdbThread.c
index 5f2e6021c43..aaee9b45069 100644
--- a/ndb/src/common/portlib/NdbThread.c
+++ b/ndb/src/common/portlib/NdbThread.c
@@ -17,7 +17,7 @@
#include <ndb_global.h>
#include <NdbThread.h>
-#include <pthread.h>
+#include <my_pthread.h>
#include <NdbMem.h>
#define MAX_THREAD_NAME 16
@@ -39,21 +39,28 @@ struct NdbThread
static
void*
ndb_thread_wrapper(void* _ss){
- void * ret;
- struct NdbThread * ss = (struct NdbThread *)_ss;
- DBUG_ENTER("ndb_thread_wrapper");
-#ifdef NDB_SHM_TRANSPORTER
- if (g_ndb_shm_signum)
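+  /*
+    my_thread_init() here, together with the my_thread_end() now done in
+    NdbThread_Exit() below, brackets every NDB thread; the individual
+    thread functions no longer call these (or NdbThread_Exit) themselves.
+  */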
+ my_thread_init();
{
- sigset_t mask;
- DBUG_PRINT("info",("Block signum %d",g_ndb_shm_signum));
- sigemptyset(&mask);
- sigaddset(&mask, g_ndb_shm_signum);
- pthread_sigmask(SIG_BLOCK, &mask, 0);
- }
+ DBUG_ENTER("ndb_thread_wrapper");
+#ifdef NDB_SHM_TRANSPORTER
+ if (g_ndb_shm_signum)
+ {
+ sigset_t mask;
+ DBUG_PRINT("info",("Block signum %d",g_ndb_shm_signum));
+ sigemptyset(&mask);
+ sigaddset(&mask, g_ndb_shm_signum);
+ pthread_sigmask(SIG_BLOCK, &mask, 0);
+ }
#endif
- ret= (* ss->func)(ss->object);
- DBUG_RETURN(ret);
+ {
+ void *ret;
+ struct NdbThread * ss = (struct NdbThread *)_ss;
+ ret= (* ss->func)(ss->object);
+ NdbThread_Exit(ret);
+ }
+ /* will never be reached */
+ DBUG_RETURN(0);
+ }
}
@@ -130,9 +137,10 @@ int NdbThread_WaitFor(struct NdbThread* p_wait_thread, void** status)
}
-void NdbThread_Exit(int status)
+void NdbThread_Exit(void *status)
{
- pthread_exit(&status);
+ my_thread_end();
+ pthread_exit(status);
}
diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp
index 462cde76740..439730435ec 100644
--- a/ndb/src/common/transporter/TransporterRegistry.cpp
+++ b/ndb/src/common/transporter/TransporterRegistry.cpp
@@ -1104,11 +1104,8 @@ TransporterRegistry::setIOState(NodeId nodeId, IOState state) {
static void *
run_start_clients_C(void * me)
{
- my_thread_init();
((TransporterRegistry*) me)->start_clients_thread();
- my_thread_end();
- NdbThread_Exit(0);
- return me;
+ return 0;
}
// Run by kernel thread
diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp
index 6b23da774af..53fa5d69215 100644
--- a/ndb/src/common/util/NdbSqlUtil.cpp
+++ b/ndb/src/common/util/NdbSqlUtil.cpp
@@ -526,6 +526,7 @@ NdbSqlUtil::cmpDate(const void* info, const Uint32* p1, const Uint32* p2, Uint32
union { const Uint32* p; const unsigned char* v; } u1, u2;
u1.p = p1;
u2.p = p2;
+#ifdef ndb_date_sol9x86_cc_xO3_madness
// from Field_newdate::val_int
Uint64 j1 = uint3korr(u1.v);
Uint64 j2 = uint3korr(u2.v);
@@ -536,6 +537,33 @@ NdbSqlUtil::cmpDate(const void* info, const Uint32* p1, const Uint32* p2, Uint32
if (j1 > j2)
return +1;
return 0;
+#else
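+  // Unpack the 3-byte DATE (low 5 bits = day, next 4 bits = month, the rest
+  // = year) and compare the fields as plain unsigned ints; this sidesteps the
+  // 64-bit path above, which the #ifdef name suggests sol9x86 cc miscompiles
+  // at -xO3.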
+ uint j1 = uint3korr(u1.v);
+ uint j2 = uint3korr(u2.v);
+ uint d1 = (j1 & 31);
+ uint d2 = (j2 & 31);
+ j1 = (j1 >> 5);
+ j2 = (j2 >> 5);
+ uint m1 = (j1 & 15);
+ uint m2 = (j2 & 15);
+ j1 = (j1 >> 4);
+ j2 = (j2 >> 4);
+ uint y1 = j1;
+ uint y2 = j2;
+ if (y1 < y2)
+ return -1;
+ if (y1 > y2)
+ return +1;
+ if (m1 < m2)
+ return -1;
+ if (m1 > m2)
+ return +1;
+ if (d1 < d2)
+ return -1;
+ if (d1 > d2)
+ return +1;
+ return 0;
+#endif
#endif
}
diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp
index 8bee256684d..da06389b5df 100644
--- a/ndb/src/common/util/SocketServer.cpp
+++ b/ndb/src/common/util/SocketServer.cpp
@@ -186,11 +186,7 @@ extern "C"
void*
socketServerThread_C(void* _ss){
SocketServer * ss = (SocketServer *)_ss;
-
- my_thread_init();
ss->doRun();
- my_thread_end();
- NdbThread_Exit(0);
return 0;
}
@@ -309,11 +305,8 @@ void*
sessionThread_C(void* _sc){
SocketServer::Session * si = (SocketServer::Session *)_sc;
- my_thread_init();
if(!transfer(si->m_socket)){
si->m_stopped = true;
- my_thread_end();
- NdbThread_Exit(0);
return 0;
}
@@ -325,8 +318,6 @@ sessionThread_C(void* _sc){
}
si->m_stopped = true;
- my_thread_end();
- NdbThread_Exit(0);
return 0;
}
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index 5c7cc597672..a82c96beebd 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -5704,7 +5704,8 @@ void Dbacc::commitOperation(Signal* signal)
Uint32 tmp2Olq;
if ((operationRecPtr.p->commitDeleteCheckFlag == ZFALSE) &&
- (operationRecPtr.p->operation != ZSCAN_OP)) {
+ (operationRecPtr.p->operation != ZSCAN_OP) &&
+ (operationRecPtr.p->operation != ZREAD)) {
jam();
/* This method is used to check whether the end result of the transaction
will be to delete the tuple. In this case all operation will be marked
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index dba1efbba9a..0bc8351a9db 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -6178,7 +6178,7 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
break;
case DictTabInfo::AllNodesMediumTable:
jam();
- noOfFragments = csystemnodes;
+ noOfFragments = 2 * csystemnodes;
break;
case DictTabInfo::AllNodesLargeTable:
jam();
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 8bbbc72a38d..c79f4dfc6c7 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -8719,13 +8719,14 @@ void Dblqh::finishScanrec(Signal* signal)
return;
}
+ ndbrequire(restart.p->scanState == ScanRecord::IN_QUEUE);
+
ScanRecordPtr tmpScan = scanptr;
TcConnectionrecPtr tmpTc = tcConnectptr;
tcConnectptr.i = restart.p->scanTcrec;
ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
restart.p->scanNumber = scanNumber;
- restart.p->scanState = ScanRecord::WAIT_ACC_SCAN;
queue.remove(restart);
scans.add(restart);
@@ -8740,10 +8741,18 @@ void Dblqh::finishScanrec(Signal* signal)
ndbout_c("adding-r (%d %d)", restart.p->scanNumber, restart.p->fragPtrI);
#endif
}
-
- scanptr = restart;
- continueAfterReceivingAllAiLab(signal);
-
+
+ restart.p->scanState = ScanRecord::SCAN_FREE; // set in initScanRec
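+  // Only continue the queued scan if all of its attribute info has arrived
+  // (SCAN_STATE_USED); if it is still in WAIT_SCAN_AI it is presumably
+  // resumed once the remaining ATTRINFO has been received.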
+ if(tcConnectptr.p->transactionState == TcConnectionrec::SCAN_STATE_USED)
+ {
+ jam();
+ scanptr = restart;
+ continueAfterReceivingAllAiLab(signal);
+ }
+ else
+ {
+ ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::WAIT_SCAN_AI);
+ }
scanptr = tmpScan;
tcConnectptr = tmpTc;
}//Dblqh::finishScanrec()
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index c804fa32bd2..97931041e2a 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -9028,6 +9028,8 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal)
scanFragptr.p->lqhBlockref = ref;
scanFragptr.p->m_connectCount = getNodeInfo(tnodeid).m_connectCount;
sendScanFragReq(signal, scanptr.p, scanFragptr.p);
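+      /* error insert 8035 (used by testScan Scan-bug8262) forces an immediate
+         send here and between the ATTRINFO signals below, so the scan request
+         can reach LQH before all of its attribute info has arrived */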
+ if(ERROR_INSERTED(8035))
+ globalTransporterRegistry.performSend();
attrbufptr.i = cachePtr.p->firstAttrbuf;
while (attrbufptr.i != RNIL) {
jam();
@@ -9037,6 +9039,8 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal)
attrbufptr.p,
ref);
attrbufptr.i = attrbufptr.p->attrbuf[ZINBUF_NEXT];
+ if(ERROR_INSERTED(8035))
+ globalTransporterRegistry.performSend();
}//while
scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
scanFragptr.p->startFragTimer(ctcTimer);
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
index ad6c0fd5283..f76440a462a 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
@@ -82,7 +82,6 @@ static int numAsyncFiles = 0;
extern "C" void * runAsyncFile(void* arg)
{
- my_thread_init();
((AsyncFile*)arg)->run();
return (NULL);
}
@@ -876,8 +875,6 @@ void AsyncFile::endReq()
{
// Thread is ended with return
if (theWriteBuffer) NdbMem_Free(theWriteBuffer);
- my_thread_end();
- NdbThread_Exit(0);
}
diff --git a/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp b/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp
index aeab9f7828d..b98c60693f4 100644
--- a/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp
@@ -40,7 +40,6 @@ extern "C" void* runProducer(void*arg)
NdbSleep_MilliSleep(i);
i++;
}
- NdbThread_Exit(0);
return NULL;
}
@@ -58,7 +57,6 @@ extern "C" void* runConsumer(void* arg)
delete p;
}
- NdbThread_Exit(0);
return NULL;
}
@@ -92,7 +90,6 @@ extern "C" void* runProducer2(void*arg)
NdbSleep_MilliSleep(i);
i++;
}
- NdbThread_Exit(0);
return NULL;
}
@@ -111,7 +108,6 @@ extern "C" void* runConsumer2(void* arg)
delete p;
}
ndbout << "Consumer2: " << count << " received" << endl;
- NdbThread_Exit(0);
return NULL;
}
diff --git a/ndb/src/kernel/vm/FastScheduler.cpp b/ndb/src/kernel/vm/FastScheduler.cpp
index eca456d26dd..d05c02360a7 100644
--- a/ndb/src/kernel/vm/FastScheduler.cpp
+++ b/ndb/src/kernel/vm/FastScheduler.cpp
@@ -76,19 +76,26 @@ FastScheduler::activateSendPacked()
globalData.loopMax = 2048;
}//FastScheduler::activateSendPacked()
+//------------------------------------------------------------------------
+// sendPacked is executed at the end of the loop.
+// To ensure that we don't send any messages before executing all local
+// packed signals we do another turn in the loop (unless we have already
+// executed too many signals in the loop).
+//------------------------------------------------------------------------
void
FastScheduler::doJob()
{
+ Uint32 init_loopCount = 0;
+ Uint32 TminLoops = getBOccupancy() + EXTRA_SIGNALS_PER_DO_JOB;
+ Uint32 TloopMax = (Uint32)globalData.loopMax;
+ if (TminLoops < TloopMax) {
+ TloopMax = TminLoops;
+ }//if
+ if (TloopMax < MIN_NUMBER_OF_SIG_PER_DO_JOB) {
+ TloopMax = MIN_NUMBER_OF_SIG_PER_DO_JOB;
+ }//if
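+  // The do-loop below runs at least once and repeats while the job buffers
+  // stay above MAX_OCCUPANCY, or while runnable signals remain and this
+  // call's TloopMax budget is not yet spent (see the comment above doJob()).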
do{
- Uint32 loopCount = 0;
- Uint32 TminLoops = getBOccupancy() + EXTRA_SIGNALS_PER_DO_JOB;
- Uint32 TloopMax = (Uint32)globalData.loopMax;
- if (TminLoops < TloopMax) {
- TloopMax = TminLoops;
- }//if
- if (TloopMax < MIN_NUMBER_OF_SIG_PER_DO_JOB) {
- TloopMax = MIN_NUMBER_OF_SIG_PER_DO_JOB;
- }//if
+ Uint32 loopCount = init_loopCount;
register Uint32 tHighPrio = globalData.highestAvailablePrio;
register Signal* signal = getVMSignals();
while ((tHighPrio < LEVEL_IDLE) && (loopCount < TloopMax)) {
@@ -151,7 +158,7 @@ FastScheduler::doJob()
if (globalData.sendPackedActivated == 1) {
Uint32 t1 = theDoJobTotalCounter;
Uint32 t2 = theDoJobCallCounter;
- t1 += loopCount;
+ t1 += (loopCount - init_loopCount);
t2++;
theDoJobTotalCounter = t1;
theDoJobCallCounter = t2;
@@ -161,7 +168,11 @@ FastScheduler::doJob()
theDoJobTotalCounter = 0;
}//if
}//if
- } while (getBOccupancy() > MAX_OCCUPANCY);
+ init_loopCount = loopCount;
+ sendPacked();
+ } while ((getBOccupancy() > MAX_OCCUPANCY) ||
+ ((init_loopCount < TloopMax) &&
+ (globalData.highestAvailablePrio < LEVEL_IDLE)));
}//FastScheduler::doJob()
void FastScheduler::sendPacked()
diff --git a/ndb/src/kernel/vm/ThreadConfig.cpp b/ndb/src/kernel/vm/ThreadConfig.cpp
index 4844bb9a477..76fcc4ba84f 100644
--- a/ndb/src/kernel/vm/ThreadConfig.cpp
+++ b/ndb/src/kernel/vm/ThreadConfig.cpp
@@ -173,9 +173,6 @@ void ThreadConfig::ipControlLoop()
// until all buffers are empty or until we have executed 2048 signals.
//--------------------------------------------------------------------
globalScheduler.doJob();
-
- globalScheduler.sendPacked();
-
}//while
globalData.incrementWatchDogCounter(6);
diff --git a/ndb/src/kernel/vm/WatchDog.cpp b/ndb/src/kernel/vm/WatchDog.cpp
index 4e07dc1df90..23475a478d3 100644
--- a/ndb/src/kernel/vm/WatchDog.cpp
+++ b/ndb/src/kernel/vm/WatchDog.cpp
@@ -27,10 +27,7 @@
extern "C"
void*
runWatchDog(void* w){
- my_thread_init();
((WatchDog*)w)->run();
- my_thread_end();
- NdbThread_Exit(0);
return NULL;
}
diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp
index cbf7776fe06..025bed2bc09 100644
--- a/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/ndb/src/mgmclient/CommandInterpreter.cpp
@@ -457,8 +457,6 @@ event_thread_run(void* m)
{
NdbMgmHandle handle= *(NdbMgmHandle*)m;
- my_thread_init();
-
int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
int fd = ndb_mgm_listen_event(handle, filter);
if (fd > 0)
@@ -478,9 +476,7 @@ event_thread_run(void* m)
do_event_thread= -1;
}
- my_thread_end();
- NdbThread_Exit(0);
- return 0;
+ return NULL;
}
bool
diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp
index 9be4af1b9b5..07310e3a8b8 100644
--- a/ndb/src/mgmsrv/ConfigInfo.cpp
+++ b/ndb/src/mgmsrv/ConfigInfo.cpp
@@ -3192,13 +3192,27 @@ fixShmKey(InitConfigFileParser::Context & ctx, const char *)
{
DBUG_ENTER("fixShmKey");
{
+ static int last_signum= -1;
Uint32 signum;
if(!ctx.m_currentSection->get("Signum", &signum))
{
signum= OPT_NDB_SHM_SIGNUM_DEFAULT;
+ if (signum <= 0)
+ {
+ ctx.reportError("Unable to set default parameter for [SHM]Signum"
+ " please specify [SHM DEFAULT]Signum");
+ return false;
+ }
ctx.m_currentSection->put("Signum", signum);
DBUG_PRINT("info",("Added Signum=%u", signum));
}
+ if ( last_signum != (int)signum && last_signum >= 0 )
+ {
+ ctx.reportError("All shared memory transporters must have same [SHM]Signum defined."
+ " Use [SHM DEFAULT]Signum");
+ return false;
+ }
+ last_signum= (int)signum;
}
{
Uint32 id1= 0, id2= 0, key= 0;
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index f698099141a..66c9a6448aa 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -70,12 +70,7 @@ void *
MgmtSrvr::logLevelThread_C(void* m)
{
MgmtSrvr *mgm = (MgmtSrvr*)m;
- my_thread_init();
mgm->logLevelThreadRun();
-
- my_thread_end();
- NdbThread_Exit(0);
- /* NOTREACHED */
return 0;
}
@@ -83,12 +78,7 @@ void *
MgmtSrvr::signalRecvThread_C(void *m)
{
MgmtSrvr *mgm = (MgmtSrvr*)m;
- my_thread_init();
mgm->signalRecvThreadRun();
-
- my_thread_end();
- NdbThread_Exit(0);
- /* NOTREACHED */
return 0;
}
diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp
index e10b2e1d82c..1fe0cedbd6c 100644
--- a/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/ndb/src/ndbapi/ClusterMgr.cpp
@@ -54,7 +54,6 @@ runClusterMgr_C(void * me)
#ifdef NDB_OSE
NdbSleep_MilliSleep(50);
#endif
- NdbThread_Exit(0);
return NULL;
}
@@ -560,10 +559,7 @@ extern "C"
void*
runArbitMgr_C(void* me)
{
- my_thread_init();
((ArbitMgr*) me)->threadMain();
- my_thread_end();
- NdbThread_Exit(0);
return NULL;
}
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 9f6ed144fb0..530f15d3a2e 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -284,7 +284,7 @@ void
NdbTableImpl::init(){
clearNewProperties();
m_frm.clear();
- m_fragmentType = NdbDictionary::Object::FragAllMedium;
+ m_fragmentType = NdbDictionary::Object::FragAllSmall;
m_logging = true;
m_kvalue = 6;
m_minLoadFactor = 78;
diff --git a/ndb/src/ndbapi/TransporterFacade.cpp b/ndb/src/ndbapi/TransporterFacade.cpp
index 031ee6315e8..5582143be44 100644
--- a/ndb/src/ndbapi/TransporterFacade.cpp
+++ b/ndb/src/ndbapi/TransporterFacade.cpp
@@ -405,11 +405,8 @@ extern "C"
void*
runSendRequest_C(void * me)
{
- my_thread_init();
((TransporterFacade*) me)->threadMainSend();
- my_thread_end();
- NdbThread_Exit(0);
- return me;
+ return 0;
}
void TransporterFacade::threadMainSend(void)
@@ -443,11 +440,8 @@ extern "C"
void*
runReceiveResponse_C(void * me)
{
- my_thread_init();
((TransporterFacade*) me)->threadMainReceive();
- my_thread_end();
- NdbThread_Exit(0);
- return me;
+ return 0;
}
void TransporterFacade::threadMainReceive(void)
diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp
index 5df707e211d..ab32d6abb8e 100644
--- a/ndb/src/ndbapi/ndb_cluster_connection.cpp
+++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp
@@ -87,11 +87,8 @@ const char *Ndb_cluster_connection::get_connectstring(char *buf,
extern "C" pthread_handler_decl(run_ndb_cluster_connection_connect_thread, me)
{
- my_thread_init();
g_run_connect_thread= 1;
((Ndb_cluster_connection_impl*) me)->connect_thread();
- my_thread_end();
- NdbThread_Exit(0);
return me;
}
diff --git a/ndb/test/include/HugoOperations.hpp b/ndb/test/include/HugoOperations.hpp
index fe22e4b5649..9ca2772e768 100644
--- a/ndb/test/include/HugoOperations.hpp
+++ b/ndb/test/include/HugoOperations.hpp
@@ -30,6 +30,8 @@ public:
int closeTransaction(Ndb*);
NdbConnection* getTransaction();
void refresh();
+
+ void setTransactionId(Uint64);
int pkInsertRecord(Ndb*,
int recordNo,
diff --git a/ndb/test/include/NDBT_Test.hpp b/ndb/test/include/NDBT_Test.hpp
index 8b69faebde8..a60228c1a5d 100644
--- a/ndb/test/include/NDBT_Test.hpp
+++ b/ndb/test/include/NDBT_Test.hpp
@@ -188,7 +188,7 @@ public:
NDBT_TestCase(NDBT_TestSuite* psuite,
const char* name,
const char* comment);
- virtual ~NDBT_TestCase(){}
+ virtual ~NDBT_TestCase() {}
// This is the default executor of a test case
// When a test case is executed it will need to be suplied with a number of
@@ -225,6 +225,8 @@ protected:
void stopTimer(NDBT_Context*);
void printTimer(NDBT_Context*);
+ BaseString _name;
+ BaseString _comment;
const char* name;
const char* comment;
NDBT_TestSuite* suite;
diff --git a/ndb/test/ndbapi/benchronja.cpp b/ndb/test/ndbapi/benchronja.cpp
index 91b2a041186..a7523e8e416 100644
--- a/ndb/test/ndbapi/benchronja.cpp
+++ b/ndb/test/ndbapi/benchronja.cpp
@@ -984,7 +984,6 @@ void* ThreadExec(void* ThreadData){
delete pMyNdb;
pMyNdb = NULL ;
ThreadReady[thread_no] = 1;
- NdbThread_Exit(0) ;
return 0 ;
}//if
@@ -1197,7 +1196,6 @@ void* ThreadExec(void* ThreadData){
} // for(;;)
delete pMyNdb ;
- NdbThread_Exit(0) ;
- return 0 ; // Compiler is happy now
+ return 0 ;
}
diff --git a/ndb/test/ndbapi/flexAsynch.cpp b/ndb/test/ndbapi/flexAsynch.cpp
index 1953444d640..4b87b2c70ed 100644
--- a/ndb/test/ndbapi/flexAsynch.cpp
+++ b/ndb/test/ndbapi/flexAsynch.cpp
@@ -494,8 +494,7 @@ threadLoop(void* ThreadData)
delete localNdb;
ThreadReady[threadNo] = 1;
- NdbThread_Exit(0);
- return NULL; // Just to keep compiler happy
+ return NULL;
}//threadLoop()
static
diff --git a/ndb/test/ndbapi/flexBench.cpp b/ndb/test/ndbapi/flexBench.cpp
index 2a2388109a1..cc2bfb391da 100644
--- a/ndb/test/ndbapi/flexBench.cpp
+++ b/ndb/test/ndbapi/flexBench.cpp
@@ -617,7 +617,7 @@ static void* flexBenchThread(void* pArg)
free(attrRefValue) ;
free(pOps) ;
delete pNdb ;
- NdbThread_Exit(0) ;
+ return 0; // thread exits
}
pNdb->init();
@@ -934,8 +934,7 @@ static void* flexBenchThread(void* pArg)
free(longKeyAttrValue);
} // if
- NdbThread_Exit(0);
- return NULL; // Just to keep compiler happy
+ return NULL; // Thread exits
}
diff --git a/ndb/test/ndbapi/flexHammer.cpp b/ndb/test/ndbapi/flexHammer.cpp
index 688e70d501a..13cd2d5e561 100644
--- a/ndb/test/ndbapi/flexHammer.cpp
+++ b/ndb/test/ndbapi/flexHammer.cpp
@@ -612,10 +612,7 @@ flexHammerThread(void* pArg)
flexHammerErrorData->resetErrorCounters();
- // And exit using NDBT
- NdbThread_Exit(0);
-
- return NULL;
+ return NULL; // thread exits
} // flexHammerThread
diff --git a/ndb/test/ndbapi/flexScan.cpp b/ndb/test/ndbapi/flexScan.cpp
index c7f4041a525..4d2c85d6955 100644
--- a/ndb/test/ndbapi/flexScan.cpp
+++ b/ndb/test/ndbapi/flexScan.cpp
@@ -701,8 +701,7 @@ flexScanThread(void* ThreadData)
free(pkValue);
} // if
- NdbThread_Exit(0);
- return NULL;
+ return NULL; // thread exits
} // flexScanThread
diff --git a/ndb/test/ndbapi/flexTT.cpp b/ndb/test/ndbapi/flexTT.cpp
index 3b976f9f87e..8d5be2bb399 100644
--- a/ndb/test/ndbapi/flexTT.cpp
+++ b/ndb/test/ndbapi/flexTT.cpp
@@ -389,8 +389,7 @@ threadLoop(void* ThreadData)
delete localNdb;
ThreadReady[loc_threadNo] = 1;
- NdbThread_Exit(0);
- return NULL; // Just to keep compiler happy
+ return NULL; // Thread exits
}//threadLoop()
static
diff --git a/ndb/test/ndbapi/flexTimedAsynch.cpp b/ndb/test/ndbapi/flexTimedAsynch.cpp
index 27380cc79fd..2b8c0bdd5f8 100644
--- a/ndb/test/ndbapi/flexTimedAsynch.cpp
+++ b/ndb/test/ndbapi/flexTimedAsynch.cpp
@@ -406,9 +406,8 @@ threadLoop(void* ThreadData)
delete localNdb;
ThreadReady[threadNo] = 1;
- NdbThread_Exit(0);
- return NULL;
+ return NULL; // thread exits
}
void executeThread(StartType aType, Ndb* aNdbObject, ThreadNdb* threadInfo)
diff --git a/ndb/test/ndbapi/flex_bench_mysql.cpp b/ndb/test/ndbapi/flex_bench_mysql.cpp
index c8d4d85bedf..c15175bfb00 100644
--- a/ndb/test/ndbapi/flex_bench_mysql.cpp
+++ b/ndb/test/ndbapi/flex_bench_mysql.cpp
@@ -710,7 +710,7 @@ static void* flexBenchThread(void* pArg)
the_socket_name,
0) == NULL ) {
ndbout << "failed" << endl;
- NdbThread_Exit(0) ;
+ return 0;
}
ndbout << "ok" << endl;
@@ -722,7 +722,7 @@ static void* flexBenchThread(void* pArg)
if (r) {
ndbout << "autocommit on/off failed" << endl;
- NdbThread_Exit(0) ;
+ return 0;
}
}
#endif
@@ -741,7 +741,7 @@ static void* flexBenchThread(void* pArg)
ndbout << threadNo << endl ;
ndbout << "Thread #" << threadNo << " will now exit" << endl ;
tResult = 13 ;
- NdbThread_Exit(0) ;
+ return 0;
}
if (use_ndb) {
@@ -750,7 +750,7 @@ static void* flexBenchThread(void* pArg)
ndbout << "Failed to get an NDB object" << endl;
ndbout << "Thread #" << threadNo << " will now exit" << endl ;
tResult = 13;
- NdbThread_Exit(0) ;
+ return 0;
}
pNdb->waitUntilReady();
return_ndb_object(pNdb, ndb_id);
@@ -900,11 +900,11 @@ static void* flexBenchThread(void* pArg)
prep_insert[i] = mysql_prepare(&mysql, buf, pos);
if (prep_insert[i] == 0) {
ndbout << "mysql_prepare: " << mysql_error(&mysql) << endl;
- NdbThread_Exit(0) ;
+ return 0;
}
if (mysql_bind_param(prep_insert[i], bind_insert)) {
ndbout << "mysql_bind_param: " << mysql_error(&mysql) << endl;
- NdbThread_Exit(0) ;
+ return 0;
}
}
@@ -926,11 +926,11 @@ static void* flexBenchThread(void* pArg)
prep_update[i] = mysql_prepare(&mysql, buf, pos);
if (prep_update[i] == 0) {
ndbout << "mysql_prepare: " << mysql_error(&mysql) << endl;
- NdbThread_Exit(0) ;
+ return 0;
}
if (mysql_bind_param(prep_update[i], bind_update)) {
ndbout << "mysql_bind_param: " << mysql_error(&mysql) << endl;
- NdbThread_Exit(0) ;
+ return 0;
}
}
@@ -953,15 +953,15 @@ static void* flexBenchThread(void* pArg)
prep_read[i] = mysql_prepare(&mysql, buf, pos);
if (prep_read[i] == 0) {
ndbout << "mysql_prepare: " << mysql_error(&mysql) << endl;
- NdbThread_Exit(0) ;
+ return 0;
}
if (mysql_bind_param(prep_read[i], bind_read)) {
ndbout << "mysql_bind_param: " << mysql_error(&mysql) << endl;
- NdbThread_Exit(0) ;
+ return 0;
}
if (mysql_bind_result(prep_read[i], &bind_read[1])) {
ndbout << "mysql_bind_result: " << mysql_error(&mysql) << endl;
- NdbThread_Exit(0) ;
+ return 0;
}
}
@@ -978,11 +978,11 @@ static void* flexBenchThread(void* pArg)
prep_delete[i] = mysql_prepare(&mysql, buf, pos);
if (prep_delete[i] == 0) {
ndbout << "mysql_prepare: " << mysql_error(&mysql) << endl;
- NdbThread_Exit(0) ;
+ return 0;
}
if (mysql_bind_param(prep_delete[i], bind_delete)) {
ndbout << "mysql_bind_param: " << mysql_error(&mysql) << endl;
- NdbThread_Exit(0) ;
+ return 0;
}
}
}
@@ -1431,8 +1431,7 @@ static void* flexBenchThread(void* pArg)
ndbout << "I got here " << endl;
return_ndb_object(pNdb, ndb_id);
}
- NdbThread_Exit(0);
- return NULL; // Just to keep compiler happy
+ return NULL;
}
diff --git a/ndb/test/ndbapi/mainAsyncGenerator.cpp b/ndb/test/ndbapi/mainAsyncGenerator.cpp
index 16cb50e160f..73a8b98ab57 100644
--- a/ndb/test/ndbapi/mainAsyncGenerator.cpp
+++ b/ndb/test/ndbapi/mainAsyncGenerator.cpp
@@ -274,8 +274,6 @@ threadRoutine(void *arg)
asyncDbDisconnect(pNDB);
- NdbThread_Exit(0);
-
return NULL;
}
diff --git a/ndb/test/ndbapi/testOperations.cpp b/ndb/test/ndbapi/testOperations.cpp
index 949f08281a5..e254aff58dc 100644
--- a/ndb/test/ndbapi/testOperations.cpp
+++ b/ndb/test/ndbapi/testOperations.cpp
@@ -98,6 +98,15 @@ OperationTestCase matrix[] = {
result = NDBT_FAILED; \
break; }
+/* C3: condition check for the helper functions below; unlike the test steps
+   they have no NDBT_Step at hand, so only the failing line is reported. */
+#define C3(b) if (!(b)) { \
+  g_err << "ERR: failed on line " << __LINE__ << endl; \
+  return NDBT_FAILED; }
+
int
runOp(HugoOperations & hugoOps,
Ndb * pNdb,
@@ -228,11 +237,369 @@ runClearTable(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
+enum OPS { o_DONE= 0, o_INS= 1, o_UPD= 2, o_DEL= 3 };
+typedef Vector<OPS> Sequence;
+
+static
+bool
+valid(const Sequence& s)
+{
+ if(s.size() == 0)
+ return false;
+
+ for(size_t i = 1; i<s.size(); i++)
+ {
+ switch(s[i]){
+ case o_INS:
+ if(s[i-1] != o_DEL)
+ return false;
+ break;
+ case o_UPD:
+ case o_DEL:
+ if(s[i-1] == o_DEL)
+ return false;
+ break;
+ case o_DONE:
+ return true;
+ }
+ }
+ return true;
+}
+
+static
+NdbOut& operator<<(NdbOut& out, const Sequence& s)
+{
+ out << "[ ";
+ for(size_t i = 0; i<s.size(); i++)
+ {
+ switch(s[i]){
+ case o_INS:
+ out << "INS ";
+ break;
+ case o_DEL:
+ out << "DEL ";
+ break;
+ case o_UPD:
+ out << "UPD ";
+ break;
+ case o_DONE:
+ abort();
+ }
+ }
+ out << "]";
+ return out;
+}
+
+static
+void
+generate(Sequence& out, int no)
+{
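+  // 'no' encodes an operation sequence as base-4 digits: each non-zero 2-bit
+  // group is one OPS value (o_INS/o_UPD/o_DEL); decoding stops at the first
+  // zero digit.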
+ while(no & 3)
+ {
+ out.push_back((OPS)(no & 3));
+ no >>= 2;
+ }
+}
+
+static
+void
+generate(Vector<int>& out, size_t len)
+{
+ int max= 1;
+ while(len)
+ {
+ max <<= 2;
+ len--;
+ }
+
+ len= 1;
+ for(int i = 0; i<max; i++)
+ {
+ Sequence tmp;
+ generate(tmp, i);
+
+ if(tmp.size() >= len && valid(tmp))
+ {
+ out.push_back(i);
+ len= tmp.size();
+ }
+ else
+ {
+ //ndbout << "DISCARD: " << tmp << endl;
+ }
+ }
+}
+
+static const Uint32 DUMMY = 0;
+static const Uint32 ROW = 1;
+
+int
+verify_other(NDBT_Context* ctx,
+ Ndb* pNdb, int seq, OPS latest, bool initial_row, bool commit)
+{
+ Uint32 no_wait = NdbOperation::LM_CommittedRead*
+ ctx->getProperty("NoWait", (Uint32)1);
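+  // With NoWait set (the default) the loop starts at LM_CommittedRead, so the
+  // verifying read never blocks on locks held by the transaction under test;
+  // with NoWait=0 the locking read modes are tried as well.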
+
+ for(size_t j = no_wait; j<3; j++)
+ {
+ HugoOperations other(*ctx->getTab());
+ C3(other.startTransaction(pNdb) == 0);
+ C3(other.pkReadRecord(pNdb, ROW, 1, (NdbOperation::LockMode)j) == 0);
+ int tmp= other.execute_Commit(pNdb);
+ if(seq == 0){
+ if(j == NdbOperation::LM_CommittedRead)
+ {
+ C3(initial_row? tmp==0 && other.verifyUpdatesValue(0) == 0 : tmp==626);
+ }
+ else
+ {
+ C3(tmp == 266);
+ }
+ }
+ else if(commit)
+ {
+ switch(latest){
+ case o_INS:
+ case o_UPD:
+ C3(tmp == 0 && other.verifyUpdatesValue(seq) == 0);
+ break;
+ case o_DEL:
+ C3(tmp == 626);
+ break;
+ case o_DONE:
+ abort();
+ }
+ }
+ else
+ {
+ // rollback
+ C3(initial_row? tmp==0 && other.verifyUpdatesValue(0) == 0 : tmp==626);
+ }
+ }
+
+ return NDBT_OK;
+}
+
+int
+verify_savepoint(NDBT_Context* ctx,
+ Ndb* pNdb, int seq, OPS latest,
+ Uint64 transactionId)
+{
+ bool initial_row= (seq == 0) && latest == o_INS;
+
+ for(size_t j = 0; j<3; j++)
+ {
+ const NdbOperation::LockMode lm= (NdbOperation::LockMode)j;
+
+ HugoOperations same(*ctx->getTab());
+ C3(same.startTransaction(pNdb) == 0);
+ same.setTransactionId(transactionId); // Cheat
+
+ /**
+     * Advance the transaction to savepoint 'seq' (one dummy read per step)
+ */
+ for(size_t l = 1; l<=seq; l++)
+ {
+ C3(same.pkReadRecord(pNdb, DUMMY, 1, lm) == 0); // Read dummy row
+ C3(same.execute_NoCommit(pNdb) == 0);
+ g_info << "savepoint: " << l << endl;
+ }
+
+ g_info << "op(" << seq << "): "
+ << " lock mode " << lm << endl;
+
+ C3(same.pkReadRecord(pNdb, ROW, 1, lm) == 0); // Read real row
+ int tmp= same.execute_Commit(pNdb);
+ if(seq == 0)
+ {
+ if(initial_row)
+ {
+ C3(tmp == 0 && same.verifyUpdatesValue(0) == 0);
+ } else
+ {
+ C3(tmp == 626);
+ }
+ }
+ else
+ {
+ switch(latest){
+ case o_INS:
+ case o_UPD:
+ C3(tmp == 0 && same.verifyUpdatesValue(seq) == 0);
+ break;
+ case o_DEL:
+ C3(tmp == 626);
+ break;
+ case o_DONE:
+ abort();
+ }
+ }
+ }
+ return NDBT_OK;
+}
+
+int
+runOperations(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int tmp;
+ Ndb* pNdb = GETNDB(step);
+
+ Uint32 seqNo = ctx->getProperty("Sequence", (Uint32)0);
+ Uint32 commit= ctx->getProperty("Commit", (Uint32)1);
+
+ if(seqNo == 0)
+ {
+ return NDBT_FAILED;
+ }
+
+ Sequence seq;
+ generate(seq, seqNo);
+
+ {
+ // Dummy row
+ HugoOperations hugoOps(*ctx->getTab());
+ C3(hugoOps.startTransaction(pNdb) == 0);
+ C3(hugoOps.pkInsertRecord(pNdb, DUMMY, 1, 0) == 0);
+ C3(hugoOps.execute_Commit(pNdb) == 0);
+ }
+
+ const bool initial_row= (seq[0] != o_INS);
+ if(initial_row)
+ {
+ HugoOperations hugoOps(*ctx->getTab());
+ C3(hugoOps.startTransaction(pNdb) == 0);
+ C3(hugoOps.pkInsertRecord(pNdb, ROW, 1, 0) == 0);
+ C3(hugoOps.execute_Commit(pNdb) == 0);
+ }
+
+ HugoOperations trans1(*ctx->getTab());
+ C3(trans1.startTransaction(pNdb) == 0);
+ for(size_t i = 0; i<seq.size(); i++)
+ {
+ /**
+ * Perform operation
+ */
+ switch(seq[i]){
+ case o_INS:
+ C3(trans1.pkInsertRecord(pNdb, ROW, 1, i+1) == 0);
+ break;
+ case o_UPD:
+ C3(trans1.pkUpdateRecord(pNdb, ROW, 1, i+1) == 0);
+ break;
+ case o_DEL:
+ C3(trans1.pkDeleteRecord(pNdb, ROW, 1) == 0);
+ break;
+ case o_DONE:
+ abort();
+ }
+ C3(trans1.execute_NoCommit(pNdb) == 0);
+
+ /**
+ * Verify other transaction
+ */
+ if(verify_other(ctx, pNdb, 0, seq[0], initial_row, commit) != NDBT_OK)
+ return NDBT_FAILED;
+
+ /**
+ * Verify savepoint read
+ */
+ Uint64 transactionId= trans1.getTransaction()->getTransactionId();
+
+ for(size_t k=0; k<=i+1; k++)
+ {
+ if(verify_savepoint(ctx, pNdb, k,
+ k>0 ? seq[k-1] : initial_row ? o_INS : o_DONE,
+ transactionId) != NDBT_OK)
+ return NDBT_FAILED;
+ }
+ }
+
+ if(commit)
+ {
+ C3(trans1.execute_Commit(pNdb) == 0);
+ }
+ else
+ {
+ C3(trans1.execute_Rollback(pNdb) == 0);
+ }
+
+ if(verify_other(ctx, pNdb, seq.size(), seq.back(),
+ initial_row, commit) != NDBT_OK)
+ return NDBT_FAILED;
+
+ return NDBT_OK;
+}
+
int
main(int argc, const char** argv){
ndb_init();
+ Vector<int> tmp;
+ generate(tmp, 5);
+
NDBT_TestSuite ts("testOperations");
+ for(size_t i = 0; i<tmp.size(); i++)
+ {
+ BaseString name;
+ Sequence s;
+ generate(s, tmp[i]);
+ for(size_t j = 0; j<s.size(); j++){
+ switch(s[j]){
+ case o_INS:
+ name.append("_INS");
+ break;
+ case o_DEL:
+ name.append("_DEL");
+ break;
+ case o_UPD:
+ name.append("_UPD");
+ break;
+ case o_DONE:
+ abort();
+ }
+ }
+
+ BaseString n1;
+ n1.append(name);
+ n1.append("_COMMIT");
+
+ NDBT_TestCaseImpl1 *pt = new NDBT_TestCaseImpl1(&ts,
+ n1.c_str()+1, "");
+
+ pt->setProperty("Sequence", tmp[i]);
+ pt->addInitializer(new NDBT_Initializer(pt,
+ "runClearTable",
+ runClearTable));
+
+ pt->addStep(new NDBT_ParallelStep(pt,
+ "run",
+ runOperations));
+
+ pt->addFinalizer(new NDBT_Finalizer(pt,
+ "runClearTable",
+ runClearTable));
+
+ ts.addTest(pt);
+
+ name.append("_ABORT");
+ pt = new NDBT_TestCaseImpl1(&ts, name.c_str()+1, "");
+ pt->setProperty("Sequence", tmp[i]);
+ pt->setProperty("Commit", (Uint32)0);
+ pt->addInitializer(new NDBT_Initializer(pt,
+ "runClearTable",
+ runClearTable));
+
+ pt->addStep(new NDBT_ParallelStep(pt,
+ "run",
+ runOperations));
+
+ pt->addFinalizer(new NDBT_Finalizer(pt,
+ "runClearTable",
+ runClearTable));
+
+ ts.addTest(pt);
+ }
+
for(Uint32 i = 0; i<sizeof(matrix)/sizeof(matrix[0]); i++){
NDBT_TestCaseImpl1 *pt = new NDBT_TestCaseImpl1(&ts, matrix[i].name, "");
@@ -270,3 +637,5 @@ main(int argc, const char** argv){
return ts.execute(argc, argv);
}
+template class Vector<OPS>;
+template class Vector<Sequence>;
diff --git a/ndb/test/ndbapi/testScan.cpp b/ndb/test/ndbapi/testScan.cpp
index 22ec3fff327..f1018d29846 100644
--- a/ndb/test/ndbapi/testScan.cpp
+++ b/ndb/test/ndbapi/testScan.cpp
@@ -35,7 +35,8 @@ getTable(Ndb* pNdb, int i){
int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){
- int records = ctx->getNumRecords();
+ int records = ctx->getProperty("Rows", ctx->getNumRecords());
+
HugoTransactions hugoTrans(*ctx->getTab());
if (hugoTrans.loadTable(GETNDB(step), records) != 0){
return NDBT_FAILED;
@@ -264,7 +265,7 @@ int runVerifyTable(NDBT_Context* ctx, NDBT_Step* step){
int runScanRead(NDBT_Context* ctx, NDBT_Step* step){
int loops = ctx->getNumLoops();
- int records = ctx->getNumRecords();
+ int records = ctx->getProperty("Rows", ctx->getNumRecords());
int parallelism = ctx->getProperty("Parallelism", 240);
int abort = ctx->getProperty("AbortProb", 5);
@@ -375,7 +376,20 @@ int runScanReadError(NDBT_Context* ctx, NDBT_Step* step){
restarter.insertErrorInAllNodes(0);
return result;
}
-
+
+int
+runInsertError(NDBT_Context* ctx, NDBT_Step* step){
+ int error = ctx->getProperty("ErrorCode");
+ NdbRestarter restarter;
+
+ ctx->setProperty("ErrorCode", (Uint32)0);
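+  // Clear the property so that the same function, run again as a FINALIZER,
+  // inserts error 0 and thereby removes the error again.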
+ if (restarter.insertErrorInAllNodes(error) != 0){
+ ndbout << "Could not insert error in all nodes "<<endl;
+ return NDBT_FAILED;
+ }
+ return NDBT_OK;
+}
+
int runScanReadErrorOneNode(NDBT_Context* ctx, NDBT_Step* step){
int result = NDBT_OK;
int loops = ctx->getNumLoops();
@@ -1221,6 +1235,16 @@ TESTCASE("ScanRead100",
STEPS(runScanRead, 100);
FINALIZER(runClearTable);
}
+TESTCASE("Scan-bug8262",
+ ""){
+ TC_PROPERTY("Rows", 1);
+ TC_PROPERTY("ErrorCode", 8035);
+ INITIALIZER(runLoadTable);
+ INITIALIZER(runInsertError); // Will reset error code
+ STEPS(runScanRead, 25);
+ FINALIZER(runInsertError);
+ FINALIZER(runClearTable);
+}
TESTCASE("ScanRead40RandomTable",
"Verify scan requirement: Scan with 40 simultaneous threads. "\
"Use random table for the scan"){
diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt
index 87f86795370..c62908ae999 100644
--- a/ndb/test/run-test/daily-basic-tests.txt
+++ b/ndb/test/run-test/daily-basic-tests.txt
@@ -378,6 +378,10 @@ max-time: 500
cmd: testScan
args: -n ScanRestart T1
+max-time: 500
+cmd: testScan
+args: -l 100 -n Scan-bug8262 T7
+
# OLD FLEX
max-time: 500
cmd: flexBench
diff --git a/ndb/test/src/HugoOperations.cpp b/ndb/test/src/HugoOperations.cpp
index e8e2d992345..8e6603ec6ff 100644
--- a/ndb/test/src/HugoOperations.cpp
+++ b/ndb/test/src/HugoOperations.cpp
@@ -32,6 +32,13 @@ int HugoOperations::startTransaction(Ndb* pNdb){
return NDBT_OK;
}
+void
+HugoOperations::setTransactionId(Uint64 id){
+ if (pTrans != NULL){
+ pTrans->setTransactionId(id);
+ }
+}
+
int HugoOperations::closeTransaction(Ndb* pNdb){
if (pTrans != NULL){
@@ -401,6 +408,10 @@ HugoOperations::HugoOperations(const NdbDictionary::Table& _tab):
HugoOperations::~HugoOperations(){
deallocRows();
+ if (pTrans != NULL){
+ pTrans->close();
+ pTrans = NULL;
+ }
}
diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp
index 096f5406bbf..85c96ef0f7f 100644
--- a/ndb/test/src/HugoTransactions.cpp
+++ b/ndb/test/src/HugoTransactions.cpp
@@ -68,7 +68,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -77,14 +77,14 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
if( rs == 0 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -92,7 +92,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
if((row.attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -102,13 +102,13 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
const NdbError err = pTrans->getNdbError();
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -127,7 +127,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
while((eof = rs->nextResult(true)) == 0){
rows++;
if (calc.verifyRowValues(&row) != 0){
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -137,11 +137,11 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
rs->close();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_OK;
}
}
@@ -150,7 +150,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR_INFO(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
switch (err.code){
case 488:
@@ -164,17 +164,17 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
g_info << rows << " rows have been read" << endl;
if (records != 0 && rows != records){
g_err << "Check expected number of records failed" << endl
- << " expected=" << records <<", " << endl
- << " read=" << rows << endl;
+ << " expected=" << records <<", " << endl
+ << " read=" << rows << endl;
return NDBT_FAILED;
}
@@ -224,7 +224,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
pOp = pTrans->getNdbIndexScanOperation(pIdx->getName(), tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -233,14 +233,14 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
if( rs == 0 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -248,7 +248,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
if((row.attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -258,13 +258,13 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
const NdbError err = pTrans->getNdbError();
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -283,7 +283,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
while((eof = rs->nextResult(true)) == 0){
rows++;
if (calc.verifyRowValues(&row) != 0){
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -293,11 +293,11 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
rs->close();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_OK;
}
}
@@ -306,7 +306,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR_INFO(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
switch (err.code){
case 488:
@@ -320,17 +320,17 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
g_info << rows << " rows have been read" << endl;
if (records != 0 && rows != records){
g_err << "Check expected number of records failed" << endl
- << " expected=" << records <<", " << endl
- << " read=" << rows << endl;
+ << " expected=" << records <<", " << endl
+ << " read=" << rows << endl;
return NDBT_FAILED;
}
@@ -344,9 +344,9 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
int
HugoTransactions::scanUpdateRecords(Ndb* pNdb,
- int records,
- int abortPercent,
- int parallelism){
+ int records,
+ int abortPercent,
+ int parallelism){
if(m_defaultScanUpdateMethod == 1){
return scanUpdateRecords1(pNdb, records, abortPercent, parallelism);
} else if(m_defaultScanUpdateMethod == 2){
@@ -398,21 +398,21 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->openScanExclusive(parallelism);
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -420,7 +420,7 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
for(a=0; a<tab.getNoOfColumns(); a++){
if((row.attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == NULL){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -430,13 +430,13 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
const NdbError err = pTrans->getNdbError();
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -464,11 +464,11 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
check = pTrans->stopScan();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_OK;
}
int res = takeOverAndUpdateRecord(pNdb, pOp);
@@ -477,7 +477,7 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
continue;
}
if (res != 0){
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return res;
}
@@ -501,18 +501,18 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
if(eof == -2){
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
g_info << rows << " rows have been updated" << endl;
return NDBT_OK;
@@ -565,21 +565,21 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->openScanExclusive(parallelism);
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -587,7 +587,7 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
for(a=0; a<tab.getNoOfColumns(); a++){
if((row.attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == NULL){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -597,13 +597,13 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
const NdbError err = pTrans->getNdbError();
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -639,7 +639,7 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
rows++;
if (addRowToUpdate(pNdb, pUpTrans, pOp) != 0){
pNdb->closeTransaction(pUpTrans);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
} while((eof = pTrans->nextScanResult(false)) == 0);
@@ -650,12 +650,12 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
check = pTrans->stopScan();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
pNdb->closeTransaction(pUpTrans);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
pNdb->closeTransaction(pUpTrans);
return NDBT_OK;
}
@@ -665,7 +665,7 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
const NdbError err = pUpTrans->getNdbError();
ERR(err);
pNdb->closeTransaction(pUpTrans);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
pNdb->closeTransaction(pUpTrans);
@@ -675,17 +675,17 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
g_info << rows << " rows have been updated" << endl;
return NDBT_OK;
@@ -707,7 +707,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
while (true){
- restart:
+restart:
if (retryAttempt++ >= retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
@@ -728,14 +728,14 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
NdbResultSet *rs = pOp->readTuplesExclusive(parallelism);
if( rs == 0 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -743,7 +743,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
for(a=0; a<tab.getNoOfColumns(); a++){
if((row.attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == NULL){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -752,7 +752,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
if (err.status == NdbError::TemporaryError){
NdbSleep_MilliSleep(50);
continue;
@@ -777,7 +777,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
NdbOperation* pUp = rs->updateTuple();
if(pUp == 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
const int updates = calc.getUpdatesValue(&row) + 1;
@@ -786,7 +786,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == false){
if(setValueForAttr(pUp, a, r, updates ) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -795,7 +795,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
if (rows == abortCount && abortTrans == true){
g_info << "Scan is aborted" << endl;
// This scan should be aborted
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_OK;
}
} while((check = rs->nextResult(false)) == 0);
@@ -807,7 +807,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
const NdbError err = pTrans->getNdbError();
if( check == -1 ) {
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
ERR(err);
if (err.status == NdbError::TemporaryError){
NdbSleep_MilliSleep(50);
@@ -819,7 +819,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
const NdbError err = pTrans->getNdbError();
if( check == -1 ) {
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
ERR(err);
if (err.status == NdbError::TemporaryError){
NdbSleep_MilliSleep(50);
@@ -828,7 +828,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
g_info << rows << " rows have been updated" << endl;
return NDBT_OK;
@@ -899,14 +899,14 @@ HugoTransactions::loadTable(Ndb* pNdb,
pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->insertTuple();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -914,7 +914,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
for (a = 0; a<tab.getNoOfColumns(); a++){
if(setValueForAttr(pOp, a, c+b, 0 ) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -932,7 +932,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
}
if(check == -1 ) {
const NdbError err = pTrans->getNdbError();
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
pTrans= 0;
switch(err.status){
case NdbError::Success:
@@ -974,7 +974,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
}
else{
if (closeTrans) {
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
pTrans= 0;
}
}
@@ -1025,14 +1025,14 @@ HugoTransactions::fillTable(Ndb* pNdb,
pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->insertTuple();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1040,7 +1040,7 @@ HugoTransactions::fillTable(Ndb* pNdb,
for (a = 0; a<tab.getNoOfColumns(); a++){
if(setValueForAttr(pOp, a, c+b, 0 ) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1050,7 +1050,7 @@ HugoTransactions::fillTable(Ndb* pNdb,
check = pTrans->execute( Commit, CommitAsMuchAsPossible );
if(check == -1 ) {
const NdbError err = pTrans->getNdbError();
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
switch(err.status){
case NdbError::Success:
@@ -1102,7 +1102,7 @@ HugoTransactions::fillTable(Ndb* pNdb,
}
}
else{
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
}
// Step to next record
@@ -1419,7 +1419,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1441,7 +1441,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1450,7 +1450,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1461,7 +1461,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
if((rows[b]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1474,7 +1474,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
@@ -1487,13 +1487,13 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
default:
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
} else{
for (int b=0; (b<batchsize) && (r+b<records); b++){
if (calc.verifyRowValues(rows[b]) != 0){
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
reads++;
@@ -1501,7 +1501,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
}
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
}
deallocRows();
@@ -1556,14 +1556,14 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->readTupleExclusive();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1572,7 +1572,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1583,7 +1583,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
if((rows[b]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1595,19 +1595,19 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
for(b = 0; b<batch && (b+r)<records; b++){
if (calc.verifyRowValues(rows[b]) != 0){
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1617,14 +1617,14 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
pUpdOp = pTrans->getNdbOperation(tab.getName());
if (pUpdOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pUpdOp->updateTuple();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1632,7 +1632,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pUpdOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1642,7 +1642,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == false){
if(setValueForAttr(pUpdOp, a, r+b, updates ) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1655,14 +1655,14 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
ndbout << "r = " << r << endl;
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
else{
@@ -1670,7 +1670,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
r += batch; // Read next record
}
@@ -1716,14 +1716,14 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
NdbOperation* pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->readTupleExclusive();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1732,7 +1732,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1744,7 +1744,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
if((row.attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1756,13 +1756,13 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1772,14 +1772,14 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
pUpdOp = pTrans->getNdbOperation(tab.getName());
if (pUpdOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pUpdOp->interpretedUpdateTuple();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1788,7 +1788,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pUpdOp, a, r) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1805,7 +1805,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
check = pUpdOp->incValue(attr->getName(), valToIncWith);
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1817,7 +1817,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
(calc.isUpdateCol(a) == false)){
if(setValueForAttr(pUpdOp, a, r, updates ) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1831,14 +1831,14 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
ndbout << "r = " << r << endl;
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
else{
@@ -1846,7 +1846,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
r++; // Read next record
@@ -1900,14 +1900,14 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->deleteTuple();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1916,7 +1916,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1928,7 +1928,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
switch(err.status){
case NdbError::TemporaryError:
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
@@ -1947,20 +1947,20 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
}
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
break;
default:
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
else {
deleted++;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
r++; // Read next record
@@ -2023,14 +2023,14 @@ HugoTransactions::lockRecords(Ndb* pNdb,
pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->readTupleExclusive();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -2039,7 +2039,7 @@ HugoTransactions::lockRecords(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -2050,7 +2050,7 @@ HugoTransactions::lockRecords(Ndb* pNdb,
if((rows[b]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -2066,18 +2066,18 @@ HugoTransactions::lockRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
for (int b=0; (b<lockBatch) && (r+b<records); b++){
if (calc.verifyRowValues(rows[b]) != 0){
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -2092,26 +2092,26 @@ HugoTransactions::lockRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
else{
for (int b=0; (b<lockBatch) && (r<records); b++){
if (calc.verifyRowValues(rows[b]) != 0){
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
r++; // Read next record
}
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
}
@@ -2178,7 +2178,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
pOp = pTrans->getNdbIndexOperation(idxName, tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->readTuple();
@@ -2186,7 +2186,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
pOp = sOp = pTrans->getNdbIndexScanOperation(idxName, tab.getName());
if (sOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -2196,7 +2196,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -2205,7 +2205,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -2216,7 +2216,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
if((rows[b]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -2229,7 +2229,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
@@ -2242,13 +2242,13 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
default:
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
} else{
for (int b=0; (b<batchsize) && (r+b<records); b++){
if (calc.verifyRowValues(rows[b]) != 0){
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
reads++;
@@ -2257,11 +2257,11 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
if(ordered && rs->nextResult(true) == 0){
ndbout << "Error when comparing records "
<< " - index op next_result to many" << endl;
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
}
deallocRows();
g_info << reads << " records read" << endl;
@@ -2322,21 +2322,21 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
pOp = pTrans->getNdbIndexOperation(idxName, tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->readTupleExclusive();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
} else {
pOp = sOp = pTrans->getNdbIndexScanOperation(idxName, tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -2349,7 +2349,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -2360,7 +2360,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
if((rows[b]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -2371,7 +2371,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
if (err.status == NdbError::TemporaryError){
NdbSleep_MilliSleep(50);
@@ -2383,13 +2383,13 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
if(ordered && check != 0){
g_err << "Row: " << r << " not found!!" << endl;
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
for(b = 0; b<batchsize && (b+r)<records; b++){
if (calc.verifyRowValues(rows[b]) != 0){
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -2405,13 +2405,13 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
if (pUpdOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -2420,7 +2420,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pUpdOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -2431,7 +2431,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
if (tab.getColumn(a)->getPrimaryKey() == false){
if(setValueForAttr(pUpdOp, a, r+b, updates ) != 0){
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -2442,7 +2442,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
if (err.status == NdbError::TemporaryError){
NdbSleep_MilliSleep(50);
@@ -2455,7 +2455,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
updated += batchsize;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
r+= batchsize; // Read next record
}
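
All of the call sites above switch from pNdb->closeTransaction(pTrans) to a closeTransaction(pNdb) helper on HugoTransactions. The helper's definition is not part of this excerpt; a minimal sketch of what such a wrapper would look like, assuming the class keeps the current transaction in a pTrans member (the member name and the void return type are assumptions, not taken from this patch):

// Hypothetical wrapper: close the transaction owned by this object and
// forget the pointer so a later error path cannot close it twice.
void
HugoTransactions::closeTransaction(Ndb* pNdb)
{
  if (pTrans != NULL)
  {
    pNdb->closeTransaction(pTrans);
    pTrans = NULL;
  }
}

For the retry-heavy loops above, the benefit is that every early return goes through one place, so the transaction pointer can be reset consistently.
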
diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp
index bbbde008938..600a5443f40 100644
--- a/ndb/test/src/NDBT_Test.cpp
+++ b/ndb/test/src/NDBT_Test.cpp
@@ -327,13 +327,17 @@ NDBT_Finalizer::NDBT_Finalizer(NDBT_TestCase* ptest,
NDBT_TestCase::NDBT_TestCase(NDBT_TestSuite* psuite,
const char* pname,
const char* pcomment) :
- name(pname) ,
- comment(pcomment),
- suite(psuite){
+ name(strdup(pname)) ,
+ comment(strdup(pcomment)),
+ suite(psuite)
+{
+ _name.assign(pname);
+ _comment.assign(pcomment);
+ name= _name.c_str();
+ comment= _comment.c_str();
assert(suite != NULL);
}
-
NDBT_TestCaseImpl1::NDBT_TestCaseImpl1(NDBT_TestSuite* psuite,
const char* pname,
const char* pcomment) :
@@ -475,7 +479,6 @@ void *
runStep_C(void * s)
{
runStep(s);
- NdbThread_Exit(0);
return NULL;
}
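
The constructor change above makes NDBT_TestCase keep its own copies of the test name and comment instead of borrowing the caller's pointers. A minimal sketch of that ownership pattern, using std::string in place of the string class used in the NDB tree (a simplification, not the actual NDBT code):

#include <string>

class TestCaseSketch
{
private:
  std::string _name;      // owned copies of the constructor arguments
  std::string _comment;
public:
  const char* name;       // stable C-string views into the owned copies
  const char* comment;

  TestCaseSketch(const char* pname, const char* pcomment)
    : _name(pname), _comment(pcomment),
      name(_name.c_str()), comment(_comment.c_str())
  {
  }
};

With copies in place, a caller may build the name in a temporary buffer and free it right after constructing the test case.
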
diff --git a/ndb/test/tools/transproxy.cpp b/ndb/test/tools/transproxy.cpp
index 88267801172..28a621fa584 100644
--- a/ndb/test/tools/transproxy.cpp
+++ b/ndb/test/tools/transproxy.cpp
@@ -291,7 +291,6 @@ extern "C" void*
copyrun_C(void* copy)
{
((Copy*) copy)->run();
- NdbThread_Exit(0);
return 0;
}
@@ -322,7 +321,6 @@ extern "C" void*
connrun_C(void* conn)
{
((Conn*) conn)->run();
- NdbThread_Exit(0);
return 0;
}
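
As in NDBT_Test.cpp above, the explicit NdbThread_Exit(0) call is dropped from thread start routines that return immediately afterwards. With POSIX threads, returning from the start routine already terminates the thread with that return value, so the minimal pattern looks like this (plain pthread API, shown only as background; the NDB code goes through its own NdbThread wrappers):

#include <pthread.h>
#include <stdio.h>

/* Returning from the start routine ends the thread; no explicit exit call is needed. */
static void* worker(void* arg)
{
  printf("doing work: %s\n", (const char*) arg);
  return NULL;               /* equivalent to pthread_exit(NULL) at this point */
}

int main(void)
{
  pthread_t t;
  if (pthread_create(&t, NULL, worker, (void*) "hello") != 0)
    return 1;
  pthread_join(t, NULL);     /* wait for the thread and reclaim its resources */
  return 0;
}
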
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
index 71b70fc0e4a..d5337df35b1 100644
--- a/scripts/Makefile.am
+++ b/scripts/Makefile.am
@@ -140,6 +140,9 @@ SUFFIXES = .sh
-e 's!@''IS_LINUX''@!@IS_LINUX@!' \
-e "s!@""CONF_COMMAND""@!@CONF_COMMAND@!" \
-e 's!@''MYSQLD_USER''@!@MYSQLD_USER@!' \
+ -e 's!@''STATIC_NSS_FLAGS''@!@STATIC_NSS_FLAGS@!' \
+ -e 's!@''NON_THREADED_LIBS''@!@NON_THREADED_LIBS@!' \
+ -e 's!@''ZLIB_DEPS''@!@ZLIB_DEPS@!' \
-e "s!@MAKE@!$(MAKE)!" \
$< > $@-t
@CHMOD@ +x $@-t
diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh
index 22b51168c23..910aa38c33f 100644
--- a/scripts/make_binary_distribution.sh
+++ b/scripts/make_binary_distribution.sh
@@ -107,8 +107,11 @@ BIN_FILES="extra/comp_err$BS extra/replace$BS extra/perror$BS \
client/mysql$BS client/mysqlshow$BS client/mysqladmin$BS \
client/mysqldump$BS client/mysqlimport$BS \
client/mysqltest$BS client/mysqlcheck$BS \
- client/mysqlbinlog$BS
-";
+ client/mysqlbinlog$BS \
+ tests/mysql_client_test$BS \
+ libmysqld/examples/mysql_client_test_embedded$BS \
+ libmysqld/examples/mysqltest_embedded$BS \
+ ";
# Platform-specific bin dir files:
if [ $BASE_SYSTEM = "netware" ] ; then
@@ -127,8 +130,9 @@ else
client/.libs/mysqltest client/.libs/mysqlcheck \
client/.libs/mysqlbinlog client/.libs/mysqlmanagerc \
client/.libs/mysqlmanager-pwgen tools/.libs/mysqlmanager \
- tests/.libs/mysql_client_test libmysqld/examples/mysql_client_test_embedded \
- libmysqld/examples/mysqltest_embedded \
+ tests/.libs/mysql_client_test \
+ libmysqld/examples/.libs/mysql_client_test_embedded \
+ libmysqld/examples/.libs/mysqltest_embedded \
";
fi
@@ -216,7 +220,7 @@ $CP mysql-test/include/*.inc $BASE/mysql-test/include
$CP mysql-test/std_data/*.dat mysql-test/std_data/*.*001 $BASE/mysql-test/std_data
$CP mysql-test/std_data/des_key_file $BASE/mysql-test/std_data
$CP mysql-test/t/*test mysql-test/t/*.opt mysql-test/t/*.slave-mi mysql-test/t/*.sh $BASE/mysql-test/t
-$CP mysql-test/r/*result mysql-test/r/*.require $BASE/mysql-test/r
+$CP mysql-test/r/*result mysql-test/r/*result.es mysql-test/r/*.require $BASE/mysql-test/r
if [ $BASE_SYSTEM != "netware" ] ; then
chmod a+x $BASE/bin/*
diff --git a/scripts/mysql_config.sh b/scripts/mysql_config.sh
index 90418de3d1d..a5c8af5ecb2 100644
--- a/scripts/mysql_config.sh
+++ b/scripts/mysql_config.sh
@@ -82,13 +82,14 @@ version='@VERSION@'
socket='@MYSQL_UNIX_ADDR@'
port='@MYSQL_TCP_PORT@'
ldflags='@LDFLAGS@'
-client_libs='@CLIENT_LIBS@'
# Create options
-libs="$ldflags -L$pkglibdir -lmysqlclient $client_libs"
+libs="$ldflags -L$pkglibdir -lmysqlclient @ZLIB_DEPS@ @NON_THREADED_LIBS@"
+libs="$libs @openssl_libs@ @STATIC_NSS_FLAGS@"
libs=`echo "$libs" | sed -e 's; \+; ;g' | sed -e 's;^ *;;' | sed -e 's; *\$;;'`
-libs_r="$ldflags -L$pkglibdir -lmysqlclient_r @LIBS@ @ZLIB_LIBS@ @openssl_libs@"
+
+libs_r="$ldflags -L$pkglibdir -lmysqlclient_r @ZLIB_DEPS@ @LIBS@ @openssl_libs@"
libs_r=`echo "$libs_r" | sed -e 's; \+; ;g' | sed -e 's;^ *;;' | sed -e 's; *\$;;'`
cflags="-I$pkgincludedir @CFLAGS@ " #note: end space!
include="-I$pkgincludedir"
diff --git a/sql/field.cc b/sql/field.cc
index 7357bc06f11..e2f75034e52 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -245,6 +245,7 @@ static Field::field_cast_enum field_cast_date[]=
Field::FIELD_CAST_BLOB, Field::FIELD_CAST_STOP};
static Field::field_cast_enum field_cast_newdate[]=
{Field::FIELD_CAST_NEWDATE,
+ Field::FIELD_CAST_DATE,
Field::FIELD_CAST_DATETIME,
Field::FIELD_CAST_STRING, Field::FIELD_CAST_VARSTRING,
Field::FIELD_CAST_BLOB, Field::FIELD_CAST_STOP};
@@ -3511,9 +3512,17 @@ void Field_time::sql_type(String &res) const
int Field_year::store(const char *from, uint len,CHARSET_INFO *cs)
{
- int not_used; // We can ignore result from str2int
+ int err;
char *end;
- long nr= my_strntol(cs, from, len, 10, &end, &not_used);
+ long nr= my_strntol(cs, from, len, 10, &end, &err);
+
+ if (err)
+ {
+ if (table->in_use->count_cuted_fields)
+ set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1);
+ *ptr= 0;
+ return 0;
+ }
if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155)
{
@@ -6024,6 +6033,40 @@ Field *make_field(char *ptr, uint32 field_length,
}
+/*
+  Check if field_type is an appropriate field type
+  for creating a field for a tmp table using the
+  item->tmp_table_field() method
+
+ SYNOPSIS
+ field_types_to_be_kept()
+ field_type - field type
+
+ NOTE
+ it is used in function get_holder_example_field()
+ from item.cc
+
+ RETURN
+ 1 - can use item->tmp_table_field() method
+ 0 - can not use item->tmp_table_field() method
+
+*/
+
+bool field_types_to_be_kept(enum_field_types field_type)
+{
+ switch (field_type)
+ {
+ case FIELD_TYPE_DATE:
+ case FIELD_TYPE_NEWDATE:
+ case FIELD_TYPE_TIME:
+ case FIELD_TYPE_DATETIME:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+
/* Create a field suitable for create of table */
create_field::create_field(Field *old_field,Field *orig_field)
diff --git a/sql/field.h b/sql/field.h
index 27a01a69273..fd0f2f9c2f1 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1265,6 +1265,7 @@ int set_field_to_null(Field *field);
int set_field_to_null_with_conversions(Field *field, bool no_conversions);
bool test_if_int(const char *str, int length, const char *int_end,
CHARSET_INFO *cs);
+bool field_types_to_be_kept(enum_field_types field_type);
/*
The following are for the interface with the .frm file
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 1d75ce99aee..702139624ff 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -5282,8 +5282,27 @@ ha_innobase::store_lock(
are not simple SELECTs; note that select_lock_type in this
case may get strengthened in ::external_lock() to LOCK_X. */
- prebuilt->select_lock_type = LOCK_S;
- prebuilt->stored_select_lock_type = LOCK_S;
+ if (srv_locks_unsafe_for_binlog &&
+ prebuilt->trx->isolation_level != TRX_ISO_SERIALIZABLE &&
+ (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT) &&
+ thd->lex->sql_command != SQLCOM_SELECT &&
+ thd->lex->sql_command != SQLCOM_UPDATE_MULTI &&
+ thd->lex->sql_command != SQLCOM_DELETE_MULTI ) {
+
+	/* In case the innobase_locks_unsafe_for_binlog option
+	is set, the isolation level of the transaction is not
+	serializable, and MySQL is doing INSERT INTO...SELECT
+	without FOR UPDATE or IN SHARE MODE, we use a consistent
+	read for the select. Similarly for DELETE...SELECT and
+	UPDATE...SELECT when these are not multi-table. */
+
+ prebuilt->select_lock_type = LOCK_NONE;
+ prebuilt->stored_select_lock_type = LOCK_NONE;
+ } else {
+ prebuilt->select_lock_type = LOCK_S;
+ prebuilt->stored_select_lock_type = LOCK_S;
+ }
} else if (lock_type != TL_IGNORE) {
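
The new branch packs several conditions into one if(); a standalone restatement of the decision as a small function may make it easier to read (the enum and parameter names below are simplified stand-ins for illustration, not the server's types):

#include <iostream>

enum LockChoice { CONSISTENT_READ_NO_LOCK, SHARED_LOCK };

// Simplified restatement of the condition added above.
static LockChoice innodb_select_lock(bool locks_unsafe_for_binlog,
                                     bool serializable,
                                     bool plain_read_lock,   // TL_READ or TL_READ_NO_INSERT
                                     bool single_table_dml)  // not a plain SELECT, not multi-table UPDATE/DELETE
{
  if (locks_unsafe_for_binlog && !serializable &&
      plain_read_lock && single_table_dml)
    return CONSISTENT_READ_NO_LOCK;   // e.g. INSERT INTO ... SELECT without FOR UPDATE
  return SHARED_LOCK;
}

int main()
{
  std::cout << (innodb_select_lock(true, false, true, true)
                == CONSISTENT_READ_NO_LOCK) << std::endl;   // 1
  std::cout << (innodb_select_lock(false, false, true, true)
                == CONSISTENT_READ_NO_LOCK) << std::endl;   // 0
  return 0;
}
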
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index 972d6b18e19..1e6cf2f4ada 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -88,8 +88,12 @@ class ha_myisam: public handler
ft_handler->please->reinit_search(ft_handler);
return 0;
}
- FT_INFO *ft_init_ext(uint flags, uint inx,const byte *key, uint keylen)
- { return ft_init_search(flags,file,inx,(byte*) key,keylen, table->record[0]); }
+ FT_INFO *ft_init_ext(uint flags, uint inx,String *key)
+ {
+ return ft_init_search(flags,file,inx,
+ (byte *)key->ptr(), key->length(), key->charset(),
+ table->record[0]);
+ }
int ft_read(byte *buf);
int rnd_init(bool scan);
int rnd_next(byte *buf);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index a959cbaf434..9e34baae198 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -3503,6 +3503,52 @@ static int create_ndb_column(NDBCOL &col,
Create a table in NDB Cluster
*/
+static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
+{
+ if (form->max_rows == 0) /* default setting, don't set fragmentation */
+ return;
+ /**
+ * get the number of fragments right
+ */
+ uint no_fragments;
+ {
+#if MYSQL_VERSION_ID >= 50000
+ uint acc_row_size= 25 + /*safety margin*/ 2;
+#else
+ uint acc_row_size= pk_length*4;
+ /* add acc overhead */
+ if (pk_length <= 8) /* main page will set the limit */
+ acc_row_size+= 25 + /*safety margin*/ 2;
+ else /* overflow page will set the limit */
+ acc_row_size+= 4 + /*safety margin*/ 4;
+#endif
+ ulonglong acc_fragment_size= 512*1024*1024;
+ ulonglong max_rows= form->max_rows;
+#if MYSQL_VERSION_ID >= 50100
+ no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1;
+#else
+ no_fragments= ((max_rows*acc_row_size)/acc_fragment_size+1
+ +1/*correct rounding*/)/2;
+#endif
+ }
+ {
+ uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
+ NDBTAB::FragmentType ftype;
+ if (no_fragments > 2*no_nodes)
+ {
+ ftype= NDBTAB::FragAllLarge;
+ if (no_fragments > 4*no_nodes)
+ push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
+ "Ndb might have problems storing the max amount of rows specified");
+ }
+ else if (no_fragments > no_nodes)
+ ftype= NDBTAB::FragAllMedium;
+ else
+ ftype= NDBTAB::FragAllSmall;
+ tab.setFragmentType(ftype);
+ }
+}
+
int ha_ndbcluster::create(const char *name,
TABLE *form,
HA_CREATE_INFO *info)
@@ -3605,7 +3651,9 @@ int ha_ndbcluster::create(const char *name,
break;
}
}
-
+
+ ndb_set_fragmentation(tab, form, pk_length);
+
if ((my_errno= check_ndb_connection()))
DBUG_RETURN(my_errno);
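
ndb_set_fragmentation() above derives a fragment count from MAX_ROWS and a per-row ACC size estimate, then maps that count to a fragment type relative to the number of data nodes. A standalone rerun of the same arithmetic with made-up inputs (the formula and constants mirror the #else, pre-5.0/pre-5.1 branches above; MAX_ROWS, pk_length and the node count are invented for illustration):

#include <iostream>

int main()
{
  // Assumed inputs, for illustration only.
  const unsigned long long max_rows = 200 * 1000 * 1000ULL;  // MAX_ROWS = 200M
  const unsigned pk_length = 8;                              // primary key bytes
  const unsigned no_nodes  = 2;                              // NDB data nodes

  // Per-row ACC estimate, as in the pre-5.0 branch above.
  unsigned acc_row_size = pk_length * 4;
  if (pk_length <= 8)
    acc_row_size += 25 + 2;   // main page sets the limit, plus safety margin
  else
    acc_row_size += 4 + 4;

  const unsigned long long acc_fragment_size = 512ULL * 1024 * 1024;
  const unsigned no_fragments =
    (unsigned) (((max_rows * acc_row_size) / acc_fragment_size + 1
                 + 1 /* correct rounding */) / 2);

  const char* ftype = (no_fragments > 2 * no_nodes) ? "FragAllLarge"
                    : (no_fragments > no_nodes)     ? "FragAllMedium"
                    :                                 "FragAllSmall";

  std::cout << "acc_row_size=" << acc_row_size        // 59
            << " no_fragments=" << no_fragments       // 11
            << " -> " << ftype << std::endl;          // FragAllLarge
  return 0;
}

With these numbers the real function would also push the "Ndb might have problems storing the max amount of rows specified" warning, since 11 is greater than 4*no_nodes.
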
diff --git a/sql/handler.h b/sql/handler.h
index 245defe61e0..0426312f404 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -373,8 +373,7 @@ public:
int compare_key(key_range *range);
virtual int ft_init() { return HA_ERR_WRONG_COMMAND; }
void ft_end() { ft_handler=NULL; }
- virtual FT_INFO *ft_init_ext(uint flags,uint inx,const byte *key,
- uint keylen)
+ virtual FT_INFO *ft_init_ext(uint flags, uint inx,String *key)
{ return NULL; }
virtual int ft_read(byte *buf) { return HA_ERR_WRONG_COMMAND; }
virtual int rnd_next(byte *buf)=0;
diff --git a/sql/item.cc b/sql/item.cc
index ab29c147dfb..d61d628e8fa 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -2639,7 +2639,53 @@ void Item_cache_row::bring_value()
}
-Item_type_holder::Item_type_holder(THD *thd, Item *item)
+  Returns a field for a temporary table depending on the item type
+ Returns field for temporary table dependind on item type
+
+ SYNOPSIS
+ get_holder_example_field()
+ thd - thread handler
+ item - pointer to item
+ table - empty table object
+
+ NOTE
+    It is possible to return a field for Item_func
+    items only if the field type of the item is a
+    date, time or datetime type.
+    See also field_types_to_be_kept() in field.cc.
+
+ RETURN
+ # - field
+ 0 - no field
+*/
+
+Field *get_holder_example_field(THD *thd, Item *item, TABLE *table)
+{
+ DBUG_ASSERT(table);
+
+ Item_func *tmp_item= 0;
+ if (item->type() == Item::FIELD_ITEM)
+ return (((Item_field*) item)->field);
+ if (item->type() == Item::FUNC_ITEM)
+ tmp_item= (Item_func *) item;
+ else if (item->type() == Item::SUM_FUNC_ITEM)
+ {
+ Item_sum *item_sum= (Item_sum *) item;
+ if (item_sum->keep_field_type())
+ {
+ if (item_sum->args[0]->type() == Item::FIELD_ITEM)
+ return (((Item_field*) item_sum->args[0])->field);
+ if (item_sum->args[0]->type() == Item::FUNC_ITEM)
+ tmp_item= (Item_func *) item_sum->args[0];
+ }
+ }
+ return (tmp_item && field_types_to_be_kept(tmp_item->field_type()) ?
+ tmp_item->tmp_table_field(table) : 0);
+}
+
+
+Item_type_holder::Item_type_holder(THD *thd, Item *item, TABLE *table)
:Item(thd, item), item_type(item->result_type()),
orig_type(item_type)
{
@@ -2649,10 +2695,7 @@ Item_type_holder::Item_type_holder(THD *thd, Item *item)
It is safe assign pointer on field, because it will be used just after
all JOIN::prepare calls and before any SELECT execution
*/
- if (item->type() == Item::FIELD_ITEM)
- field_example= ((Item_field*) item)->field;
- else
- field_example= 0;
+ field_example= get_holder_example_field(thd, item, table);
max_length= real_length(item);
maybe_null= item->maybe_null;
collation.set(item->collation);
@@ -2692,25 +2735,23 @@ inline bool is_attr_compatible(Item *from, Item *to)
(to->maybe_null || !from->maybe_null) &&
(to->result_type() != STRING_RESULT ||
from->result_type() != STRING_RESULT ||
- my_charset_same(from->collation.collation,
- to->collation.collation)));
+ (from->collation.collation == to->collation.collation)));
}
-bool Item_type_holder::join_types(THD *thd, Item *item)
+bool Item_type_holder::join_types(THD *thd, Item *item, TABLE *table)
{
uint32 new_length= real_length(item);
bool use_new_field= 0, use_expression_type= 0;
Item_result new_result_type= type_convertor[item_type][item->result_type()];
- bool item_is_a_field= item->type() == Item::FIELD_ITEM;
-
+ Field *field= get_holder_example_field(thd, item, table);
+ bool item_is_a_field= field;
/*
Check if both items point to fields: in this case we
can adjust column types of result table in the union smartly.
*/
if (field_example && item_is_a_field)
{
- Field *field= ((Item_field *)item)->field;
/* Can 'field_example' field store data of the column? */
if ((use_new_field=
(!field->field_cast_compatible(field_example->field_cast_type()) ||
@@ -2751,7 +2792,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item)
It is safe to assign a pointer to field here, because it will be used
before any table is closed.
*/
- field_example= ((Item_field*) item)->field;
+ field_example= field;
}
old_cs= collation.collation->name;
diff --git a/sql/item.h b/sql/item.h
index 237a8f7efac..e0de7452eec 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -1321,14 +1321,14 @@ protected:
Item_result orig_type;
Field *field_example;
public:
- Item_type_holder(THD*, Item*);
+ Item_type_holder(THD*, Item*, TABLE *);
Item_result result_type () const { return item_type; }
enum Type type() const { return TYPE_HOLDER; }
double val();
longlong val_int();
String *val_str(String*);
- bool join_types(THD *thd, Item *);
+ bool join_types(THD *thd, Item *, TABLE *);
Field *example() { return field_example; }
static uint32 real_length(Item *item);
void cleanup()
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index c5e6d520ab7..2b9a612da18 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -1503,7 +1503,11 @@ void in_string::set(uint pos,Item *item)
String *str=((String*) base)+pos;
String *res=item->val_str(str);
if (res && res != str)
+ {
+ if (res->uses_buffer_owned_by(str))
+ res->copy();
*str= *res;
+ }
if (!str->charset())
{
CHARSET_INFO *cs;
@@ -1960,6 +1964,36 @@ bool Item_cond::walk(Item_processor processor, byte *arg)
return Item_func::walk(processor, arg);
}
+
+/*
+ Move SUM items out from item tree and replace with reference
+
+ SYNOPSIS
+ split_sum_func()
+ thd Thread handler
+ ref_pointer_array Pointer to array of reference fields
+ fields All fields in select
+
+ NOTES
+    This function is run on all expressions (SELECT list, WHERE, HAVING etc.)
+ that have or refer (HAVING) to a SUM expression.
+
+    The split is done to get a unique item for each SUM function
+ so that we can easily find and calculate them.
+ (Calculation done by update_sum_func() and copy_sum_funcs() in
+ sql_select.cc)
+
+ All found SUM items are added FIRST in the fields list and
+ we replace the item with a reference.
+
+ We also replace all functions without side effects (like RAND() or UDF's)
+    that use columns as arguments.
+ For functions with side effects, we just remember any fields referred
+ by the function to ensure that we get a copy of the field value for the
+ first accepted row. This ensures that we can do things like
+ SELECT a*SUM(b) FROM t1 WHERE a=1
+*/
+
void Item_cond::split_sum_func(THD *thd, Item **ref_pointer_array,
List<Item> &fields)
{
@@ -1969,10 +2003,22 @@ void Item_cond::split_sum_func(THD *thd, Item **ref_pointer_array,
const_item_cache=0;
while ((item=li++))
{
- if (item->with_sum_func && item->type() != SUM_FUNC_ITEM)
+ /* with_sum_func is set for items that contains a SUM expression */
+ if (item->type() != SUM_FUNC_ITEM &&
+ (item->with_sum_func ||
+ (item->used_tables() & PSEUDO_TABLE_BITS)))
item->split_sum_func(thd, ref_pointer_array, fields);
- else if (item->used_tables() || item->type() == SUM_FUNC_ITEM)
+ else if (item->type() == SUM_FUNC_ITEM ||
+ (item->used_tables() && item->type() != REF_ITEM))
{
+ /*
+ Replace item with a reference so that we can easily calculate
+ it (in case of sum functions) or copy it (in case of fields)
+
+ The test above is to ensure we don't do a reference for things
+ that are constants or are not yet calculated as in:
+ SELECT RAND() as r1, SUM(a) as r2 FROM t1 HAVING r1 > 1 AND r2 > 0
+ */
Item **ref= li.ref();
uint el= fields.elements;
ref_pointer_array[el]= item;
diff --git a/sql/item_func.cc b/sql/item_func.cc
index bff49541252..d5d0ac8cd49 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -351,6 +351,7 @@ bool Item_func::walk (Item_processor processor, byte *argument)
return (this->*processor)(argument);
}
+
void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array,
List<Item> &fields)
{
@@ -358,9 +359,12 @@ void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array,
for (arg= args, arg_end= args+arg_count; arg != arg_end ; arg++)
{
Item *item=* arg;
- if (item->with_sum_func && item->type() != SUM_FUNC_ITEM)
+ if (item->type() != SUM_FUNC_ITEM &&
+ (item->with_sum_func ||
+ (item->used_tables() & PSEUDO_TABLE_BITS)))
item->split_sum_func(thd, ref_pointer_array, fields);
- else if (item->used_tables() || item->type() == SUM_FUNC_ITEM)
+ else if (item->type() == SUM_FUNC_ITEM ||
+ (item->used_tables() && item->type() != REF_ITEM))
{
uint el= fields.elements;
ref_pointer_array[el]= item;
@@ -2298,14 +2302,10 @@ longlong Item_func_last_insert_id::val_int()
longlong value=args[0]->val_int();
current_thd->insert_id(value);
null_value=args[0]->null_value;
- return value;
}
else
- {
- Item *it= get_system_var(current_thd, OPT_SESSION, "last_insert_id", 14,
- "last_insert_id()");
- return it->val_int();
- }
+ current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ return current_thd->insert_id();
}
/* This function is just used to test speed of different functions */
@@ -3047,9 +3047,7 @@ void Item_func_match::init_search(bool no_order)
if (join_key && !no_order)
flags|=FT_SORTED;
- ft_handler=table->file->ft_init_ext(flags, key,
- (byte*) ft_tmp->ptr(),
- ft_tmp->length());
+ ft_handler=table->file->ft_init_ext(flags, key, ft_tmp);
if (join_key)
table->file->ft_handler=ft_handler;
@@ -3091,12 +3089,12 @@ bool Item_func_match::fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref)
}
/*
Check that all columns come from the same table.
- We've already checked that columns in MATCH are fields so
+ We've already checked that columns in MATCH are fields so
PARAM_TABLE_BIT can only appear from AGAINST argument.
*/
if ((used_tables_cache & ~PARAM_TABLE_BIT) != item->used_tables())
key=NO_SUCH_KEY;
-
+
if (key == NO_SUCH_KEY && !(flags & FT_BOOL))
{
my_error(ER_WRONG_ARGUMENTS,MYF(0),"MATCH");
diff --git a/sql/item_row.cc b/sql/item_row.cc
index 4e4957b980e..0ace0fc0451 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -84,15 +84,20 @@ bool Item_row::fix_fields(THD *thd, TABLE_LIST *tabl, Item **ref)
return 0;
}
+
void Item_row::split_sum_func(THD *thd, Item **ref_pointer_array,
List<Item> &fields)
{
Item **arg, **arg_end;
for (arg= items, arg_end= items+arg_count; arg != arg_end ; arg++)
{
- if ((*arg)->with_sum_func && (*arg)->type() != SUM_FUNC_ITEM)
- (*arg)->split_sum_func(thd, ref_pointer_array, fields);
- else if ((*arg)->used_tables() || (*arg)->type() == SUM_FUNC_ITEM)
+ Item *item= *arg;
+ if (item->type() != SUM_FUNC_ITEM &&
+ (item->with_sum_func ||
+ (item->used_tables() & PSEUDO_TABLE_BITS)))
+ item->split_sum_func(thd, ref_pointer_array, fields);
+ else if (item->type() == SUM_FUNC_ITEM ||
+ (item->used_tables() && item->type() != REF_ITEM))
{
uint el= fields.elements;
ref_pointer_array[el]=*arg;
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index cee3316886a..b22b65eddd0 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -1758,9 +1758,12 @@ String *Item_func_elt::val_str(String *str)
void Item_func_make_set::split_sum_func(THD *thd, Item **ref_pointer_array,
List<Item> &fields)
{
- if (item->with_sum_func && item->type() != SUM_FUNC_ITEM)
+ if (item->type() != SUM_FUNC_ITEM &&
+ (item->with_sum_func ||
+ (item->used_tables() & PSEUDO_TABLE_BITS)))
item->split_sum_func(thd, ref_pointer_array, fields);
- else if (item->used_tables() || item->type() == SUM_FUNC_ITEM)
+ else if (item->type() == SUM_FUNC_ITEM ||
+ (item->used_tables() && item->type() != REF_ITEM))
{
uint el= fields.elements;
ref_pointer_array[el]=item;
@@ -2614,18 +2617,13 @@ String *Item_func_quote::val_str(String *str)
for (from= (char*) arg->ptr(), end= from + arg_length; from < end; from++)
new_length+= get_esc_bit(escmask, (uchar) *from);
- /*
- We have to use realloc() instead of alloc() as we want to keep the
- old result in arg
- */
- if (arg->realloc(new_length))
+ if (tmp_value.alloc(new_length))
goto null;
/*
- As 'arg' and 'str' may be the same string, we must replace characters
- from the end to the beginning
+ We replace characters from the end to the beginning
*/
- to= (char*) arg->ptr() + new_length - 1;
+ to= (char*) tmp_value.ptr() + new_length - 1;
*to--= '\'';
for (start= (char*) arg->ptr(),end= start + arg_length; end-- != start; to--)
{
@@ -2653,10 +2651,10 @@ String *Item_func_quote::val_str(String *str)
}
}
*to= '\'';
- arg->length(new_length);
- str->set_charset(collation.collation);
+ tmp_value.length(new_length);
+ tmp_value.set_charset(collation.collation);
null_value= 0;
- return arg;
+ return &tmp_value;
null:
null_value= 1;
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 698536a61c7..c1c0969672c 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -588,6 +588,7 @@ public:
class Item_func_quote :public Item_str_func
{
+ String tmp_value;
public:
Item_func_quote(Item *a) :Item_str_func(a) {}
const char *func_name() const { return "quote"; }
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 7434897ab90..fe1f268e277 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -485,62 +485,58 @@ void close_temporary(TABLE *table,bool delete_table)
void close_temporary_tables(THD *thd)
{
TABLE *table,*next;
- char *query, *end;
- uint query_buf_size;
- bool found_user_tables = 0;
+ char *query, *name_in_query, *end;
+ uint greatest_key_length= 0;
if (!thd->temporary_tables)
return;
+ /*
+    We write a DROP TEMPORARY TABLE for each temp table left, so that our
+    replication slave can clean them up. We do not write one multi-table DROP
+    TABLE binlog event, as that would cause problems if the slave uses
+    --replicate-*-table.
+ */
LINT_INIT(end);
- query_buf_size= 50; // Enough for DROP ... TABLE IF EXISTS
+  /* We'll always re-use the same buffer, so make it big enough for the longest name */
for (table=thd->temporary_tables ; table ; table=table->next)
- /*
- We are going to add 4 ` around the db/table names, so 1 does not look
- enough; indeed it is enough, because table->key_length is greater (by 8,
- because of server_id and thread_id) than db||table.
- */
- query_buf_size+= table->key_length+1;
+ greatest_key_length= max(greatest_key_length, table->key_length);
- if ((query = alloc_root(thd->mem_root, query_buf_size)))
+ if ((query = alloc_root(thd->mem_root, greatest_key_length+50)))
// Better add "if exists", in case a RESET MASTER has been done
- end=strmov(query, "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ");
+ name_in_query= strmov(query, "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `");
for (table=thd->temporary_tables ; table ; table=next)
{
- if (query) // we might be out of memory, but this is not fatal
+ /*
+    If we are OOM for 'query' this is not fatal. We skip temporary tables
+ not created directly by the user.
+ */
+ if (query && mysql_bin_log.is_open() && (table->real_name[0] != '#'))
{
- // skip temporary tables not created directly by the user
- if (table->real_name[0] != '#')
- found_user_tables = 1;
/*
Here we assume table_cache_key always starts
with \0 terminated db name
*/
- end = strxmov(end,"`",table->table_cache_key,"`.`",
- table->real_name,"`,", NullS);
+ end = strxmov(name_in_query, table->table_cache_key, "`.`",
+ table->real_name, "`", NullS);
+ Query_log_event qinfo(thd, query, (ulong)(end-query), 0, FALSE);
+ /*
+ Imagine the thread had created a temp table, then was doing a SELECT, and
+ the SELECT was killed. Then it's not clever to mark the statement above as
+ "killed", because it's not really a statement updating data, and there
+ are 99.99% chances it will succeed on slave. And, if thread is
+ killed now, it's not clever either.
+ If a real update (one updating a persistent table) was killed on the
+ master, then this real update will be logged with error_code=killed,
+ rightfully causing the slave to stop.
+ */
+ qinfo.error_code= 0;
+ mysql_bin_log.write(&qinfo);
}
next=table->next;
close_temporary(table);
}
- if (query && found_user_tables && mysql_bin_log.is_open())
- {
- /* The -1 is to remove last ',' */
- thd->clear_error();
- Query_log_event qinfo(thd, query, (ulong)(end-query)-1, 0, FALSE);
- /*
- Imagine the thread had created a temp table, then was doing a SELECT, and
- the SELECT was killed. Then it's not clever to mark the statement above as
- "killed", because it's not really a statement updating data, and there
- are 99.99% chances it will succeed on slave.
- If a real update (one updating a persistent table) was killed on the
- master, then this real update will be logged with error_code=killed,
- rightfully causing the slave to stop.
- */
- qinfo.error_code= 0;
- mysql_bin_log.write(&qinfo);
- }
thd->temporary_tables=0;
}
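
The rewritten close_temporary_tables() writes the DROP prefix once and then overwrites only the tail of the buffer with each `db`.`table` name, logging one binlog event per temporary table. A compact illustration of that buffer-reuse pattern, with sprintf standing in for strmov/strxmov and invented db and table names:

#include <cstdio>

int main()
{
  // The prefix is written once; each table name is then written over the tail.
  char query[512];
  char* name_in_query =
    query + std::sprintf(query, "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `");

  const char* dbs[]    = { "test", "test" };
  const char* tables[] = { "t1", "t2" };
  for (int i = 0; i < 2; i++)
  {
    char* end = name_in_query +
      std::sprintf(name_in_query, "%s`.`%s`", dbs[i], tables[i]);
    std::printf("binlog event (%d bytes): %s\n", (int) (end - query), query);
  }
  return 0;
}
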
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 85cd24c4fbb..e66eeb279d2 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -660,6 +660,8 @@ static int check_connection(THD *thd)
DBUG_PRINT("info",
("New connection received on %s", vio_description(net->vio)));
+ vio_in_addr(net->vio,&thd->remote.sin_addr);
+
if (!thd->host) // If TCP/IP connection
{
char ip[30];
@@ -704,7 +706,6 @@ static int check_connection(THD *thd)
DBUG_PRINT("info",("Host: %s",thd->host));
thd->host_or_ip= thd->host;
thd->ip= 0;
- bzero((char*) &thd->remote, sizeof(struct sockaddr));
}
vio_keepalive(net->vio, TRUE);
ulong pkt_len= 0;
diff --git a/sql/sql_string.h b/sql/sql_string.h
index a8fb9574c0b..9136dddbbf2 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -182,6 +182,11 @@ public:
{
if (&s != this)
{
+ /*
+ It is forbidden to do assignments like
+ some_string = substring_of_that_string
+ */
+ DBUG_ASSERT(!s.uses_buffer_owned_by(this));
free();
Ptr=s.Ptr ; str_length=s.str_length ; Alloced_length=s.Alloced_length;
alloced=0;
@@ -313,4 +318,9 @@ public:
/* Swap two string objects. Efficient way to exchange data without memcpy. */
void swap(String &s);
+
+ inline bool uses_buffer_owned_by(const String *s) const
+ {
+ return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length);
+ }
};
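
uses_buffer_owned_by() is the check the in_string::set() change in item_cmpfunc.cc relies on: if a returned String still points into the destination's own buffer, operator= would free that buffer out from under it, so the value is copied first. A compact model of the pointer-range test outside the String class (a hypothetical struct, for illustration only):

#include <cassert>
#include <cstring>

// Simplified stand-in for String: a buffer, a length and an ownership flag.
struct Buf
{
  char*  ptr;
  size_t len;
  bool   alloced;

  // Same test as String::uses_buffer_owned_by(): does *this point into
  // memory that 's' allocated and will free on assignment or destruction?
  bool uses_buffer_owned_by(const Buf* s) const
  {
    return s->alloced && ptr >= s->ptr && ptr < s->ptr + s->len;
  }
};

int main()
{
  char* owned = new char[16];
  std::strcpy(owned, "hello world");

  Buf full = { owned, 11, true };       // owns the allocation
  Buf sub  = { owned + 6, 5, false };   // "world": a view into full's buffer

  assert(sub.uses_buffer_owned_by(&full));   // assigning full = sub must copy first
  assert(!full.uses_buffer_owned_by(&sub));

  delete[] owned;
  return 0;
}
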
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index f3c107c2696..5f3875ba934 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -2272,7 +2272,10 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table,
strxmov(src_path, (*tmp_table)->path, reg_ext, NullS);
else
{
- fn_format( src_path, src_table, src_db, reg_ext, MYF(MY_UNPACK_FILENAME));
+ strxmov(src_path, mysql_data_home, "/", src_db, "/", src_table,
+ reg_ext, NullS);
+ /* Resolve symlinks (for windows) */
+ fn_format(src_path, src_path, "", "", MYF(MY_UNPACK_FILENAME));
if (access(src_path, F_OK))
{
my_error(ER_BAD_TABLE_ERROR, MYF(0), src_table);
@@ -2299,7 +2302,9 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table,
}
else
{
- fn_format( dst_path, table_name, db, reg_ext, MYF(MY_UNPACK_FILENAME));
+ strxmov(dst_path, mysql_data_home, "/", db, "/", table_name,
+ reg_ext, NullS);
+ fn_format(dst_path, dst_path, "", "", MYF(MY_UNPACK_FILENAME));
if (!access(dst_path, F_OK))
goto table_exists;
}
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 027a21db7ac..882316d57d7 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -148,6 +148,7 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
SELECT_LEX *sl, *first_select;
select_result *tmp_result;
bool is_union;
+ TABLE *empty_table= 0;
DBUG_ENTER("st_select_lex_unit::prepare");
describe= test(additional_options & SELECT_DESCRIBE);
@@ -239,13 +240,21 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
goto err;
if (sl == first_select)
{
+ /*
+ We need to create an empty table object. It is used
+ to create tmp_table fields in Item_type_holder.
+        The main reason is that we can't create a
+        field object without a table.
+ */
+ DBUG_ASSERT(!empty_table);
+ empty_table= (TABLE*) thd->calloc(sizeof(TABLE));
types.empty();
List_iterator_fast<Item> it(sl->item_list);
Item *item_tmp;
while ((item_tmp= it++))
{
/* Error's in 'new' will be detected after loop */
- types.push_back(new Item_type_holder(thd_arg, item_tmp));
+ types.push_back(new Item_type_holder(thd_arg, item_tmp, empty_table));
}
if (thd_arg->is_fatal_error)
@@ -264,7 +273,8 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
Item *type, *item_tmp;
while ((type= tp++, item_tmp= it++))
{
- if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp))
+ if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp,
+ empty_table))
DBUG_RETURN(-1);
}
}
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 0ec71bdfba3..663f2d2be34 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -167,7 +167,10 @@ int mysql_update(THD *thd,
else if ((used_index=table->file->key_used_on_scan) < MAX_KEY)
used_key_is_modified=check_if_key_used(table, used_index, fields);
else
+ {
used_key_is_modified=0;
+ used_index= MAX_KEY;
+ }
if (used_key_is_modified || order)
{
/*
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 1e51d8fb82d..e70efe14557 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -2455,10 +2455,11 @@ select_into:
select_from:
FROM join_table_list where_clause group_clause having_clause
opt_order_clause opt_limit_clause procedure_clause
- | FROM DUAL_SYM /* oracle compatibility: oracle always requires FROM
- clause, and DUAL is system table without fields.
- Is "SELECT 1 FROM DUAL" any better than
- "SELECT 1" ? Hmmm :) */
+ | FROM DUAL_SYM opt_limit_clause
+ /* oracle compatibility: oracle always requires FROM clause,
+ and DUAL is system table without fields.
+ Is "SELECT 1 FROM DUAL" any better than "SELECT 1" ?
+ Hmmm :) */
;
select_options:
diff --git a/sql/unireg.h b/sql/unireg.h
index 4ab2ba26b15..70df9a89c8f 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -72,6 +72,8 @@
#define PARAM_TABLE_BIT (((table_map) 1) << (sizeof(table_map)*8-3))
#define OUTER_REF_TABLE_BIT (((table_map) 1) << (sizeof(table_map)*8-2))
#define RAND_TABLE_BIT (((table_map) 1) << (sizeof(table_map)*8-1))
+#define PSEUDO_TABLE_BITS (PARAM_TABLE_BIT | OUTER_REF_TABLE_BIT | \
+ RAND_TABLE_BIT)
#define MAX_FIELDS 4096 /* Limit in the .frm file */
#define MAX_SORT_MEMORY (2048*1024-MALLOC_OVERHEAD)
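
PSEUDO_TABLE_BITS bundles the three pseudo-table bits so that the split_sum_func() changes in item_cmpfunc.cc, item_func.cc, item_row.cc and item_strfunc.cc can ask "does this item depend on a parameter, an outer reference or RAND()?" with a single mask. A self-contained sketch of that bit test (the bit layout mirrors the defines above; the used_tables() values are invented):

#include <stdio.h>

typedef unsigned long long table_map;

#define PARAM_TABLE_BIT     (((table_map) 1) << (sizeof(table_map)*8-3))
#define OUTER_REF_TABLE_BIT (((table_map) 1) << (sizeof(table_map)*8-2))
#define RAND_TABLE_BIT      (((table_map) 1) << (sizeof(table_map)*8-1))
#define PSEUDO_TABLE_BITS   (PARAM_TABLE_BIT | OUTER_REF_TABLE_BIT | RAND_TABLE_BIT)

int main(void)
{
  /* Invented used_tables() values: bit 0 stands for the first real table. */
  table_map plain_column = 1;                        /* t1.a            */
  table_map rand_call    = RAND_TABLE_BIT;           /* RAND()          */
  table_map mixed        = 1 | OUTER_REF_TABLE_BIT;  /* t1.a, outer ref */

  printf("plain: %d\n", (plain_column & PSEUDO_TABLE_BITS) != 0);  /* 0 */
  printf("rand:  %d\n", (rand_call    & PSEUDO_TABLE_BITS) != 0);  /* 1 */
  printf("mixed: %d\n", (mixed        & PSEUDO_TABLE_BITS) != 0);  /* 1 */
  return 0;
}
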
diff --git a/strings/ctype-big5.c b/strings/ctype-big5.c
index 997b8ce93d6..270b02212af 100644
--- a/strings/ctype-big5.c
+++ b/strings/ctype-big5.c
@@ -271,7 +271,7 @@ static int my_strnncollsp_big5(CHARSET_INFO * cs __attribute__((unused)),
if (!res && a_length != b_length)
{
const uchar *end;
- int swap= 0;
+ int swap= 1;
/*
Check the next not space character of the longer key. If it's < ' ',
then it's smaller than the other key.
@@ -286,7 +286,7 @@ static int my_strnncollsp_big5(CHARSET_INFO * cs __attribute__((unused)),
for (end= a + a_length-length; a < end ; a++)
{
if (*a != ' ')
- return ((int) *a - (int) ' ') ^ swap;
+ return (*a < ' ') ? -swap : swap;
}
}
return res;
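
The same two-line change is applied to every strnncollsp variant below (ctype-bin.c, ctype-gbk.c, ctype-latin1.c, ctype-mb.c, ctype-simple.c, ctype-sjis.c, ctype-tis620.c, ctype-ucs2.c, ctype-utf8.c): instead of the XOR trick on the character value, only the sign is returned, flipped when the shorter key was swapped into 'a'. A small standalone model of the intended space-padding semantics (a simplified 8-bit version, not the library code itself):

#include <stdio.h>
#include <string.h>

/*
 * Compare two 8-bit binary strings as if the shorter one were padded with
 * spaces: a trailing character below ' ' makes the longer key sort smaller,
 * any other non-space trailing character makes it sort greater.
 */
static int collsp_cmp(const unsigned char* a, size_t alen,
                      const unsigned char* b, size_t blen)
{
  size_t len = alen < blen ? alen : blen;
  int res = memcmp(a, b, len);
  if (res != 0 || alen == blen)
    return res;

  int swap = 1;
  if (alen < blen)              /* put the longer key in 'a', remember the flip */
  {
    const unsigned char* tmp = a; a = b; b = tmp;
    alen = blen;
    swap = -1;
  }
  for (const unsigned char* p = a + len; p < a + alen; p++)
    if (*p != ' ')
      return (*p < ' ') ? -swap : swap;
  return 0;
}

int main(void)
{
  printf("%d\n", collsp_cmp((const unsigned char*) "a ",  2,
                            (const unsigned char*) "a",   1));  /*  0: trailing space      */
  printf("%d\n", collsp_cmp((const unsigned char*) "a\t", 2,
                            (const unsigned char*) "a",   1));  /* -1: '\t' < ' '          */
  printf("%d\n", collsp_cmp((const unsigned char*) "a",   1,
                            (const unsigned char*) "a!",  2));  /* -1: '!' > ' ', b longer */
  return 0;
}

The ucs2 variant applies the same rule per 16-bit unit, which is why its test reads s[0] == 0 && s[1] < ' '.
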
diff --git a/strings/ctype-bin.c b/strings/ctype-bin.c
index 95c52512243..618879607ec 100644
--- a/strings/ctype-bin.c
+++ b/strings/ctype-bin.c
@@ -157,7 +157,7 @@ static int my_strnncollsp_8bit_bin(CHARSET_INFO * cs __attribute__((unused)),
}
if (a_length != b_length)
{
- int swap= 0;
+ int swap= 1;
/*
Check the next not space character of the longer key. If it's < ' ',
then it's smaller than the other key.
@@ -172,7 +172,7 @@ static int my_strnncollsp_8bit_bin(CHARSET_INFO * cs __attribute__((unused)),
for (end= a + a_length-length; a < end ; a++)
{
if (*a != ' ')
- return ((int) *a - (int) ' ') ^ swap;
+ return (*a < ' ') ? -swap : swap;
}
}
return 0;
diff --git a/strings/ctype-gbk.c b/strings/ctype-gbk.c
index 731ad58a2fb..9daa9f90f3c 100644
--- a/strings/ctype-gbk.c
+++ b/strings/ctype-gbk.c
@@ -2632,7 +2632,7 @@ static int my_strnncollsp_gbk(CHARSET_INFO * cs __attribute__((unused)),
if (!res && a_length != b_length)
{
const uchar *end;
- int swap= 0;
+ int swap= 1;
/*
Check the next not space character of the longer key. If it's < ' ',
then it's smaller than the other key.
@@ -2647,7 +2647,7 @@ static int my_strnncollsp_gbk(CHARSET_INFO * cs __attribute__((unused)),
for (end= a + a_length-length; a < end ; a++)
{
if (*a != ' ')
- return ((int) *a - (int) ' ') ^ swap;
+ return (*a < ' ') ? -swap : swap;
}
}
return res;
diff --git a/strings/ctype-latin1.c b/strings/ctype-latin1.c
index 32d9a227c2f..4ab101add5b 100644
--- a/strings/ctype-latin1.c
+++ b/strings/ctype-latin1.c
@@ -611,7 +611,7 @@ static int my_strnncollsp_latin1_de(CHARSET_INFO *cs __attribute__((unused)),
if (a != a_end || b != b_end)
{
- int swap= 0;
+ int swap= 1;
/*
Check the next not space character of the longer key. If it's < ' ',
then it's smaller than the other key.
@@ -626,7 +626,7 @@ static int my_strnncollsp_latin1_de(CHARSET_INFO *cs __attribute__((unused)),
for ( ; a < a_end ; a++)
{
if (*a != ' ')
- return ((int) *a - (int) ' ') ^ swap;
+ return (*a < ' ') ? -swap : swap;
}
}
return 0;
diff --git a/strings/ctype-mb.c b/strings/ctype-mb.c
index 731fc460cef..6cf48291c91 100644
--- a/strings/ctype-mb.c
+++ b/strings/ctype-mb.c
@@ -389,7 +389,7 @@ static int my_strnncollsp_mb_bin(CHARSET_INFO * cs __attribute__((unused)),
}
if (a_length != b_length)
{
- int swap= 0;
+ int swap= 1;
/*
Check the next not space character of the longer key. If it's < ' ',
then it's smaller than the other key.
@@ -404,7 +404,7 @@ static int my_strnncollsp_mb_bin(CHARSET_INFO * cs __attribute__((unused)),
for (end= a + a_length-length; a < end ; a++)
{
if (*a != ' ')
- return ((int) *a - (int) ' ') ^ swap;
+ return (*a < ' ') ? -swap : swap;
}
}
return 0;
diff --git a/strings/ctype-simple.c b/strings/ctype-simple.c
index 4dc6a1be27b..1a09b16a264 100644
--- a/strings/ctype-simple.c
+++ b/strings/ctype-simple.c
@@ -143,7 +143,7 @@ int my_strnncollsp_simple(CHARSET_INFO * cs, const uchar *a, uint a_length,
}
if (a_length != b_length)
{
- int swap= 0;
+ int swap= 1;
/*
Check the next not space character of the longer key. If it's < ' ',
then it's smaller than the other key.
@@ -153,12 +153,12 @@ int my_strnncollsp_simple(CHARSET_INFO * cs, const uchar *a, uint a_length,
/* put shorter key in s */
a_length= b_length;
a= b;
- swap= -1^1; /* swap sign of result */
+ swap= -1; /* swap sign of result */
}
for (end= a + a_length-length; a < end ; a++)
{
if (*a != ' ')
- return ((int) *a - (int) ' ') ^ swap;
+ return (*a < ' ') ? -swap : swap;
}
}
return 0;
diff --git a/strings/ctype-sjis.c b/strings/ctype-sjis.c
index c0b33a13cdd..0cb30a9b6ee 100644
--- a/strings/ctype-sjis.c
+++ b/strings/ctype-sjis.c
@@ -251,7 +251,7 @@ static int my_strnncollsp_sjis(CHARSET_INFO *cs __attribute__((unused)),
int res= my_strnncoll_sjis_internal(cs, &a, a_length, &b, b_length);
if (!res && (a != a_end || b != b_end))
{
- int swap= 0;
+ int swap= 1;
/*
Check the next not space character of the longer key. If it's < ' ',
then it's smaller than the other key.
@@ -266,7 +266,7 @@ static int my_strnncollsp_sjis(CHARSET_INFO *cs __attribute__((unused)),
for (; a < a_end ; a++)
{
if (*a != ' ')
- return ((int) *a - (int) ' ') ^ swap;
+ return (*a < ' ') ? -swap : swap;
}
}
return res;
diff --git a/strings/ctype-tis620.c b/strings/ctype-tis620.c
index 3a43c556ac8..6a6c55d214e 100644
--- a/strings/ctype-tis620.c
+++ b/strings/ctype-tis620.c
@@ -589,7 +589,7 @@ int my_strnncollsp_tis620(CHARSET_INFO * cs __attribute__((unused)),
}
if (a_length != b_length)
{
- int swap= 0;
+ int swap= 1;
/*
Check the next not space character of the longer key. If it's < ' ',
then it's smaller than the other key.
@@ -605,7 +605,7 @@ int my_strnncollsp_tis620(CHARSET_INFO * cs __attribute__((unused)),
{
if (*a != ' ')
{
- res= ((int) *a - (int) ' ') ^ swap;
+ res= (*a < ' ') ? -swap : swap;
goto ret;
}
}
diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c
index 936e2b6fdce..ea11f8816a5 100644
--- a/strings/ctype-ucs2.c
+++ b/strings/ctype-ucs2.c
@@ -275,7 +275,7 @@ static int my_strnncollsp_ucs2(CHARSET_INFO *cs __attribute__((unused)),
if (slen != tlen)
{
- int swap= 0;
+ int swap= 1;
if (slen < tlen)
{
s= t;
@@ -286,7 +286,7 @@ static int my_strnncollsp_ucs2(CHARSET_INFO *cs __attribute__((unused)),
for ( ; s < se ; s+= 2)
{
if (s[0] || s[1] != ' ')
- return (((int)s[0] << 8) + (int) s[1] - (int) ' ') ^ swap;
+ return (s[0] == 0 && s[1] < ' ') ? -swap : swap;
}
}
return 0;
diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c
index 502d0ec285e..486d428bf1d 100644
--- a/strings/ctype-utf8.c
+++ b/strings/ctype-utf8.c
@@ -2077,7 +2077,7 @@ static int my_strnncollsp_utf8(CHARSET_INFO *cs,
if (slen != tlen)
{
- int swap= 0;
+ int swap= 1;
if (slen < tlen)
{
slen= tlen;
@@ -2098,7 +2098,7 @@ static int my_strnncollsp_utf8(CHARSET_INFO *cs,
for ( ; s < se; s++)
{
if (*s != ' ')
- return ((int)*s - (int) ' ') ^ swap;
+ return (*s < ' ') ? -swap : swap;
}
}
return 0;
diff --git a/support-files/Makefile.am b/support-files/Makefile.am
index 7ae1071f9ec..0a6077f0efc 100644
--- a/support-files/Makefile.am
+++ b/support-files/Makefile.am
@@ -27,7 +27,8 @@ EXTRA_DIST = mysql.spec.sh \
mysql.server.sh \
binary-configure.sh \
magic \
- MySQL-shared-compat.spec.sh
+ MySQL-shared-compat.spec.sh \
+ ndb-config-2-node.ini.sh
SUBDIRS = MacOSX
@@ -38,7 +39,8 @@ pkgdata_DATA = my-small.cnf \
my-innodb-heavy-4G.cnf \
mysql-log-rotate \
mysql-@VERSION@.spec \
- MySQL-shared-compat.spec
+ MySQL-shared-compat.spec \
+ ndb-config-2-node.ini
pkgdata_SCRIPTS = mysql.server
@@ -52,7 +54,8 @@ CLEANFILES = my-small.cnf \
mysql-log-rotate \
mysql.server \
binary-configure \
- MySQL-shared-compat.spec
+ MySQL-shared-compat.spec \
+ ndb-config-2-node.ini
mysql-@VERSION@.spec: mysql.spec
rm -f $@
diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh
index 99280385965..b06ba462b26 100644
--- a/support-files/mysql.spec.sh
+++ b/support-files/mysql.spec.sh
@@ -104,6 +104,53 @@ This package contains the standard MySQL clients and administration tools.
%description client -l pt_BR
Este pacote contém os clientes padrão para o MySQL.
+%package ndb-storage
+Release: %{release}
+Summary: MySQL - ndbcluster storage engine
+Group: Applications/Databases
+
+%description ndb-storage
+This package contains the ndbcluster storage engine.
+It is necessary to have this package installed on all
+computers that should store ndbcluster table data.
+Note that this storage engine can only be used in conjunction
+with the MySQL Max server.
+
+%{see_base}
+
+%package ndb-management
+Release: %{release}
+Summary: MySQL - ndbcluster storage engine management
+Group: Applications/Databases
+
+%description ndb-management
+This package contains ndbcluster storage engine management.
+It is necessary to have this package installed on at least
+one computer in the cluster.
+
+%{see_base}
+
+%package ndb-tools
+Release: %{release}
+Summary: MySQL - ndbcluster storage engine basic tools
+Group: Applications/Databases
+
+%description ndb-tools
+This package contains ndbcluster storage engine basic tools.
+
+%{see_base}
+
+%package ndb-extra
+Release: %{release}
+Summary: MySQL - ndbcluster storage engine extra tools
+Group: Applications/Databases
+
+%description ndb-extra
+This package contains some extra ndbcluster storage engine tools for the advanced user.
+They should be used with caution.
+
+%{see_base}
+
%package bench
Release: %{release}
Requires: %{name}-client perl-DBI perl
@@ -162,6 +209,7 @@ Requires: MySQL-server >= 4.0
Optional MySQL server binary that supports additional features like:
- Berkeley DB Storage Engine
+ - Ndbcluster Storage Engine interface
- Archive Storage Engine
- CSV Storage Engine
- Example Storage Engine
@@ -279,6 +327,7 @@ BuildMySQL "--enable-shared \
--without-openssl \
--with-berkeley-db \
--with-innodb \
+ --with-ndbcluster \
--with-raid \
--with-archive \
--with-csv-storage-engine \
@@ -293,6 +342,9 @@ BuildMySQL "--enable-shared \
mv sql/mysqld sql/mysqld-max
nm --numeric-sort sql/mysqld-max > sql/mysqld-max.sym
+# Install the ndb binaries
+(cd ndb; make install DESTDIR=$RBR)
+
# Install embedded server library in the build root
install -m 644 libmysqld/libmysqld.a $RBR%{_libdir}/mysql/
@@ -435,6 +487,14 @@ chmod -R og-rw $mysql_datadir/mysql
# Allow safe_mysqld to start mysqld and print a message before we exit
sleep 2
+
+%post ndb-storage
+mysql_clusterdir=/var/lib/mysql-cluster
+
+# Create cluster directory if needed
+if test ! -d $mysql_clusterdir; then mkdir -m755 $mysql_clusterdir; fi
+
+
%post Max
# Restart mysqld, to use the new binary.
echo "Restarting mysqld."
@@ -475,6 +535,7 @@ fi
%doc Docs/manual.{html,ps,texi,txt}
%doc Docs/manual_toc.html
%doc support-files/my-*.cnf
+%doc support-files/ndb-*.ini
%doc %attr(644, root, root) %{_infodir}/mysql.info*
@@ -556,6 +617,32 @@ fi
%postun shared
/sbin/ldconfig
+%files ndb-storage
+%defattr(-,root,root,0755)
+%attr(755, root, root) %{_sbindir}/ndbd
+
+%files ndb-management
+%defattr(-,root,root,0755)
+%attr(755, root, root) %{_sbindir}/ndb_mgmd
+%attr(755, root, root) %{_bindir}/ndb_mgm
+
+%files ndb-tools
+%defattr(-,root,root,0755)
+%attr(755, root, root) %{_bindir}/ndb_mgm
+%attr(755, root, root) %{_bindir}/ndb_restore
+%attr(755, root, root) %{_bindir}/ndb_waiter
+%attr(755, root, root) %{_bindir}/ndb_select_all
+%attr(755, root, root) %{_bindir}/ndb_select_count
+%attr(755, root, root) %{_bindir}/ndb_desc
+%attr(755, root, root) %{_bindir}/ndb_show_tables
+%attr(755, root, root) %{_bindir}/ndb_test_platform
+
+%files ndb-extra
+%defattr(-,root,root,0755)
+%attr(755, root, root) %{_bindir}/ndb_drop_index
+%attr(755, root, root) %{_bindir}/ndb_drop_table
+%attr(755, root, root) %{_bindir}/ndb_delete_all
+
%files devel
%defattr(-, root, root, 0755)
%doc EXCEPTIONS-CLIENT
@@ -607,6 +694,12 @@ fi
# itself - note that they must be ordered by date (important when
# merging BK trees)
%changelog
+* Mon Feb 7 2005 Tomas Ulin <tomas@mysql.com>
+
+- enabled the "Ndbcluster" storage engine for the max binary
+- added extra make install in ndb subdir after Max build to get ndb binaries
+- added packages for ndbcluster storage engine
+
* Fri Jan 14 2005 Lenz Grimmer <lenz@mysql.com>
- replaced obsoleted "BuildPrereq" with "BuildRequires" instead
diff --git a/support-files/ndb-config-2-node.ini.sh b/support-files/ndb-config-2-node.ini.sh
new file mode 100644
index 00000000000..be80f1dd0b3
--- /dev/null
+++ b/support-files/ndb-config-2-node.ini.sh
@@ -0,0 +1,43 @@
+# Example Ndbcluster storage engine config file.
+#
+[ndbd default]
+NoOfReplicas= 2
+MaxNoOfConcurrentOperations= 10000
+DataMemory= 80M
+IndexMemory= 24M
+TimeBetweenWatchDogCheck= 30000
+DataDir= /var/lib/mysql-cluster
+MaxNoOfOrderedIndexes= 512
+
+[ndb_mgmd default]
+DataDir= /var/lib/mysql-cluster
+
+[ndb_mgmd]
+Id=1
+HostName= localhost
+
+[ndbd]
+Id= 2
+HostName= localhost
+
+[ndbd]
+Id= 3
+HostName= localhost
+
+[mysqld]
+Id= 4
+
+[mysqld]
+Id= 5
+
+[mysqld]
+Id= 6
+
+[mysqld]
+Id= 7
+
+# choose an unused port number
+# in this configuration 63132, 63133, and 63134
+# will be used
+[tcp default]
+PortNumber= 63132
diff --git a/vio/viosocket.c b/vio/viosocket.c
index 202d70b6c26..fee97daa943 100644
--- a/vio/viosocket.c
+++ b/vio/viosocket.c
@@ -276,7 +276,7 @@ void vio_in_addr(Vio *vio, struct in_addr *in)
{
DBUG_ENTER("vio_in_addr");
if (vio->localhost)
- bzero((char*) in, sizeof(*in)); /* This should never be executed */
+ bzero((char*) in, sizeof(*in));
else
*in=vio->remote.sin_addr;
DBUG_VOID_RETURN;
diff --git a/vio/viossl.c b/vio/viossl.c
index a489cb98f98..07713c83763 100644
--- a/vio/viossl.c
+++ b/vio/viossl.c
@@ -259,7 +259,7 @@ void vio_ssl_in_addr(Vio *vio, struct in_addr *in)
{
DBUG_ENTER("vio_ssl_in_addr");
if (vio->localhost)
- bzero((char*) in, sizeof(*in)); /* This should never be executed */
+ bzero((char*) in, sizeof(*in));
else
*in=vio->remote.sin_addr;
DBUG_VOID_RETURN;
diff --git a/zlib/Makefile.am b/zlib/Makefile.am
index 58d3811cd7c..e94d184a841 100644
--- a/zlib/Makefile.am
+++ b/zlib/Makefile.am
@@ -16,7 +16,7 @@
# Process this file with automake to create Makefile.in
-noinst_LTLIBRARIES=libz.la
+pkglib_LTLIBRARIES=libz.la
noinst_HEADERS= crc32.h deflate.h inffast.h inffixed.h inflate.h \
inftrees.h trees.h zconf.h zlib.h zutil.h