author      unknown <bell@sanja.is.com.ua>    2004-11-25 02:27:02 +0200
committer   unknown <bell@sanja.is.com.ua>    2004-11-25 02:27:02 +0200
commit      a352372170815bebd65755bc5fff00fdf964cf9b (patch)
tree        64b87aad2e52257f0c6cb4d27336e0d1fb516494
parent      f88d01932f4a81682267e21022686d3dea4edb78 (diff)
parent      00899088a5f576721ecbf3ca235cdfcd691c8a0e (diff)
download    mariadb-git-a352372170815bebd65755bc5fff00fdf964cf9b.tar.gz
Merge sanja.is.com.ua:/home/bell/mysql/bk/mysql-5.0
into sanja.is.com.ua:/home/bell/mysql/bk/work-join-5.0

sql/item.cc: Auto merged
sql/item.h: Auto merged
sql/mysql_priv.h: Auto merged
sql/sp.cc: Auto merged
sql/sql_base.cc: Auto merged
sql/sql_lex.cc: Auto merged
sql/sql_lex.h: Auto merged
sql/sql_parse.cc: Auto merged
sql/sql_select.cc: Auto merged
sql/sql_yacc.yy: Auto merged
sql/table.h: Auto merged
-rw-r--r--  BUILD/FINISH.sh | 2
-rwxr-xr-x  Build-tools/Do-compile | 12
-rw-r--r--  VC++Files/client/mysqladmin.dsp | 2
-rw-r--r--  client/Makefile.am | 2
-rw-r--r--  client/mysqladmin.cc | 2
-rw-r--r--  extra/perror.c | 2
-rw-r--r--  heap/hp_rfirst.c | 1
-rw-r--r--  include/m_ctype.h | 2
-rw-r--r--  innobase/buf/buf0buf.c | 3
-rw-r--r--  innobase/configure.in | 3
-rw-r--r--  innobase/lock/lock0lock.c | 3
-rw-r--r--  innobase/row/row0sel.c | 134
-rw-r--r--  innobase/trx/trx0purge.c | 23
-rw-r--r--  innobase/trx/trx0undo.c | 2
-rw-r--r--  myisam/ft_boolean_search.c | 4
-rw-r--r--  myisam/mi_create.c | 10
-rw-r--r--  mysql-test/mysql_test_run.c | 1728
-rw-r--r--  mysql-test/r/ctype_latin1.result | 9
-rw-r--r--  mysql-test/r/ctype_uca.result | 3
-rw-r--r--  mysql-test/r/ctype_utf8.result | 3
-rw-r--r--  mysql-test/r/delete.result | 30
-rw-r--r--  mysql-test/r/fulltext.result | 8
-rw-r--r--  mysql-test/r/heap.result | 7
-rw-r--r--  mysql-test/r/information_schema.result | 57
-rw-r--r--  mysql-test/r/information_schema_inno.result | 19
-rw-r--r--  mysql-test/r/innodb.result | 18
-rw-r--r--  mysql-test/r/key.result | 18
-rw-r--r--  mysql-test/r/lowercase_table3.result | 2
-rw-r--r--  mysql-test/r/lowercase_view.result | 16
-rw-r--r--  mysql-test/r/ndb_index_unique.result | 52
-rw-r--r--  mysql-test/r/ps.result | 21
-rw-r--r--  mysql-test/r/range.result | 10
-rw-r--r--  mysql-test/r/subselect.result | 15
-rw-r--r--  mysql-test/r/trigger.result | 4
-rw-r--r--  mysql-test/r/type_blob.result | 4
-rw-r--r--  mysql-test/t/ctype_latin1.test | 7
-rw-r--r--  mysql-test/t/ctype_uca.test | 5
-rw-r--r--  mysql-test/t/ctype_utf8.test | 6
-rw-r--r--  mysql-test/t/delete.test | 30
-rw-r--r--  mysql-test/t/fulltext.test | 2
-rw-r--r--  mysql-test/t/heap.test | 11
-rw-r--r--  mysql-test/t/information_schema.test | 28
-rw-r--r--  mysql-test/t/information_schema_inno.test | 16
-rw-r--r--  mysql-test/t/innodb.test | 22
-rw-r--r--  mysql-test/t/key.test | 23
-rw-r--r--  mysql-test/t/lowercase_view.test | 15
-rw-r--r--  mysql-test/t/ndb_autodiscover.test | 14
-rw-r--r--  mysql-test/t/ndb_index_unique.test | 34
-rw-r--r--  mysql-test/t/ps.test | 15
-rw-r--r--  mysql-test/t/range.test | 4
-rw-r--r--  mysql-test/t/rpl000001.test | 2
-rw-r--r--  mysql-test/t/subselect.test | 19
-rw-r--r--  mysql-test/t/trigger.test | 6
-rw-r--r--  mysql-test/t/type_blob.test | 4
-rw-r--r--  mysys/charset.c | 11
-rw-r--r--  ndb/docs/wl2077.txt | 35
-rw-r--r--  ndb/include/Makefile.am | 9
-rw-r--r--  ndb/include/mgmapi/mgmapi.h | 28
-rw-r--r--  ndb/include/mgmcommon/ConfigRetriever.hpp | 26
-rw-r--r--  ndb/include/ndbapi/NdbConnection.hpp | 4
-rw-r--r--  ndb/include/ndbapi/NdbIndexScanOperation.hpp | 6
-rw-r--r--  ndb/include/ndbapi/NdbResultSet.hpp | 6
-rw-r--r--  ndb/include/ndbapi/NdbScanOperation.hpp | 11
-rw-r--r--  ndb/include/ndbapi/ndb_cluster_connection.hpp | 2
-rw-r--r--  ndb/include/util/ndb_opts.h | 18
-rw-r--r--  ndb/src/common/debugger/signaldata/ScanTab.cpp | 5
-rw-r--r--  ndb/src/common/mgmcommon/ConfigRetriever.cpp | 163
-rw-r--r--  ndb/src/common/util/version.c | 1
-rw-r--r--  ndb/src/kernel/blocks/ERROR_codes.txt | 3
-rw-r--r--  ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 14
-rw-r--r--  ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 1
-rw-r--r--  ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 20
-rw-r--r--  ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 24
-rw-r--r--  ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 96
-rw-r--r--  ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 11
-rw-r--r--  ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 102
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 11
-rw-r--r--  ndb/src/kernel/blocks/suma/Suma.cpp | 2
-rw-r--r--  ndb/src/kernel/main.cpp | 12
-rw-r--r--  ndb/src/kernel/vm/Configuration.cpp | 41
-rw-r--r--  ndb/src/kernel/vm/Configuration.hpp | 3
-rw-r--r--  ndb/src/mgmapi/LocalConfig.cpp | 17
-rw-r--r--  ndb/src/mgmapi/LocalConfig.hpp (renamed from ndb/include/mgmapi/LocalConfig.hpp) | 1
-rw-r--r--  ndb/src/mgmapi/mgmapi.cpp | 138
-rw-r--r--  ndb/src/mgmclient/CommandInterpreter.cpp | 26
-rw-r--r--  ndb/src/mgmclient/main.cpp | 5
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.cpp | 109
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.hpp | 13
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvrConfig.cpp | 18
-rw-r--r--  ndb/src/mgmsrv/main.cpp | 73
-rw-r--r--  ndb/src/ndbapi/NdbConnection.cpp | 49
-rw-r--r--  ndb/src/ndbapi/NdbConnectionScan.cpp | 34
-rw-r--r--  ndb/src/ndbapi/NdbResultSet.cpp | 12
-rw-r--r--  ndb/src/ndbapi/NdbScanOperation.cpp | 183
-rw-r--r--  ndb/src/ndbapi/ndb_cluster_connection.cpp | 30
-rw-r--r--  ndb/src/ndbapi/ndberror.c | 3
-rw-r--r--  ndb/test/include/HugoTransactions.hpp | 16
-rw-r--r--  ndb/test/include/NdbRestarter.hpp | 2
-rw-r--r--  ndb/test/include/UtilTransactions.hpp | 4
-rw-r--r--  ndb/test/ndbapi/testBlobs.cpp | 303
-rw-r--r--  ndb/test/ndbapi/testDict.cpp | 19
-rw-r--r--  ndb/test/ndbapi/testReadPerf.cpp | 11
-rw-r--r--  ndb/test/ndbapi/testScan.cpp | 9
-rw-r--r--  ndb/test/ndbapi/testScanPerf.cpp | 78
-rw-r--r--  ndb/test/run-test/daily-basic-tests.txt | 4
-rw-r--r--  ndb/test/run-test/main.cpp | 22
-rw-r--r--  ndb/test/src/HugoTransactions.cpp | 166
-rw-r--r--  ndb/test/src/NDBT_Test.cpp | 4
-rw-r--r--  ndb/test/src/NdbBackup.cpp | 36
-rw-r--r--  ndb/test/src/NdbRestarter.cpp | 45
-rw-r--r--  ndb/test/src/UtilTransactions.cpp | 23
-rw-r--r--  ndb/test/tools/create_index.cpp | 2
-rw-r--r--  ndb/test/tools/hugoScanRead.cpp | 50
-rw-r--r--  ndb/tools/delete_all.cpp | 2
-rw-r--r--  ndb/tools/desc.cpp | 2
-rw-r--r--  ndb/tools/drop_index.cpp | 2
-rw-r--r--  ndb/tools/drop_tab.cpp | 2
-rw-r--r--  ndb/tools/listTables.cpp | 2
-rw-r--r--  ndb/tools/restore/restore_main.cpp | 2
-rw-r--r--  ndb/tools/select_all.cpp | 11
-rw-r--r--  ndb/tools/select_count.cpp | 2
-rw-r--r--  ndb/tools/waiter.cpp | 48
-rw-r--r--  sql/field.cc | 4
-rw-r--r--  sql/ha_myisammrg.cc | 11
-rw-r--r--  sql/ha_myisammrg.h | 1
-rw-r--r--  sql/ha_ndbcluster.cc | 56
-rw-r--r--  sql/ha_ndbcluster.h | 5
-rw-r--r--  sql/handler.cc | 4
-rw-r--r--  sql/item.cc | 42
-rw-r--r--  sql/item.h | 15
-rw-r--r--  sql/item_cmpfunc.cc | 20
-rw-r--r--  sql/mysql_priv.h | 9
-rw-r--r--  sql/mysqld.cc | 23
-rw-r--r--  sql/opt_range.cc | 6
-rw-r--r--  sql/set_var.cc | 17
-rw-r--r--  sql/sp.cc | 3
-rw-r--r--  sql/sp_head.cc | 8
-rw-r--r--  sql/sp_head.h | 8
-rw-r--r--  sql/sql_base.cc | 5
-rw-r--r--  sql/sql_do.cc | 1
-rw-r--r--  sql/sql_lex.cc | 28
-rw-r--r--  sql/sql_lex.h | 11
-rw-r--r--  sql/sql_parse.cc | 45
-rw-r--r--  sql/sql_select.cc | 13
-rw-r--r--  sql/sql_select.h | 3
-rw-r--r--  sql/sql_show.cc | 26
-rw-r--r--  sql/sql_table.cc | 15
-rw-r--r--  sql/sql_trigger.cc | 107
-rw-r--r--  sql/sql_trigger.h | 3
-rw-r--r--  sql/sql_yacc.yy | 66
-rw-r--r--  sql/table.h | 5
-rw-r--r--  strings/ctype-czech.c | 12
-rw-r--r--  strings/ctype-uca.c | 4
-rw-r--r--  strings/ctype-utf8.c | 26
-rw-r--r--  strings/ctype-win1250ch.c | 12
-rw-r--r--  strings/uca-dump.c | 91
-rw-r--r--  tests/client_test.c | 5
157 files changed, 2601 insertions, 2846 deletions
diff --git a/BUILD/FINISH.sh b/BUILD/FINISH.sh
index 094eb8275d6..81defaa7f52 100644
--- a/BUILD/FINISH.sh
+++ b/BUILD/FINISH.sh
@@ -10,7 +10,7 @@ do
done
commands="\
-$make -k clean || true
+$make -k distclean || true
/bin/rm -rf */.deps/*.P config.cache innobase/config.cache bdb/build_unix/config.cache bdb/dist/autom4te.cache autom4te.cache innobase/autom4te.cache;
aclocal || (echo \"Can't execute aclocal\" && exit 1)
diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile
index f3c20c81a9f..e98c3d84937 100755
--- a/Build-tools/Do-compile
+++ b/Build-tools/Do-compile
@@ -10,7 +10,7 @@ use Sys::Hostname;
$opt_distribution=$opt_user=$opt_config_env=$opt_config_extra_env="";
$opt_dbd_options=$opt_perl_options=$opt_config_options=$opt_make_options=$opt_suffix="";
$opt_tmp=$opt_version_suffix="";
-$opt_bundled_zlib=$opt_help=$opt_delete=$opt_debug=$opt_stage=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=$opt_with_cluster=$opt_with_debug=$opt_no_benchmark=$opt_no_mysqltest=$opt_without_embedded=$opt_readline=0;
+$opt_bundled_zlib=$opt_help=$opt_delete=$opt_debug=$opt_stage=$opt_no_test=$opt_no_perl=$opt_one_error=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=$opt_with_cluster=$opt_with_debug=$opt_no_benchmark=$opt_no_mysqltest=$opt_without_embedded=$opt_readline=0;
$opt_innodb=$opt_bdb=$opt_raid=$opt_libwrap=$opt_clearlogs=0;
GetOptions(
@@ -37,6 +37,7 @@ GetOptions(
"no-test",
"no-mysqltest",
"no-benchmark",
+ "one-error",
"perl-files=s",
"perl-options=s",
"raid",
@@ -310,6 +311,7 @@ if ($opt_stage <= 2)
$command=$make;
$command.= " $opt_make_options" if (defined($opt_make_options) && $opt_make_options ne "");
safe_system($command);
+ print LOG "Do-compile: Build successful\n";
}
#
@@ -372,11 +374,14 @@ $ENV{"LD_LIBRARY_PATH"}= ("$test_dir/lib" .
if ($opt_stage <= 5 && !$opt_no_test && !$opt_no_mysqltest)
{
my $flags= "";
+ my $force= "";
$flags.= " --with-ndbcluster" if ($opt_with_cluster);
+ $flags.= " --force" if (!$opt_one_error);
log_timestamp();
system("mkdir $bench_tmpdir") if (! -d $bench_tmpdir);
safe_cd("${test_dir}/mysql-test");
check_system("./mysql-test-run $flags --tmpdir=$bench_tmpdir --master_port=$mysql_tcp_port --slave_port=$slave_port --ndbcluster_port=$ndbcluster_port --manager-port=$manager_port --no-manager --sleep=10", "tests were successful");
+ # 'mysql-test-run' writes its own final message for log evaluation.
}
#
@@ -555,7 +560,10 @@ Do not run any tests.
Do not run the benchmark test (written in perl)
--no-mysqltest
-Do not run the the mysql-test-run test (Same as 'make test')
+Do not run the mysql-test-run test (Same as 'make test')
+
+--one-error
+Terminate the mysql-test-run test after the first difference (default: use '--force')
--no-perl
Do not compile or install Perl modules, use the system installed ones
diff --git a/VC++Files/client/mysqladmin.dsp b/VC++Files/client/mysqladmin.dsp
index a7e4404e253..7a0b3bec1a7 100644
--- a/VC++Files/client/mysqladmin.dsp
+++ b/VC++Files/client/mysqladmin.dsp
@@ -115,7 +115,7 @@ LINK32=xilink6.exe
# Name "mysqladmin - Win32 classic"
# Begin Source File
-SOURCE=.\mysqladmin.c
+SOURCE=.\mysqladmin.cpp
# End Source File
# End Target
# End Project
diff --git a/client/Makefile.am b/client/Makefile.am
index a78efec1eee..0362c2f3358 100644
--- a/client/Makefile.am
+++ b/client/Makefile.am
@@ -26,8 +26,8 @@ bin_PROGRAMS = mysql mysqladmin mysqlcheck mysqlshow \
mysqldump mysqlimport mysqltest mysqlbinlog mysqlmanagerc mysqlmanager-pwgen
noinst_HEADERS = sql_string.h completion_hash.h my_readline.h \
client_priv.h
-mysqladmin_SOURCES = mysqladmin.cc
mysql_SOURCES = mysql.cc readline.cc sql_string.cc completion_hash.cc
+mysqladmin_SOURCES = mysqladmin.cc
mysql_LDADD = @readline_link@ @TERMCAP_LIB@ $(LDADD) $(CXXLDFLAGS)
mysqlbinlog_LDADD = $(LDADD) $(CXXLDFLAGS)
mysql_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES) $(DEPLIB)
diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc
index eec0dcb90fe..8491d0df7b5 100644
--- a/client/mysqladmin.cc
+++ b/client/mysqladmin.cc
@@ -608,7 +608,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
{
char *pos,buff[40];
ulong sec;
- pos=strchr(status,' ');
+ pos= (char*) strchr(status,' ');
*pos++=0;
printf("%s\t\t\t",status); /* print label */
if ((status=str2int(pos,10,0,LONG_MAX,(long*) &sec)))
diff --git a/extra/perror.c b/extra/perror.c
index a28626fd873..1bd4b203120 100644
--- a/extra/perror.c
+++ b/extra/perror.c
@@ -69,7 +69,7 @@ static HA_ERRORS ha_errlist[]=
{
{ 120,"Didn't find key on read or update" },
{ 121,"Duplicate key on write or update" },
- { 123,"Someone has changed the row since it was read; Update with is recoverable" },
+ { 123,"Someone has changed the row since it was read (while the table was locked to prevent it)" },
{ 124,"Wrong index given to function" },
{ 126,"Index file is crashed" },
{ 127,"Record-file is crashed" },
diff --git a/heap/hp_rfirst.c b/heap/hp_rfirst.c
index 1668376ed1c..85548fea212 100644
--- a/heap/hp_rfirst.c
+++ b/heap/hp_rfirst.c
@@ -52,6 +52,7 @@ int heap_rfirst(HP_INFO *info, byte *record, int inx)
my_errno=HA_ERR_END_OF_FILE;
DBUG_RETURN(my_errno);
}
+ DBUG_ASSERT(0); /* TODO fix it */
info->current_record=0;
info->current_hash_ptr=0;
info->update=HA_STATE_PREV_FOUND;
diff --git a/include/m_ctype.h b/include/m_ctype.h
index ddc21070547..26e285b9683 100644
--- a/include/m_ctype.h
+++ b/include/m_ctype.h
@@ -63,7 +63,7 @@ typedef struct unicase_info_st
#define MY_CS_UNICODE 128 /* is a charset is full unicode */
#define MY_CS_READY 256 /* if a charset is initialized */
#define MY_CS_AVAILABLE 512 /* If either compiled-in or loaded*/
-
+#define MY_CS_CSSORT 1024 /* if case sensitive sort order */
#define MY_CHARSET_UNDEFINED 0
diff --git a/innobase/buf/buf0buf.c b/innobase/buf/buf0buf.c
index 376deedabec..d686b559528 100644
--- a/innobase/buf/buf0buf.c
+++ b/innobase/buf/buf0buf.c
@@ -2152,7 +2152,8 @@ buf_get_latched_pages_number(void)
block = buf_pool_get_nth_block(buf_pool, i);
- if ((block->buf_fix_count != 0) || (block->io_fix != 0))
+ if (((block->buf_fix_count != 0) || (block->io_fix != 0)) &&
+ block->magic_n == BUF_BLOCK_MAGIC_N )
fixed_pages_number++;
}
diff --git a/innobase/configure.in b/innobase/configure.in
index 652291f1f38..d83da9fdc5c 100644
--- a/innobase/configure.in
+++ b/innobase/configure.in
@@ -110,6 +110,9 @@ esac
case "$target" in
i[[4567]]86-*-*)
CFLAGS="$CFLAGS -DUNIV_INTEL_X86";;
+ # The compiler on Linux/S390 does not seem to have inlining
+ s390-*-*)
+ CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";;
esac
AC_OUTPUT(Makefile os/Makefile ut/Makefile btr/Makefile dnl
diff --git a/innobase/lock/lock0lock.c b/innobase/lock/lock0lock.c
index 98def88fa31..479952235f0 100644
--- a/innobase/lock/lock0lock.c
+++ b/innobase/lock/lock0lock.c
@@ -4090,6 +4090,9 @@ lock_print_info(
(ulong) ut_dulint_get_low(purge_sys->purge_undo_no));
fprintf(file,
+ "History list length %lu\n", (ulong) trx_sys->rseg_history_len);
+
+ fprintf(file,
"Total number of lock structs in row lock hash table %lu\n",
(ulong) lock_get_n_rec_locks());
diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c
index 71163bc35b6..26d26ca323c 100644
--- a/innobase/row/row0sel.c
+++ b/innobase/row/row0sel.c
@@ -638,23 +638,24 @@ row_sel_get_clust_rec(
if (!node->read_view) {
/* Try to place a lock on the index record */
- /* If innodb_locks_unsafe_for_binlog option is used,
- we lock only the record, i.e. next-key locking is
- not used.
- */
- if ( srv_locks_unsafe_for_binlog )
- {
- err = lock_clust_rec_read_check_and_lock(0, clust_rec,
- index,node->row_lock_mode, LOCK_REC_NOT_GAP, thr);
- }
- else
- {
- err = lock_clust_rec_read_check_and_lock(0, clust_rec, index,
- node->row_lock_mode, LOCK_ORDINARY, thr);
-
- }
-
- if (err != DB_SUCCESS) {
+ /* If innodb_locks_unsafe_for_binlog option is used,
+ we lock only the record, i.e. next-key locking is
+ not used.
+ */
+
+ if (srv_locks_unsafe_for_binlog) {
+ err = lock_clust_rec_read_check_and_lock(0,
+ clust_rec,
+ index, node->row_lock_mode,
+ LOCK_REC_NOT_GAP, thr);
+ } else {
+ err = lock_clust_rec_read_check_and_lock(0,
+ clust_rec,
+ index, node->row_lock_mode,
+ LOCK_ORDINARY, thr);
+ }
+
+ if (err != DB_SUCCESS) {
return(err);
}
@@ -1205,22 +1206,24 @@ rec_loop:
if (!consistent_read) {
- /* If innodb_locks_unsafe_for_binlog option is used,
- we lock only the record, i.e. next-key locking is
- not used.
- */
-
- if ( srv_locks_unsafe_for_binlog )
- {
- err = sel_set_rec_lock(page_rec_get_next(rec), index,
- node->row_lock_mode, LOCK_REC_NOT_GAP, thr);
- }
- else
- {
- err = sel_set_rec_lock(page_rec_get_next(rec), index,
- node->row_lock_mode, LOCK_ORDINARY, thr);
- }
- if (err != DB_SUCCESS) {
+ /* If innodb_locks_unsafe_for_binlog option is used,
+ we lock only the record, i.e. next-key locking is
+ not used.
+ */
+
+ if (srv_locks_unsafe_for_binlog) {
+ err = sel_set_rec_lock(page_rec_get_next(rec),
+ index,
+ node->row_lock_mode,
+ LOCK_REC_NOT_GAP, thr);
+ } else {
+ err = sel_set_rec_lock(page_rec_get_next(rec),
+ index,
+ node->row_lock_mode,
+ LOCK_ORDINARY, thr);
+ }
+
+ if (err != DB_SUCCESS) {
/* Note that in this case we will store in pcur
the PREDECESSOR of the record we are waiting
the lock for */
@@ -1245,21 +1248,18 @@ rec_loop:
if (!consistent_read) {
/* Try to place a lock on the index record */
- /* If innodb_locks_unsafe_for_binlog option is used,
- we lock only the record, i.e. next-key locking is
- not used.
- */
+ /* If innodb_locks_unsafe_for_binlog option is used,
+ we lock only the record, i.e. next-key locking is
+ not used.
+ */
- if ( srv_locks_unsafe_for_binlog )
- {
- err = sel_set_rec_lock(rec, index, node->row_lock_mode,
+ if (srv_locks_unsafe_for_binlog) {
+ err = sel_set_rec_lock(rec, index, node->row_lock_mode,
LOCK_REC_NOT_GAP, thr);
- }
- else
- {
- err = sel_set_rec_lock(rec, index, node->row_lock_mode,
+ } else {
+ err = sel_set_rec_lock(rec, index, node->row_lock_mode,
LOCK_ORDINARY, thr);
- }
+ }
if (err != DB_SUCCESS) {
@@ -3234,8 +3234,7 @@ rec_loop:
we do not lock gaps. Supremum record is really
a gap and therefore we do not set locks there. */
- if ( srv_locks_unsafe_for_binlog == FALSE )
- {
+ if (srv_locks_unsafe_for_binlog == FALSE) {
err = sel_set_rec_lock(rec, index,
prebuilt->select_lock_type,
LOCK_ORDINARY, thr);
@@ -3337,11 +3336,18 @@ rec_loop:
if (prebuilt->select_lock_type != LOCK_NONE
&& set_also_gap_locks) {
- /* Try to place a lock on the index record */
- err = sel_set_rec_lock(rec, index,
+ /* Try to place a gap lock on the index
+ record only if innodb_locks_unsafe_for_binlog
+ option is not set */
+
+ if (srv_locks_unsafe_for_binlog == FALSE) {
+
+ err = sel_set_rec_lock(rec, index,
prebuilt->select_lock_type,
LOCK_GAP, thr);
+ }
+
if (err != DB_SUCCESS) {
goto lock_wait_or_error;
@@ -3363,11 +3369,18 @@ rec_loop:
if (prebuilt->select_lock_type != LOCK_NONE
&& set_also_gap_locks) {
- /* Try to place a lock on the index record */
- err = sel_set_rec_lock(rec, index,
+ /* Try to place a gap lock on the index
+ record only if innodb_locks_unsafe_for_binlog
+ option is not set */
+
+ if (srv_locks_unsafe_for_binlog == FALSE) {
+
+ err = sel_set_rec_lock(rec, index,
prebuilt->select_lock_type,
LOCK_GAP, thr);
+ }
+
if (err != DB_SUCCESS) {
goto lock_wait_or_error;
@@ -3401,19 +3414,16 @@ rec_loop:
prebuilt->select_lock_type,
LOCK_REC_NOT_GAP, thr);
} else {
- /* If innodb_locks_unsafe_for_binlog option is used,
- we lock only the record, i.e. next-key locking is
- not used.
- */
- if ( srv_locks_unsafe_for_binlog )
- {
- err = sel_set_rec_lock(rec, index,
+ /* If innodb_locks_unsafe_for_binlog option is used,
+ we lock only the record, i.e. next-key locking is
+ not used. */
+
+ if (srv_locks_unsafe_for_binlog) {
+ err = sel_set_rec_lock(rec, index,
prebuilt->select_lock_type,
LOCK_REC_NOT_GAP, thr);
- }
- else
- {
- err = sel_set_rec_lock(rec, index,
+ } else {
+ err = sel_set_rec_lock(rec, index,
prebuilt->select_lock_type,
LOCK_ORDINARY, thr);
}
diff --git a/innobase/trx/trx0purge.c b/innobase/trx/trx0purge.c
index 5c62640e011..3df34111281 100644
--- a/innobase/trx/trx0purge.c
+++ b/innobase/trx/trx0purge.c
@@ -289,7 +289,7 @@ trx_purge_add_update_undo_to_history(
flst_get_len(seg_header + TRX_UNDO_PAGE_LIST, mtr));
mlog_write_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE,
- hist_size + undo->size, MLOG_4BYTES, mtr);
+ hist_size + undo->size, MLOG_4BYTES, mtr);
}
/* Add the log as the first in the history list */
@@ -646,6 +646,27 @@ trx_purge_rseg_get_next_history_log(
mutex_exit(&(rseg->mutex));
mtr_commit(&mtr);
+ mutex_enter(&kernel_mutex);
+
+ /* Add debug code to track history list corruption reported
+ on the MySQL mailing list on Nov 9, 2004. The fut0lst.c
+ file-based list was corrupt. The prev node pointer was
+ FIL_NULL, even though the list length was over 8 million nodes!
+ We assume that purge truncates the history list in moderate
+ size pieces, and if we here reach the head of the list, the
+ list cannot be longer than 20 000 undo logs now. */
+
+ if (trx_sys->rseg_history_len > 20000) {
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+" InnoDB: Warning: purge reached the head of the history list,\n"
+"InnoDB: but its length is still reported as %lu! Make a detailed bug\n"
+"InnoDB: report, and post it to bugs.mysql.com\n",
+ (ulong)trx_sys->rseg_history_len);
+ }
+
+ mutex_exit(&kernel_mutex);
+
return;
}
diff --git a/innobase/trx/trx0undo.c b/innobase/trx/trx0undo.c
index c1edc223cbc..8d1518753dd 100644
--- a/innobase/trx/trx0undo.c
+++ b/innobase/trx/trx0undo.c
@@ -1241,7 +1241,7 @@ trx_undo_lists_init(
if (page_no != FIL_NULL
&& srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN) {
-
+
undo = trx_undo_mem_create_at_db_start(rseg, i,
page_no, &mtr);
size += undo->size;
diff --git a/myisam/ft_boolean_search.c b/myisam/ft_boolean_search.c
index 9a51eff88e1..c432ac5a16c 100644
--- a/myisam/ft_boolean_search.c
+++ b/myisam/ft_boolean_search.c
@@ -348,7 +348,7 @@ static void _ftb_init_index_search(FT_INFO *ftb)
FTB_EXPR *top_ftbe=ftbe->up->up;
ftbw->docid[0]=HA_OFFSET_ERROR;
for (ftbe=ftbw->up; ftbe != top_ftbe; ftbe=ftbe->up)
- if (ftbe->flags & FTB_FLAG_YES)
+ if (!(ftbe->flags & FTB_FLAG_NO))
ftbe->yweaks++;
ftbe=0;
break;
@@ -356,7 +356,7 @@ static void _ftb_init_index_search(FT_INFO *ftb)
}
if (!ftbe)
continue;
- /* 3 */
+ /* 4 */
if (!is_tree_inited(& ftb->no_dupes))
init_tree(& ftb->no_dupes,0,0,sizeof(my_off_t),
_ftb_no_dupes_cmp,0,0,0);
diff --git a/myisam/mi_create.c b/myisam/mi_create.c
index 7fc7cc4edf1..f99a2c655d2 100644
--- a/myisam/mi_create.c
+++ b/myisam/mi_create.c
@@ -16,7 +16,7 @@
/* Create a MyISAM table */
-#include "fulltext.h"
+#include "ftdefs.h"
#include "sp_defs.h"
#if defined(MSDOS) || defined(__WIN__)
@@ -41,7 +41,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
File dfile,file;
int errpos,save_errno;
myf create_flag;
- uint fields,length,max_key_length,packed,pointer,
+ uint fields,length,max_key_length,packed,pointer,real_length_diff,
key_length,info_length,key_segs,options,min_key_length_skip,
base_pos,varchar_count,long_varchar_count,varchar_length,
max_key_block_length,unique_key_parts,fulltext_keys,offset;
@@ -238,7 +238,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
{
share.state.key_root[i]= HA_OFFSET_ERROR;
- min_key_length_skip=length=0;
+ min_key_length_skip=length=real_length_diff=0;
key_length=pointer;
if (keydef->flag & HA_SPATIAL)
{
@@ -297,6 +297,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
key_length+= HA_FT_MAXBYTELEN+HA_FT_WLEN;
length++; /* At least one length byte */
min_key_length_skip+=HA_FT_MAXBYTELEN;
+ real_length_diff=HA_FT_MAXBYTELEN-FT_MAX_WORD_LEN_FOR_SORT;
}
else
{
@@ -397,7 +398,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
key_segs)
share.state.rec_per_key_part[key_segs-1]=1L;
length+=key_length;
- keydef->block_length= MI_BLOCK_SIZE(length,pointer,MI_MAX_KEYPTR_SIZE);
+ keydef->block_length= MI_BLOCK_SIZE(length-real_length_diff,
+ pointer,MI_MAX_KEYPTR_SIZE);
if (keydef->block_length > MI_MAX_KEY_BLOCK_LENGTH ||
length >= MI_MAX_KEY_BUFF)
{
diff --git a/mysql-test/mysql_test_run.c b/mysql-test/mysql_test_run.c
deleted file mode 100644
index 6f388fc4a45..00000000000
--- a/mysql-test/mysql_test_run.c
+++ /dev/null
@@ -1,1728 +0,0 @@
-/*
- Copyright (c) 2002, 2003 Novell, Inc. All Rights Reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <errno.h>
-#ifndef __WIN__
-#include <dirent.h>
-#endif
-#include <string.h>
-#ifdef __NETWARE__
-#include <screen.h>
-#include <nks/vm.h>
-#endif
-#include <ctype.h>
-#include <sys/stat.h>
-#ifndef __WIN__
-#include <unistd.h>
-#endif
-#include <fcntl.h>
-#ifdef __NETWARE__
-#include <sys/mode.h>
-#endif
-#ifdef __WIN__
-#include <Shlwapi.h>
-#include <direct.h>
-#endif
-
-#include "my_manage.h"
-
-/******************************************************************************
-
- macros
-
-******************************************************************************/
-
-#define HEADER "TEST RESULT \n"
-#define DASH "-------------------------------------------------------\n"
-
-#define NW_TEST_SUFFIX ".nw-test"
-#define NW_RESULT_SUFFIX ".nw-result"
-#define TEST_SUFFIX ".test"
-#define RESULT_SUFFIX ".result"
-#define REJECT_SUFFIX ".reject"
-#define OUT_SUFFIX ".out"
-#define ERR_SUFFIX ".err"
-
-const char *TEST_PASS = "[ pass ]";
-const char *TEST_SKIP = "[ skip ]";
-const char *TEST_FAIL = "[ fail ]";
-const char *TEST_BAD = "[ bad ]";
-const char *TEST_IGNORE = "[ignore]";
-
-/******************************************************************************
-
- global variables
-
-******************************************************************************/
-#ifdef __NETWARE__
-static char base_dir[PATH_MAX] = "sys:/mysql";
-#else
-static char base_dir[PATH_MAX] = "..";
-#endif
-static char db[PATH_MAX] = "test";
-static char user[PATH_MAX] = "root";
-static char password[PATH_MAX] = "";
-
-int master_port = 9306;
-int slave_port = 9307;
-
-#if !defined(__NETWARE__) && !defined(__WIN__)
-static char master_socket[PATH_MAX] = "./var/tmp/master.sock";
-static char slave_socket[PATH_MAX] = "./var/tmp/slave.sock";
-#endif
-
-// comma delimited list of tests to skip or empty string
-#ifndef __WIN__
-static char skip_test[PATH_MAX] = " lowercase_table3 , system_mysql_db_fix ";
-#else
-/*
- The most ignore testes contain the calls of system command
-*/
-#define MAX_COUNT_TESTES 1024
-/*
- lowercase_table3 is disabled by Gerg
- system_mysql_db_fix is disabled by Gerg
- sp contains a command system
- rpl_EE_error contains a command system
- rpl_loaddatalocal contains a command system
- ndb_autodiscover contains a command system
- rpl_rotate_logs contains a command system
- repair contains a command system
- rpl_trunc_binlog contains a command system
- mysqldump contains a command system
- rpl000001 makes non-exit loop...temporary skiped
-*/
-static char skip_test[PATH_MAX] = " lowercase_table3 , system_mysql_db_fix , sp , rpl_EE_error , rpl_loaddatalocal , ndb_autodiscover , rpl_rotate_logs , repair , rpl_trunc_binlog , mysqldump , rpl000001 ";
-#endif
-static char ignore_test[PATH_MAX] = "";
-
-static char bin_dir[PATH_MAX];
-static char mysql_test_dir[PATH_MAX];
-static char test_dir[PATH_MAX];
-static char mysql_tmp_dir[PATH_MAX];
-static char result_dir[PATH_MAX];
-static char master_dir[PATH_MAX];
-static char slave_dir[PATH_MAX];
-static char lang_dir[PATH_MAX];
-static char char_dir[PATH_MAX];
-
-static char mysqladmin_file[PATH_MAX];
-static char mysqld_file[PATH_MAX];
-static char mysqltest_file[PATH_MAX];
-#ifndef __WIN__
-static char master_pid[PATH_MAX];
-static char slave_pid[PATH_MAX];
-static char sh_file[PATH_MAX] = "/bin/sh";
-#else
-static HANDLE master_pid;
-static HANDLE slave_pid;
-#endif
-
-static char master_opt[PATH_MAX] = "";
-static char slave_opt[PATH_MAX] = "";
-
-static char slave_master_info[PATH_MAX] = "";
-
-static char master_init_script[PATH_MAX] = "";
-static char slave_init_script[PATH_MAX] = "";
-
-// OpenSSL
-static char ca_cert[PATH_MAX];
-static char server_cert[PATH_MAX];
-static char server_key[PATH_MAX];
-static char client_cert[PATH_MAX];
-static char client_key[PATH_MAX];
-
-int total_skip = 0;
-int total_pass = 0;
-int total_fail = 0;
-int total_test = 0;
-
-int total_ignore = 0;
-
-int use_openssl = FALSE;
-int master_running = FALSE;
-int slave_running = FALSE;
-int skip_slave = TRUE;
-int single_test = TRUE;
-
-int restarts = 0;
-
-FILE *log_fd = NULL;
-
-/******************************************************************************
-
- functions
-
-******************************************************************************/
-
-/******************************************************************************
-
- prototypes
-
-******************************************************************************/
-
-void report_stats();
-void install_db(char *);
-void mysql_install_db();
-void start_master();
-void start_slave();
-void mysql_start();
-void stop_slave();
-void stop_master();
-void mysql_stop();
-void mysql_restart();
-int read_option(char *, char *);
-void run_test(char *);
-void setup(char *);
-void vlog(const char *, va_list);
-void mlog(const char *, ...);
-void log_info(const char *, ...);
-void log_error(const char *, ...);
-void log_errno(const char *, ...);
-void die(const char *);
-char *str_tok(char *string, const char *delim);
-#ifndef __WIN__
-void run_init_script(const char *script_name);
-#endif
-/******************************************************************************
-
- report_stats()
-
- Report the gathered statistics.
-
-******************************************************************************/
-void report_stats()
-{
- if (total_fail == 0)
- {
- mlog("\nAll %d test(s) were successful.\n", total_test);
- }
- else
- {
- double percent = ((double)total_pass / total_test) * 100;
-
- mlog("\nFailed %u/%u test(s), %.02f%% successful.\n",
- total_fail, total_test, percent);
- mlog("\nThe .out and .err files in %s may give you some\n", result_dir);
- mlog("hint of what when wrong.\n");
- mlog("\nIf you want to report this error, please first read the documentation\n");
- mlog("at: http://www.mysql.com/doc/M/y/MySQL_test_suite.html\n");
- }
-}
-
-/******************************************************************************
-
- install_db()
-
- Install the a database.
-
-******************************************************************************/
-void install_db(char *datadir)
-{
- arg_list_t al;
- int err;
- char input[PATH_MAX];
- char output[PATH_MAX];
- char error[PATH_MAX];
-
- // input file
-#ifdef __NETWARE__
- snprintf(input, PATH_MAX, "%s/bin/init_db.sql", base_dir);
-#else
- snprintf(input, PATH_MAX, "%s/mysql-test/init_db.sql", base_dir);
-#endif
- snprintf(output, PATH_MAX, "%s/install.out", datadir);
- snprintf(error, PATH_MAX, "%s/install.err", datadir);
-
- // args
- init_args(&al);
- add_arg(&al, mysqld_file);
- add_arg(&al, "--no-defaults");
- add_arg(&al, "--bootstrap");
- add_arg(&al, "--skip-grant-tables");
- add_arg(&al, "--basedir=%s", base_dir);
- add_arg(&al, "--datadir=%s", datadir);
- add_arg(&al, "--skip-innodb");
- add_arg(&al, "--skip-bdb");
-#ifndef __NETWARE__
- add_arg(&al, "--character-sets-dir=%s", char_dir);
- add_arg(&al, "--language=%s", lang_dir);
-#endif
-
- // spawn
- if ((err = spawn(mysqld_file, &al, TRUE, input, output, error, NULL)) != 0)
- {
- die("Unable to create database.");
- }
-
- // free args
- free_args(&al);
-}
-
-/******************************************************************************
-
- mysql_install_db()
-
- Install the test databases.
-
-******************************************************************************/
-void mysql_install_db()
-{
- char temp[PATH_MAX];
-
- // var directory
- snprintf(temp, PATH_MAX, "%s/var", mysql_test_dir);
-
- // clean up old direcotry
- del_tree(temp);
-
- // create var directory
-#ifndef __WIN__
- mkdir(temp, S_IRWXU);
- // create subdirectories
- mlog("Creating test-suite folders...\n");
- snprintf(temp, PATH_MAX, "%s/var/run", mysql_test_dir);
- mkdir(temp, S_IRWXU);
- snprintf(temp, PATH_MAX, "%s/var/tmp", mysql_test_dir);
- mkdir(temp, S_IRWXU);
- snprintf(temp, PATH_MAX, "%s/var/master-data", mysql_test_dir);
- mkdir(temp, S_IRWXU);
- snprintf(temp, PATH_MAX, "%s/var/master-data/mysql", mysql_test_dir);
- mkdir(temp, S_IRWXU);
- snprintf(temp, PATH_MAX, "%s/var/master-data/test", mysql_test_dir);
- mkdir(temp, S_IRWXU);
- snprintf(temp, PATH_MAX, "%s/var/slave-data", mysql_test_dir);
- mkdir(temp, S_IRWXU);
- snprintf(temp, PATH_MAX, "%s/var/slave-data/mysql", mysql_test_dir);
- mkdir(temp, S_IRWXU);
- snprintf(temp, PATH_MAX, "%s/var/slave-data/test", mysql_test_dir);
- mkdir(temp, S_IRWXU);
-#else
- mkdir(temp);
- // create subdirectories
- mlog("Creating test-suite folders...\n");
- snprintf(temp, PATH_MAX, "%s/var/run", mysql_test_dir);
- mkdir(temp);
- snprintf(temp, PATH_MAX, "%s/var/tmp", mysql_test_dir);
- mkdir(temp);
- snprintf(temp, PATH_MAX, "%s/var/master-data", mysql_test_dir);
- mkdir(temp);
- snprintf(temp, PATH_MAX, "%s/var/master-data/mysql", mysql_test_dir);
- mkdir(temp);
- snprintf(temp, PATH_MAX, "%s/var/master-data/test", mysql_test_dir);
- mkdir(temp);
- snprintf(temp, PATH_MAX, "%s/var/slave-data", mysql_test_dir);
- mkdir(temp);
- snprintf(temp, PATH_MAX, "%s/var/slave-data/mysql", mysql_test_dir);
- mkdir(temp);
- snprintf(temp, PATH_MAX, "%s/var/slave-data/test", mysql_test_dir);
- mkdir(temp);
-#endif
-
- // install databases
- mlog("Creating test databases for master... \n");
- install_db(master_dir);
- mlog("Creating test databases for slave... \n");
- install_db(slave_dir);
-}
-
-/******************************************************************************
-
- start_master()
-
- Start the master server.
-
-******************************************************************************/
-void start_master()
-{
- arg_list_t al;
- int err;
- char master_out[PATH_MAX];
- char master_err[PATH_MAX];
-// char temp[PATH_MAX];
- char temp2[PATH_MAX];
-
- // remove old berkeley db log files that can confuse the server
- removef("%s/log.*", master_dir);
-
- // remove stale binary logs
- removef("%s/var/log/*-bin.*", mysql_test_dir);
-
- // remove stale binary logs
- removef("%s/var/log/*.index", mysql_test_dir);
-
- // remove master.info file
- removef("%s/master.info", master_dir);
-
- // remove relay files
- removef("%s/var/log/*relay*", mysql_test_dir);
-
- // remove relay-log.info file
- removef("%s/relay-log.info", master_dir);
-
- // init script
- if (master_init_script[0] != 0)
- {
-#ifdef __NETWARE__
- // TODO: use the scripts
- if (strinstr(master_init_script, "repair_part2-master.sh") != 0)
- {
- FILE *fp;
-
- // create an empty index file
- snprintf(temp, PATH_MAX, "%s/test/t1.MYI", master_dir);
- fp = fopen(temp, "wb+");
-
- fputs("1", fp);
-
- fclose(fp);
- }
-#elif !defined(__WIN__)
- run_init_script(master_init_script);
-#endif
- }
-
- // redirection files
- snprintf(master_out, PATH_MAX, "%s/var/run/master%u.out",
- mysql_test_dir, restarts);
- snprintf(master_err, PATH_MAX, "%s/var/run/master%u.err",
- mysql_test_dir, restarts);
-#ifndef __WIN__
- snprintf(temp2,PATH_MAX,"%s/var",mysql_test_dir);
- mkdir(temp2,S_IRWXU);
- snprintf(temp2,PATH_MAX,"%s/var/log",mysql_test_dir);
- mkdir(temp2,S_IRWXU);
-#else
- snprintf(temp2,PATH_MAX,"%s/var",mysql_test_dir);
- mkdir(temp2);
- snprintf(temp2,PATH_MAX,"%s/var/log",mysql_test_dir);
- mkdir(temp2);
-#endif
- // args
- init_args(&al);
- add_arg(&al, "%s", mysqld_file);
- add_arg(&al, "--no-defaults");
- add_arg(&al, "--log-bin=%s/var/log/master-bin",mysql_test_dir);
- add_arg(&al, "--server-id=1");
- add_arg(&al, "--basedir=%s", base_dir);
- add_arg(&al, "--port=%u", master_port);
-#if !defined(__NETWARE__) && !defined(__WIN__)
- add_arg(&al, "--socket=%s",master_socket);
-#endif
- add_arg(&al, "--local-infile");
- add_arg(&al, "--core");
- add_arg(&al, "--datadir=%s", master_dir);
-#ifndef __WIN__
- add_arg(&al, "--pid-file=%s", master_pid);
-#endif
- add_arg(&al, "--character-sets-dir=%s", char_dir);
- add_arg(&al, "--tmpdir=%s", mysql_tmp_dir);
- add_arg(&al, "--language=%s", lang_dir);
-#ifdef DEBUG //only for debug builds
- add_arg(&al, "--debug");
-#endif
-
- if (use_openssl)
- {
- add_arg(&al, "--ssl-ca=%s", ca_cert);
- add_arg(&al, "--ssl-cert=%s", server_cert);
- add_arg(&al, "--ssl-key=%s", server_key);
- }
-
- // $MASTER_40_ARGS
- add_arg(&al, "--rpl-recovery-rank=1");
- add_arg(&al, "--init-rpl-role=master");
-
- // $SMALL_SERVER
- add_arg(&al, "-O");
- add_arg(&al, "key_buffer_size=1M");
- add_arg(&al, "-O");
- add_arg(&al, "sort_buffer=256K");
- add_arg(&al, "-O");
- add_arg(&al, "max_heap_table_size=1M");
-
- // $EXTRA_MASTER_OPT
- if (master_opt[0] != 0)
- {
- char *p;
-
- p = (char *)str_tok(master_opt, " \t");
- if (!strstr(master_opt, "timezone"))
- {
- while (p)
- {
- add_arg(&al, "%s", p);
- p = (char *)str_tok(NULL, " \t");
- }
- }
- }
-
- // remove the pid file if it exists
-#ifndef __WIN__
- remove(master_pid);
-#endif
-
- // spawn
-#ifdef __WIN__
- if ((err= spawn(mysqld_file, &al, FALSE, NULL, master_out, master_err, &master_pid)) == 0)
-#else
- if ((err= spawn(mysqld_file, &al, FALSE, NULL, master_out, master_err, master_pid)) == 0)
-#endif
- {
- sleep_until_file_exists(master_pid);
-
- if ((err = wait_for_server_start(bin_dir, mysqladmin_file, user, password, master_port,
- mysql_tmp_dir)) == 0)
- {
- master_running = TRUE;
- }
- else
- {
- log_error("The master server went down early.");
- }
- }
- else
- {
- log_error("Unable to start master server.");
- }
-
- // free_args
- free_args(&al);
-}
-
-/******************************************************************************
-
- start_slave()
-
- Start the slave server.
-
-******************************************************************************/
-void start_slave()
-{
- arg_list_t al;
- int err;
- char slave_out[PATH_MAX];
- char slave_err[PATH_MAX];
-
- // skip?
- if (skip_slave) return;
-
- // remove stale binary logs
- removef("%s/*-bin.*", slave_dir);
-
- // remove stale binary logs
- removef("%s/*.index", slave_dir);
-
- // remove master.info file
- removef("%s/master.info", slave_dir);
-
- // remove relay files
- removef("%s/var/log/*relay*", mysql_test_dir);
-
- // remove relay-log.info file
- removef("%s/relay-log.info", slave_dir);
-
- // init script
- if (slave_init_script[0] != 0)
- {
-#ifdef __NETWARE__
- // TODO: use the scripts
- if (strinstr(slave_init_script, "rpl000016-slave.sh") != 0)
- {
- // create empty master.info file
- snprintf(temp, PATH_MAX, "%s/master.info", slave_dir);
- close(open(temp, O_WRONLY | O_CREAT,S_IRWXU|S_IRWXG|S_IRWXO));
- }
- else if (strinstr(slave_init_script, "rpl000017-slave.sh") != 0)
- {
- FILE *fp;
-
- // create a master.info file
- snprintf(temp, PATH_MAX, "%s/master.info", slave_dir);
- fp = fopen(temp, "wb+");
-
- fputs("master-bin.000001\n", fp);
- fputs("4\n", fp);
- fputs("127.0.0.1\n", fp);
- fputs("replicate\n", fp);
- fputs("aaaaaaaaaaaaaaab\n", fp);
- fputs("9306\n", fp);
- fputs("1\n", fp);
- fputs("0\n", fp);
-
- fclose(fp);
- }
- else if (strinstr(slave_init_script, "rpl_rotate_logs-slave.sh") != 0)
- {
- // create empty master.info file
- snprintf(temp, PATH_MAX, "%s/master.info", slave_dir);
- close(open(temp, O_WRONLY | O_CREAT,S_IRWXU|S_IRWXG|S_IRWXO));
- }
-#elif !defined(__WIN__)
- run_init_script(slave_init_script);
-#endif
- }
-
- // redirection files
- snprintf(slave_out, PATH_MAX, "%s/var/run/slave%u.out",
- mysql_test_dir, restarts);
- snprintf(slave_err, PATH_MAX, "%s/var/run/slave%u.err",
- mysql_test_dir, restarts);
-
- // args
- init_args(&al);
- add_arg(&al, "%s", mysqld_file);
- add_arg(&al, "--no-defaults");
- add_arg(&al, "--log-bin=slave-bin");
- add_arg(&al, "--relay_log=slave-relay-bin");
- add_arg(&al, "--basedir=%s", base_dir);
- add_arg(&al, "--port=%u", slave_port);
-#if !defined(__NETWARE__) && !defined(__WIN__)
- add_arg(&al, "--socket=%s",slave_socket);
-#endif
- add_arg(&al, "--datadir=%s", slave_dir);
-#ifndef __WIN__
- add_arg(&al, "--pid-file=%s", slave_pid);
-#endif
- add_arg(&al, "--character-sets-dir=%s", char_dir);
- add_arg(&al, "--core");
- add_arg(&al, "--tmpdir=%s", mysql_tmp_dir);
- add_arg(&al, "--language=%s", lang_dir);
-
- add_arg(&al, "--exit-info=256");
- add_arg(&al, "--log-slave-updates");
- add_arg(&al, "--init-rpl-role=slave");
- add_arg(&al, "--skip-innodb");
- add_arg(&al, "--skip-slave-start");
- add_arg(&al, "--slave-load-tmpdir=../../var/tmp");
-
- add_arg(&al, "--report-user=%s", user);
- add_arg(&al, "--report-host=127.0.0.1");
- add_arg(&al, "--report-port=%u", slave_port);
-
- add_arg(&al, "--master-retry-count=10");
- add_arg(&al, "-O");
- add_arg(&al, "slave_net_timeout=10");
-#ifdef DEBUG //only for debug builds
- add_arg(&al, "--debug");
-#endif
-
- if (use_openssl)
- {
- add_arg(&al, "--ssl-ca=%s", ca_cert);
- add_arg(&al, "--ssl-cert=%s", server_cert);
- add_arg(&al, "--ssl-key=%s", server_key);
- }
-
- // slave master info
- if (slave_master_info[0] != 0)
- {
- char *p;
-
- p = (char *)str_tok(slave_master_info, " \t");
-
- while(p)
- {
- add_arg(&al, "%s", p);
-
- p = (char *)str_tok(NULL, " \t");
- }
- }
- else
- {
- add_arg(&al, "--master-user=%s", user);
- add_arg(&al, "--master-password=%s", password);
- add_arg(&al, "--master-host=127.0.0.1");
- add_arg(&al, "--master-port=%u", master_port);
- add_arg(&al, "--master-connect-retry=1");
- add_arg(&al, "--server-id=2");
- add_arg(&al, "--rpl-recovery-rank=2");
- }
-
- // small server
- add_arg(&al, "-O");
- add_arg(&al, "key_buffer_size=1M");
- add_arg(&al, "-O");
- add_arg(&al, "sort_buffer=256K");
- add_arg(&al, "-O");
- add_arg(&al, "max_heap_table_size=1M");
-
-
- // opt args
- if (slave_opt[0] != 0)
- {
- char *p;
-
- p = (char *)str_tok(slave_opt, " \t");
-
- while(p)
- {
- add_arg(&al, "%s", p);
-
- p = (char *)str_tok(NULL, " \t");
- }
- }
-
- // remove the pid file if it exists
-#ifndef __WIN__
- remove(slave_pid);
-#endif
- // spawn
-#ifdef __WIN__
- if ((err = spawn(mysqld_file, &al, FALSE, NULL, slave_out, slave_err, &slave_pid)) == 0)
-#else
- if ((err = spawn(mysqld_file, &al, FALSE, NULL, slave_out, slave_err, slave_pid)) == 0)
-#endif
- {
- sleep_until_file_exists(slave_pid);
-
- if ((err = wait_for_server_start(bin_dir, mysqladmin_file, user, password, slave_port,
- mysql_tmp_dir)) == 0)
- {
- slave_running = TRUE;
- }
- else
- {
- log_error("The slave server went down early.");
- }
- }
- else
- {
- log_error("Unable to start slave server.");
- }
-
- // free args
- free_args(&al);
-}
-
-/******************************************************************************
-
- mysql_start()
-
- Start the mysql servers.
-
-******************************************************************************/
-void mysql_start()
-{
-// log_info("Starting the MySQL server(s): %u", ++restarts);
- start_master();
-
- start_slave();
-
- // activate the test screen
-#ifdef __NETWARE__
- ActivateScreen(getscreenhandle());
-#endif
-}
-
-/******************************************************************************
-
- stop_slave()
-
- Stop the slave server.
-
-******************************************************************************/
-void stop_slave()
-{
- int err;
-
- // running?
- if (!slave_running) return;
-
- // stop
- if ((err = stop_server(bin_dir, mysqladmin_file, user, password, slave_port, slave_pid,
- mysql_tmp_dir)) == 0)
- {
- slave_running = FALSE;
- }
- else
- {
- log_error("Unable to stop slave server.");
- }
-}
-
-/******************************************************************************
-
- stop_master()
-
- Stop the master server.
-
-******************************************************************************/
-void stop_master()
-{
- int err;
-
- // running?
- if (!master_running) return;
-
- if ((err = stop_server(bin_dir, mysqladmin_file, user, password, master_port, master_pid,
- mysql_tmp_dir)) == 0)
- {
- master_running = FALSE;
- }
- else
- {
- log_error("Unable to stop master server.");
- }
-}
-
-/******************************************************************************
-
- mysql_stop()
-
- Stop the mysql servers.
-
-******************************************************************************/
-void mysql_stop()
-{
-
- stop_master();
-
- stop_slave();
-
- // activate the test screen
-#ifdef __NETWARE__
- ActivateScreen(getscreenhandle());
-#endif
-}
-
-/******************************************************************************
-
- mysql_restart()
-
- Restart the mysql servers.
-
-******************************************************************************/
-void mysql_restart()
-{
-// log_info("Restarting the MySQL server(s): %u", ++restarts);
-
- mysql_stop();
-
- mlog(DASH);
-
- mysql_start();
-}
-
-/******************************************************************************
-
- read_option()
-
- Read the option file.
-
-******************************************************************************/
-int read_option(char *opt_file, char *opt)
-{
- int fd, err;
- char *p;
- char buf[PATH_MAX];
-
- // copy current option
- strncpy(buf, opt, PATH_MAX);
-
- // open options file
- fd = open(opt_file, O_RDONLY);
-
- err = read(fd, opt, PATH_MAX);
-
- close(fd);
-
- if (err > 0)
- {
- // terminate string
- if ((p = strchr(opt, '\n')) != NULL)
- {
- *p = 0;
-
- // check for a '\r'
- if ((p = strchr(opt, '\r')) != NULL)
- {
- *p = 0;
- }
- }
- else
- {
- opt[err] = 0;
- }
-
- // check for $MYSQL_TEST_DIR
- if ((p = strstr(opt, "$MYSQL_TEST_DIR")) != NULL)
- {
- char temp[PATH_MAX];
-
- *p = 0;
-
- strcpy(temp, p + strlen("$MYSQL_TEST_DIR"));
-
- strcat(opt, mysql_test_dir);
-
- strcat(opt, temp);
- }
- // Check for double backslash and replace it with single bakslash
- if ((p = strstr(opt, "\\\\")) != NULL)
- {
- /* bmove is guranteed to work byte by byte */
- bmove(p, p+1, strlen(p+1));
- }
- }
- else
- {
- // clear option
- *opt = 0;
- }
-
- // compare current option with previous
- return strcmp(opt, buf);
-}
-
-/******************************************************************************
-
- run_test()
-
- Run the given test case.
-
-******************************************************************************/
-void run_test(char *test)
-{
- char temp[PATH_MAX];
- const char *rstr;
- int skip = FALSE, ignore=FALSE;
- int restart = FALSE;
- int flag = FALSE;
- struct stat info;
-
- // skip tests in the skip list
- snprintf(temp, PATH_MAX, " %s ", test);
- skip = (strinstr(skip_test, temp) != 0);
- if (skip == FALSE)
- ignore = (strinstr(ignore_test, temp) != 0);
-
- snprintf(master_init_script, PATH_MAX, "%s/%s-master.sh", test_dir, test);
- snprintf(slave_init_script, PATH_MAX, "%s/%s-slave.sh", test_dir, test);
-#ifdef __WIN__
- if (! stat(master_init_script, &info))
- skip = TRUE;
- if (!stat(slave_init_script, &info))
- skip = TRUE;
-#endif
- if (ignore)
- {
- // show test
- mlog("%-46s ", test);
-
- // ignore
- rstr = TEST_IGNORE;
- ++total_ignore;
- }
- else if (!skip) // skip test?
- {
- char test_file[PATH_MAX];
- char master_opt_file[PATH_MAX];
- char slave_opt_file[PATH_MAX];
- char slave_master_info_file[PATH_MAX];
- char result_file[PATH_MAX];
- char reject_file[PATH_MAX];
- char out_file[PATH_MAX];
- char err_file[PATH_MAX];
- int err;
- arg_list_t al;
-#ifdef __WIN__
- /*
- Clean test database
- */
- removef("%s/test/*.*", master_dir);
- removef("%s/test/*.*", slave_dir);
- removef("%s/mysqltest/*.*", master_dir);
- removef("%s/mysqltest/*.*", slave_dir);
-
-#endif
- // skip slave?
- flag = skip_slave;
- skip_slave = (strncmp(test, "rpl", 3) != 0);
- if (flag != skip_slave) restart = TRUE;
-
- // create files
- snprintf(master_opt_file, PATH_MAX, "%s/%s-master.opt", test_dir, test);
- snprintf(slave_opt_file, PATH_MAX, "%s/%s-slave.opt", test_dir, test);
- snprintf(slave_master_info_file, PATH_MAX, "%s/%s.slave-mi", test_dir, test);
- snprintf(reject_file, PATH_MAX, "%s/%s%s", result_dir, test, REJECT_SUFFIX);
- snprintf(out_file, PATH_MAX, "%s/%s%s", result_dir, test, OUT_SUFFIX);
- snprintf(err_file, PATH_MAX, "%s/%s%s", result_dir, test, ERR_SUFFIX);
-
- // netware specific files
- snprintf(test_file, PATH_MAX, "%s/%s%s", test_dir, test, NW_TEST_SUFFIX);
- if (stat(test_file, &info))
- {
- snprintf(test_file, PATH_MAX, "%s/%s%s", test_dir, test, TEST_SUFFIX);
- if (access(test_file,0))
- {
- printf("Invalid test name %s, %s file not found\n",test,test_file);
- return;
- }
- }
-
- snprintf(result_file, PATH_MAX, "%s/%s%s", result_dir, test, NW_RESULT_SUFFIX);
- if (stat(result_file, &info))
- {
- snprintf(result_file, PATH_MAX, "%s/%s%s", result_dir, test, RESULT_SUFFIX);
- }
-
- // init scripts
- if (stat(master_init_script, &info))
- master_init_script[0] = 0;
- else
- restart = TRUE;
-
- if (stat(slave_init_script, &info))
- slave_init_script[0] = 0;
- else
- restart = TRUE;
-
- // read options
- if (read_option(master_opt_file, master_opt)) restart = TRUE;
- if (read_option(slave_opt_file, slave_opt)) restart = TRUE;
- if (read_option(slave_master_info_file, slave_master_info)) restart = TRUE;
-
- // cleanup previous run
- remove(reject_file);
- remove(out_file);
- remove(err_file);
-
- // start or restart?
- if (!master_running) mysql_start();
- else if (restart) mysql_restart();
-
- // let the system stabalize
- sleep(1);
-
- // show test
- mlog("%-46s ", test);
-
-
- // args
- init_args(&al);
- add_arg(&al, "%s", mysqltest_file);
- add_arg(&al, "--no-defaults");
- add_arg(&al, "--port=%u", master_port);
-#if !defined(__NETWARE__) && !defined(__WIN__)
- add_arg(&al, "--socket=%s", master_socket);
- add_arg(&al, "--tmpdir=%s", mysql_tmp_dir);
-#endif
- add_arg(&al, "--database=%s", db);
- add_arg(&al, "--user=%s", user);
- add_arg(&al, "--password=%s", password);
- add_arg(&al, "--silent");
- add_arg(&al, "--basedir=%s/", mysql_test_dir);
- add_arg(&al, "--host=127.0.0.1");
- add_arg(&al, "-v");
- add_arg(&al, "-R");
- add_arg(&al, "%s", result_file);
-
- if (use_openssl)
- {
- add_arg(&al, "--ssl-ca=%s", ca_cert);
- add_arg(&al, "--ssl-cert=%s", client_cert);
- add_arg(&al, "--ssl-key=%s", client_key);
- }
-
- // spawn
- err = spawn(mysqltest_file, &al, TRUE, test_file, out_file, err_file, NULL);
-
- // free args
- free_args(&al);
-
- remove_empty_file(out_file);
- remove_empty_file(err_file);
-
- if (err == 0)
- {
- // pass
- rstr = TEST_PASS;
- ++total_pass;
-
- // increment total
- ++total_test;
- }
- else if (err == 2)
- {
- // skip
- rstr = TEST_SKIP;
- ++total_skip;
- }
- else if (err == 1)
- {
- // fail
- rstr = TEST_FAIL;
- ++total_fail;
-
- // increment total
- ++total_test;
- }
- else
- {
- rstr = TEST_BAD;
- }
- }
- else // early skips
- {
- // show test
- mlog("%-46s ", test);
-
- // skip
- rstr = TEST_SKIP;
- ++total_skip;
- }
-
- // result
- mlog("%-14s\n", rstr);
-}
-
-/******************************************************************************
-
- vlog()
-
- Log the message.
-
-******************************************************************************/
-void vlog(const char *format, va_list ap)
-{
- vfprintf(stdout, format, ap);
- fflush(stdout);
-
- if (log_fd)
- {
- vfprintf(log_fd, format, ap);
- fflush(log_fd);
- }
-}
-
-/******************************************************************************
-
- log()
-
- Log the message.
-
-******************************************************************************/
-void mlog(const char *format, ...)
-{
- va_list ap;
-
- va_start(ap, format);
-
- vlog(format, ap);
-
- va_end(ap);
-}
-
-/******************************************************************************
-
- log_info()
-
- Log the given information.
-
-******************************************************************************/
-void log_info(const char *format, ...)
-{
- va_list ap;
-
- va_start(ap, format);
-
- mlog("-- INFO : ");
- vlog(format, ap);
- mlog("\n");
-
- va_end(ap);
-}
-
-/******************************************************************************
-
- log_error()
-
- Log the given error.
-
-******************************************************************************/
-void log_error(const char *format, ...)
-{
- va_list ap;
-
- va_start(ap, format);
-
- mlog("-- ERROR: ");
- vlog(format, ap);
- mlog("\n");
-
- va_end(ap);
-}
-
-/******************************************************************************
-
- log_errno()
-
- Log the given error and errno.
-
-******************************************************************************/
-void log_errno(const char *format, ...)
-{
- va_list ap;
-
- va_start(ap, format);
-
- mlog("-- ERROR: (%003u) ", errno);
- vlog(format, ap);
- mlog("\n");
-
- va_end(ap);
-}
-
-/******************************************************************************
-
- die()
-
- Exit the application.
-
-******************************************************************************/
-void die(const char *msg)
-{
- log_error(msg);
-#ifdef __NETWARE__
- pressanykey();
-#endif
- exit(-1);
-}
-
-/******************************************************************************
-
- setup()
-
- Setup the mysql test enviornment.
-
-******************************************************************************/
-void setup(char *file)
-{
- char temp[PATH_MAX];
- char file_path[PATH_MAX*2];
- char *p;
- int position;
-
- // set the timezone for the timestamp test
-#ifdef __WIN__
- _putenv( "TZ=GMT-3" );
-#else
- setenv("TZ", "GMT-3", TRUE);
-#endif
- // find base dir
-#ifdef __NETWARE__
- strcpy(temp, strlwr(file));
- while((p = strchr(temp, '\\')) != NULL) *p = '/';
-#else
- getcwd(temp, PATH_MAX);
- position = strlen(temp);
- temp[position] = '/';
- temp[position+1] = 0;
-#ifdef __WIN__
- while((p = strchr(temp, '\\')) != NULL) *p = '/';
-#endif
-#endif
-
- if ((position = strinstr(temp, "/mysql-test/")) != 0)
- {
- p = temp + position - 1;
- *p = 0;
- strcpy(base_dir, temp);
- }
-
- log_info("Currect directory: %s",base_dir);
-
-#ifdef __NETWARE__
- // setup paths
- snprintf(bin_dir, PATH_MAX, "%s/bin", base_dir);
- snprintf(mysql_test_dir, PATH_MAX, "%s/mysql-test", base_dir);
- snprintf(test_dir, PATH_MAX, "%s/t", mysql_test_dir);
- snprintf(mysql_tmp_dir, PATH_MAX, "%s/var/tmp", mysql_test_dir);
- snprintf(result_dir, PATH_MAX, "%s/r", mysql_test_dir);
- snprintf(master_dir, PATH_MAX, "%s/var/master-data", mysql_test_dir);
- snprintf(slave_dir, PATH_MAX, "%s/var/slave-data", mysql_test_dir);
- snprintf(lang_dir, PATH_MAX, "%s/share/english", base_dir);
- snprintf(char_dir, PATH_MAX, "%s/share/charsets", base_dir);
-
-#ifdef HAVE_OPENSSL
- use_openssl = TRUE;
-#endif // HAVE_OPENSSL
-
- // OpenSSL paths
- snprintf(ca_cert, PATH_MAX, "%s/SSL/cacert.pem", base_dir);
- snprintf(server_cert, PATH_MAX, "%s/SSL/server-cert.pem", base_dir);
- snprintf(server_key, PATH_MAX, "%s/SSL/server-key.pem", base_dir);
- snprintf(client_cert, PATH_MAX, "%s/SSL/client-cert.pem", base_dir);
- snprintf(client_key, PATH_MAX, "%s/SSL/client-key.pem", base_dir);
-
- // setup files
- snprintf(mysqld_file, PATH_MAX, "%s/mysqld", bin_dir);
- snprintf(mysqltest_file, PATH_MAX, "%s/mysqltest", bin_dir);
- snprintf(mysqladmin_file, PATH_MAX, "%s/mysqladmin", bin_dir);
- snprintf(master_pid, PATH_MAX, "%s/var/run/master.pid", mysql_test_dir);
- snprintf(slave_pid, PATH_MAX, "%s/var/run/slave.pid", mysql_test_dir);
-#elif __WIN__
- // setup paths
-#ifdef _DEBUG
- snprintf(bin_dir, PATH_MAX, "%s/client_debug", base_dir);
-#else
- snprintf(bin_dir, PATH_MAX, "%s/client_release", base_dir);
-#endif
- snprintf(mysql_test_dir, PATH_MAX, "%s/mysql-test", base_dir);
- snprintf(test_dir, PATH_MAX, "%s/t", mysql_test_dir);
- snprintf(mysql_tmp_dir, PATH_MAX, "%s/var/tmp", mysql_test_dir);
- snprintf(result_dir, PATH_MAX, "%s/r", mysql_test_dir);
- snprintf(master_dir, PATH_MAX, "%s/var/master-data", mysql_test_dir);
- snprintf(slave_dir, PATH_MAX, "%s/var/slave-data", mysql_test_dir);
- snprintf(lang_dir, PATH_MAX, "%s/share/english", base_dir);
- snprintf(char_dir, PATH_MAX, "%s/share/charsets", base_dir);
-
-#ifdef HAVE_OPENSSL
- use_openssl = TRUE;
-#endif // HAVE_OPENSSL
-
- // OpenSSL paths
- snprintf(ca_cert, PATH_MAX, "%s/SSL/cacert.pem", base_dir);
- snprintf(server_cert, PATH_MAX, "%s/SSL/server-cert.pem", base_dir);
- snprintf(server_key, PATH_MAX, "%s/SSL/server-key.pem", base_dir);
- snprintf(client_cert, PATH_MAX, "%s/SSL/client-cert.pem", base_dir);
- snprintf(client_key, PATH_MAX, "%s/SSL/client-key.pem", base_dir);
-
- // setup files
- snprintf(mysqld_file, PATH_MAX, "%s/mysqld.exe", bin_dir);
- snprintf(mysqltest_file, PATH_MAX, "%s/mysqltest.exe", bin_dir);
- snprintf(mysqladmin_file, PATH_MAX, "%s/mysqladmin.exe", bin_dir);
-#else
- // setup paths
- snprintf(bin_dir, PATH_MAX, "%s/client", base_dir);
- snprintf(mysql_test_dir, PATH_MAX, "%s/mysql-test", base_dir);
- snprintf(test_dir, PATH_MAX, "%s/t", mysql_test_dir);
- snprintf(mysql_tmp_dir, PATH_MAX, "%s/var/tmp", mysql_test_dir);
- snprintf(result_dir, PATH_MAX, "%s/r", mysql_test_dir);
- snprintf(master_dir, PATH_MAX, "%s/var/master-data", mysql_test_dir);
- snprintf(slave_dir, PATH_MAX, "%s/var/slave-data", mysql_test_dir);
- snprintf(lang_dir, PATH_MAX, "%s/sql/share/english", base_dir);
- snprintf(char_dir, PATH_MAX, "%s/sql/share/charsets", base_dir);
-
-#ifdef HAVE_OPENSSL
- use_openssl = TRUE;
-#endif // HAVE_OPENSSL
-
- // OpenSSL paths
- snprintf(ca_cert, PATH_MAX, "%s/SSL/cacert.pem", base_dir);
- snprintf(server_cert, PATH_MAX, "%s/SSL/server-cert.pem", base_dir);
- snprintf(server_key, PATH_MAX, "%s/SSL/server-key.pem", base_dir);
- snprintf(client_cert, PATH_MAX, "%s/SSL/client-cert.pem", base_dir);
- snprintf(client_key, PATH_MAX, "%s/SSL/client-key.pem", base_dir);
-
- // setup files
- snprintf(mysqld_file, PATH_MAX, "%s/sql/mysqld", base_dir);
- snprintf(mysqltest_file, PATH_MAX, "%s/mysqltest", bin_dir);
- snprintf(mysqladmin_file, PATH_MAX, "%s/mysqladmin", bin_dir);
- snprintf(master_pid, PATH_MAX, "%s/var/run/master.pid", mysql_test_dir);
- snprintf(slave_pid, PATH_MAX, "%s/var/run/slave.pid", mysql_test_dir);
-
- snprintf(master_socket,PATH_MAX, "%s/var/tmp/master.sock", mysql_test_dir);
- snprintf(slave_socket,PATH_MAX, "%s/var/tmp/slave.sock", mysql_test_dir);
-
-#endif
- // create log file
- snprintf(temp, PATH_MAX, "%s/mysql-test-run.log", mysql_test_dir);
- if ((log_fd = fopen(temp, "w+")) == NULL)
- {
- log_errno("Unable to create log file.");
- }
-
- // prepare skip test list
- while((p = strchr(skip_test, ',')) != NULL) *p = ' ';
- strcpy(temp, strlwr(skip_test));
- snprintf(skip_test, PATH_MAX, " %s ", temp);
-
- // environment
-#ifdef __NETWARE__
- setenv("MYSQL_TEST_DIR", mysql_test_dir, 1);
- snprintf(file_path, PATH_MAX*2, "%s/client/mysqldump --no-defaults -u root --port=%u", bin_dir, master_port);
- setenv("MYSQL_DUMP", file_path, 1);
- snprintf(file_path, PATH_MAX*2, "%s/client/mysqlbinlog --no-defaults --local-load=%s", bin_dir, mysql_tmp_dir);
- setenv("MYSQL_BINLOG", file_path, 1);
-#elif __WIN__
- snprintf(file_path,MAX_PATH,"MYSQL_TEST_DIR=%s",mysql_test_dir);
- _putenv(file_path);
- snprintf(file_path, PATH_MAX*2, "MYSQL_DUMP=%s/mysqldump.exe --no-defaults -u root --port=%u", bin_dir, master_port);
- _putenv(file_path);
- snprintf(file_path, PATH_MAX*2, "MYSQL_BINLOG=%s/mysqlbinlog.exe --no-defaults --local-load=%s", bin_dir, mysql_tmp_dir);
- _putenv(file_path);
-#else
- setenv("MYSQL_TEST_DIR", mysql_test_dir, 1);
- snprintf(file_path, PATH_MAX*2, "%s/mysqldump --no-defaults -u root --port=%u --socket=%s", bin_dir, master_port, master_socket);
- setenv("MYSQL_DUMP", file_path, 1);
- snprintf(file_path, PATH_MAX*2, "%s/mysqlbinlog --no-defaults --local-load=%s", bin_dir, mysql_tmp_dir);
- setenv("MYSQL_BINLOG", file_path, 1);
-#endif
-
-#ifndef __WIN__
- setenv("MASTER_MYPORT", "9306", 1);
- setenv("SLAVE_MYPORT", "9307", 1);
- setenv("MYSQL_TCP_PORT", "3306", 1);
-#else
- _putenv("MASTER_MYPORT=9306");
- _putenv("SLAVE_MYPORT=9307");
- _putenv("MYSQL_TCP_PORT=3306");
-#endif
-
-}
-
-/******************************************************************************
-
- main()
-
-******************************************************************************/
-int main(int argc, char **argv)
-{
- int is_ignore_list = 0;
- // setup
- setup(argv[0]);
-
- /* The --ignore option is a comma-separated list of test cases to skip and
- must be the very first command line option passed to the test suite.
-
- The usage is now:
- mysql_test_run --ignore=test1,test2 test3 test4
- where test1 and test2 are test cases to ignore
- and test3 and test4 are test cases to run.
- */
- if (argc >= 2 && !strnicmp(argv[1], "--ignore=", sizeof("--ignore=")-1))
- {
- char *temp, *token;
- temp= strdup(strchr(argv[1],'=') + 1);
- for (token=str_tok(temp, ","); token != NULL; token=str_tok(NULL, ","))
- {
- if (strlen(ignore_test) + strlen(token) + 2 <= PATH_MAX-1)
- sprintf(ignore_test+strlen(ignore_test), " %s ", token);
- else
- {
- free(temp);
- die("ignore list too long.");
- }
- }
- free(temp);
- is_ignore_list = 1;
- }
- // header
-#ifndef __WIN__
- mlog("MySQL Server %s, for %s (%s)\n\n", VERSION, SYSTEM_TYPE, MACHINE_TYPE);
-#else
- mlog("MySQL Server ---, for %s (%s)\n\n", SYSTEM_TYPE, MACHINE_TYPE);
-#endif
-
- mlog("Initializing Tests...\n");
-
- // install test databases
- mysql_install_db();
-
- mlog("Starting Tests...\n");
-
- mlog("\n");
- mlog(HEADER);
- mlog(DASH);
-
- if ( argc > 1 + is_ignore_list )
- {
- int i;
-
- // single test
- single_test = TRUE;
-
- for (i = 1 + is_ignore_list; i < argc; i++)
- {
- // run given test
- run_test(argv[i]);
- }
- }
- else
- {
- // run all tests
-#ifndef __WIN__
- struct dirent **namelist;
- int i,n;
- char test[NAME_MAX];
- char *p;
- int position;
-
- n = scandir(test_dir, &namelist, 0, alphasort);
- if (n < 0)
- die("Unable to open tests directory.");
- else
- {
- for (i = 0; i < n; i++)
- {
- strcpy(test, strlwr(namelist[i]->d_name));
- // find the test suffix
- if ((position = strinstr(test, TEST_SUFFIX)) != 0)
- {
- p = test + position - 1;
- // null terminate at the suffix
- *p = 0;
- // run test
- run_test(test);
- }
- free(namelist[i]);
- }
- free(namelist);
- }
-#else
- struct _finddata_t dir;
- intptr_t handle;
- char test[NAME_MAX];
- char mask[PATH_MAX];
- char *p;
- int position;
- char **names = 0;
- char **testes = 0;
- int name_index;
- int index;
-
- // single test
- single_test = FALSE;
-
- snprintf(mask,MAX_PATH,"%s/*.test",test_dir);
-
- if ((handle=_findfirst(mask,&dir)) == -1L)
- {
- die("Unable to open tests directory.");
- }
-
- names = malloc(MAX_COUNT_TESTES*4);
- testes = names;
- name_index = 0;
-
- do
- {
- if (!(dir.attrib & _A_SUBDIR))
- {
- strcpy(test, strlwr(dir.name));
-
- // find the test suffix
- if ((position = strinstr(test, TEST_SUFFIX)) != 0)
- {
- p = test + position - 1;
- // null terminate at the suffix
- *p = 0;
-
- // insert test
- *names = malloc(PATH_MAX);
- strcpy(*names,test);
- names++;
- name_index++;
- }
- }
- }while (_findnext(handle,&dir) == 0);
-
- _findclose(handle);
-
- qsort( (void *)testes, name_index, sizeof( char * ), compare );
-
- for (index = 0; index < name_index; index++)
- {
- run_test(testes[index]);
- free(testes[index]);
- }
-
- free(testes);
-#endif
- }
-
- // stop server
- mysql_stop();
-
- mlog(DASH);
- mlog("\n");
-
- mlog("Ending Tests...\n");
-
- // report stats
- report_stats();
-
- // close log
- if (log_fd) fclose(log_fd);
-
- // keep results up
-#ifdef __NETWARE__
- pressanykey();
-#endif
- return 0;
-}
-
-
-/*
- Synopsis:
- This function breaks the string into a sequence of tokens. The difference
- between this function and strtok is that it respects quoted strings, i.e.
- it skips any delimiter character within the quoted part of the string.
- It returns tokens with the quote characters removed. It modifies the input
- string passed. It works with whitespace delimiters but may not work properly
- with other delimiters. If the delimiter set contains any quote character,
- the function will not tokenize and will return NULL.
- e.g. if the input string is
- --init-slave="set global max_connections=500" --skip-external-locking
- then the output will be two strings, i.e.
- --init-slave=set global max_connections=500
- --skip-external-locking
-
-Arguments:
- string: input string
- delim: set of delimiter characters
-Output:
- returns the next null-terminated token, or NULL.
-*/
-
-
-char *str_tok(char *string, const char *delim)
-{
- char *token; /* current token received from strtok */
- char *qt_token; /* token delimited by a matching pair of quotes */
- /*
- if there are any quote chars found in the token then this variable
- will hold the concatenated string to return to the caller
- */
- char *ptr_token=NULL;
- /* pointer to the quote character in the token from strtok */
- char *ptr_quote=NULL;
-
- /* See if the delimiter contains any quote character */
- if (strchr(delim,'\'') || strchr(delim,'\"'))
- return NULL;
-
- /* repeat while strtok keeps returning tokens */
- while ((token = (char*)strtok(string, delim) ) != NULL)
- {
- /*
- set the input string to NULL so that from now on strtok is
- called with a NULL input string.
- */
- string = NULL;
- /*
- We don't need to remove any quote character for Windows version
- */
-#ifndef __WIN__
- /* check if the current token contain double quote character*/
- if ((ptr_quote = (char*)strchr(token,'\"')) != NULL)
- {
- /*
- get the matching double quote in the remaining
- input string
- */
- qt_token = (char*)strtok(NULL,"\"");
- }
- /* check if the current token contain single quote character*/
- else if ((ptr_quote = (char*)strchr(token,'\'')) != NULL)
- {
- /*
- get the matching single quote in the remaining
- input string
- */
- qt_token = (char*)strtok(NULL,"\'");
- }
-#endif
- /*
- if the current token does not contain any quote character then
- return it to the caller.
- */
- if (ptr_quote == NULL)
- {
- /*
- if there is an earlier token, i.e. ptr_token, then append the
- current token to it and return that; otherwise return the
- current token directly
- */
- return ptr_token ? strcat(ptr_token,token) : token;
- }
-
- /*
- remove the quote character, i.e. set it to 0 so that the token is
- divided in two parts which can later be concatenated,
- thereby removing the quote
- */
- *ptr_quote= 0;
-
- /* check if ptr_token has been initialized or not */
- if (ptr_token == NULL)
- {
- /* initialize the ptr_token with current token */
- ptr_token= token;
- /* copy the entire string between the matching pair of quotes */
- sprintf(ptr_token+strlen(ptr_token),"%s %s", ptr_quote+1, qt_token);
- }
- else
- {
- /*
- copy the current token and the entire string between the
- matching pair of quotes
- */
- if (qt_token == NULL)
- {
- sprintf(ptr_token+strlen(ptr_token),"%s%s", token, ptr_quote+1);
- }
- else
- {
- sprintf(ptr_token+strlen(ptr_token),"%s%s %s", token, ptr_quote+1,
- qt_token );
- }
- }
- }
-
- /* return the concatenated token */
- return ptr_token;
-}
-
-#ifndef __WIN__
-
-/*
- Synopsis:
- This function runs script files on Linux and NetWare
-
-Arguments:
- script_name: name of script file
-
-Output:
- nothing
-*/
-void run_init_script(const char *script_name)
-{
- arg_list_t al;
- int err;
-
- // args
- init_args(&al);
- add_arg(&al, sh_file);
- add_arg(&al, script_name);
-
- // spawn
- if ((err = spawn(sh_file, &al, TRUE, NULL, NULL, NULL, NULL)) != 0)
- {
- die("Unable to run script.");
- }
-
- // free args
- free_args(&al);
-}
-#endif
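
For reference, a minimal sketch (not part of this patch) of how the removed str_tok() helper documented above is meant to be called; the wrapper function, the use of <stdio.h>, and the option string (taken from the str_tok() synopsis) are illustrative only:

  #include <stdio.h>

  char *str_tok(char *string, const char *delim);   /* helper from the removed file above */

  static void show_tokens(void)
  {
    /* example option string from the str_tok() synopsis */
    char options[] = "--init-slave=\"set global max_connections=500\" --skip-external-locking";
    char *tok;

    /* first call passes the buffer, later calls pass NULL, as with strtok() */
    for (tok = str_tok(options, " "); tok != NULL; tok = str_tok(NULL, " "))
      printf("token: %s\n", tok);
  }
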
diff --git a/mysql-test/r/ctype_latin1.result b/mysql-test/r/ctype_latin1.result
index a8182438ac4..355f53b63a5 100644
--- a/mysql-test/r/ctype_latin1.result
+++ b/mysql-test/r/ctype_latin1.result
@@ -296,3 +296,12 @@ FD C3BD FD 1
FE C3BE FE 1
FF C3BF FF 1
DROP TABLE t1;
+select 'a' regexp 'A' collate latin1_general_ci;
+'a' regexp 'A' collate latin1_general_ci
+1
+select 'a' regexp 'A' collate latin1_general_cs;
+'a' regexp 'A' collate latin1_general_cs
+0
+select 'a' regexp 'A' collate latin1_bin;
+'a' regexp 'A' collate latin1_bin
+0
diff --git a/mysql-test/r/ctype_uca.result b/mysql-test/r/ctype_uca.result
index 7620b18eea6..cb060ad7ee4 100644
--- a/mysql-test/r/ctype_uca.result
+++ b/mysql-test/r/ctype_uca.result
@@ -19,6 +19,9 @@ select 'a ' = 'a\t', 'a ' < 'a\t', 'a ' > 'a\t';
select 'a a' > 'a', 'a \t' < 'a';
'a a' > 'a' 'a \t' < 'a'
1 1
+select 'c' like '\_' as want0;
+want0
+0
CREATE TABLE t (
c char(20) NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
diff --git a/mysql-test/r/ctype_utf8.result b/mysql-test/r/ctype_utf8.result
index 945ec8eae99..599d49208e7 100644
--- a/mysql-test/r/ctype_utf8.result
+++ b/mysql-test/r/ctype_utf8.result
@@ -814,3 +814,6 @@ t2 CREATE TABLE `t2` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t2;
drop table t1;
+select 'c' like '\_' as want0;
+want0
+0
diff --git a/mysql-test/r/delete.result b/mysql-test/r/delete.result
index 5575ee1bf98..f1fba87c70b 100644
--- a/mysql-test/r/delete.result
+++ b/mysql-test/r/delete.result
@@ -16,12 +16,34 @@ SET AUTOCOMMIT=0;
DELETE from t1;
SET AUTOCOMMIT=1;
drop table t1;
-create table t1 (a bigint not null, primary key (a,a,a,a,a,a,a,a,a,a));
-insert into t1 values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23);
+create table t1 (
+a bigint not null,
+b bigint not null default 0,
+c bigint not null default 0,
+d bigint not null default 0,
+e bigint not null default 0,
+f bigint not null default 0,
+g bigint not null default 0,
+h bigint not null default 0,
+i bigint not null default 0,
+j bigint not null default 0,
+primary key (a,b,c,d,e,f,g,h,i,j));
+insert into t1 (a) values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23);
delete from t1 where a=26;
drop table t1;
-create table t1 (a bigint not null, primary key (a,a,a,a,a,a,a,a,a,a));
-insert into t1 values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23),(27);
+create table t1 (
+a bigint not null,
+b bigint not null default 0,
+c bigint not null default 0,
+d bigint not null default 0,
+e bigint not null default 0,
+f bigint not null default 0,
+g bigint not null default 0,
+h bigint not null default 0,
+i bigint not null default 0,
+j bigint not null default 0,
+primary key (a,b,c,d,e,f,g,h,i,j));
+insert into t1 (a) values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23),(27);
delete from t1 where a=27;
drop table t1;
CREATE TABLE `t1` (
diff --git a/mysql-test/r/fulltext.result b/mysql-test/r/fulltext.result
index 5f0b228e23d..1951f68e822 100644
--- a/mysql-test/r/fulltext.result
+++ b/mysql-test/r/fulltext.result
@@ -190,6 +190,14 @@ a
select * from t1 where match a against ("+aaa10 +(bbb*)" in boolean mode);
a
aaa10 bbb20
+select * from t1 where match a against ("+(+aaa* +bbb1*)" in boolean mode);
+a
+aaa20 bbb15
+aaa30 bbb10
+select * from t1 where match a against ("(+aaa* +bbb1*)" in boolean mode);
+a
+aaa20 bbb15
+aaa30 bbb10
drop table t1;
CREATE TABLE t1 (
id int(11),
diff --git a/mysql-test/r/heap.result b/mysql-test/r/heap.result
index 3f468a5751e..e741a6859c7 100644
--- a/mysql-test/r/heap.result
+++ b/mysql-test/r/heap.result
@@ -233,3 +233,10 @@ SELECT * FROM t1 WHERE B is not null;
a B
1 1
DROP TABLE t1;
+CREATE TABLE t1 (pseudo char(35) PRIMARY KEY, date int(10) unsigned NOT NULL) ENGINE=HEAP;
+INSERT INTO t1 VALUES ('massecot',1101106491),('altec',1101106492),('stitch+',1101106304),('Seb Corgan',1101106305),('beerfilou',1101106263),('flaker',1101106529),('joce8',5),('M4vrick',1101106418),('gabay008',1101106525),('Vamp irX',1101106291),('ZoomZip',1101106546),('rip666',1101106502),('CBP ',1101106397),('guezpard',1101106496);
+DELETE FROM t1 WHERE date<1101106546;
+SELECT * FROM t1;
+pseudo date
+ZoomZip 1101106546
+DROP TABLE t1;
diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result
index 9ebf33be5bd..8f5ac88b7d0 100644
--- a/mysql-test/r/information_schema.result
+++ b/mysql-test/r/information_schema.result
@@ -363,40 +363,17 @@ NULL test PRIMARY NULL test t1 a 1 NULL NULL NULL
NULL test constraint_1 NULL test t1 a 1 NULL NULL NULL
NULL test key_1 NULL test t1 a 1 NULL NULL NULL
NULL test key_2 NULL test t1 a 1 NULL NULL NULL
-drop table t1;
-CREATE TABLE t1 (id INT NOT NULL, PRIMARY KEY (id)) ENGINE=INNODB;
-CREATE TABLE t2 (id INT PRIMARY KEY, t1_id INT, INDEX par_ind (t1_id),
-FOREIGN KEY (t1_id) REFERENCES t1(id) ON DELETE CASCADE,
-FOREIGN KEY (t1_id) REFERENCES t1(id) ON UPDATE CASCADE) ENGINE=INNODB;
-select * from information_schema.TABLE_CONSTRAINTS where
-TABLE_SCHEMA= "test";
-CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_SCHEMA TABLE_NAME CONSTRAINT_TYPE CONSTRAINT_METHOD
-NULL test PRIMARY test t1 PRIMARY KEY NULL
-NULL test PRIMARY test t2 PRIMARY KEY NULL
-NULL test t2_ibfk_1 test t2 FOREIGN KEY ON DELETE CASCADE
-NULL test t2_ibfk_2 test t2 FOREIGN KEY ON UPDATE CASCADE
-select * from information_schema.KEY_COLUMN_USAGE where
-TABLE_SCHEMA= "test";
-CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
-NULL test PRIMARY NULL test t1 id 1 NULL NULL NULL
-NULL test PRIMARY NULL test t2 id 1 NULL NULL NULL
-NULL test t2_ibfk_1 NULL test t2 t1_id 1 NULL id
-NULL test t2_ibfk_2 NULL test t2 t1_id 1 NULL id
select table_name from information_schema.TABLES where table_schema like "test%";
table_name
t1
-t2
select table_name,column_name from information_schema.COLUMNS where table_schema like "test%";
table_name column_name
-t1 id
-t2 id
-t2 t1_id
+t1 a
select ROUTINE_NAME from information_schema.ROUTINES;
ROUTINE_NAME
sel2
sub1
delete from mysql.user where user='mysqltest_1';
-drop table t2;
drop table t1;
drop procedure sel2;
drop function sub1;
@@ -442,6 +419,7 @@ v
select sql_mode from information_schema.ROUTINES;
sql_mode
+drop procedure px5;
create table t1 (a int not null auto_increment,b int, primary key (a));
insert into t1 values (1,1),(NULL,3),(NULL,4);
select AUTO_INCREMENT from information_schema.tables where table_name = 't1';
@@ -508,3 +486,34 @@ g int(11) 11 11 11 0
h double(10,3) 10 10 10 3
i double 22 22 22 NULL
drop table t1;
+create table t115 as select table_name, column_name, column_type
+from information_schema.columns where table_name = 'proc';
+select * from t115;
+table_name column_name column_type
+proc db varchar(64)
+proc name varchar(64)
+proc type enum('FUNCTION','PROCEDURE')
+proc specific_name varchar(64)
+proc language enum('SQL')
+proc sql_data_access enum('CONTAINS_SQL','NO_SQL','READS_SQL_DATA','MODIFIES_SQL_DATA')
+proc is_deterministic enum('YES','NO')
+proc security_type enum('INVOKER','DEFINER')
+proc param_list blob
+proc returns varchar(64)
+proc body blob
+proc definer varchar(77)
+proc created timestamp
+proc modified timestamp
+proc sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO')
+proc comment varchar(64)
+drop table t115;
+create view vk as select count(*) from information_schema.tables a;
+select * from vk;
+count(*)
+17
+drop view vk;
+create procedure p108 () begin declare c cursor for select data_type
+from information_schema.columns; open c; open c; end;//
+call p108()//
+ERROR 24000: Cursor is already open
+drop procedure p108;
diff --git a/mysql-test/r/information_schema_inno.result b/mysql-test/r/information_schema_inno.result
new file mode 100644
index 00000000000..e6dcda2c15d
--- /dev/null
+++ b/mysql-test/r/information_schema_inno.result
@@ -0,0 +1,19 @@
+CREATE TABLE t1 (id INT NOT NULL, PRIMARY KEY (id)) ENGINE=INNODB;
+CREATE TABLE t2 (id INT PRIMARY KEY, t1_id INT, INDEX par_ind (t1_id),
+FOREIGN KEY (t1_id) REFERENCES t1(id) ON DELETE CASCADE,
+FOREIGN KEY (t1_id) REFERENCES t1(id) ON UPDATE CASCADE) ENGINE=INNODB;
+select * from information_schema.TABLE_CONSTRAINTS where
+TABLE_SCHEMA= "test";
+CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_SCHEMA TABLE_NAME CONSTRAINT_TYPE CONSTRAINT_METHOD
+NULL test PRIMARY test t1 PRIMARY KEY NULL
+NULL test PRIMARY test t2 PRIMARY KEY NULL
+NULL test t2_ibfk_1 test t2 FOREIGN KEY ON DELETE CASCADE
+NULL test t2_ibfk_2 test t2 FOREIGN KEY ON UPDATE CASCADE
+select * from information_schema.KEY_COLUMN_USAGE where
+TABLE_SCHEMA= "test";
+CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
+NULL test PRIMARY NULL test t1 id 1 NULL NULL NULL
+NULL test PRIMARY NULL test t2 id 1 NULL NULL NULL
+NULL test t2_ibfk_1 NULL test t2 t1_id 1 NULL id
+NULL test t2_ibfk_2 NULL test t2 t1_id 1 NULL id
+drop table t2, t1;
diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result
index 6097dadb1fa..9f19dd479c8 100644
--- a/mysql-test/r/innodb.result
+++ b/mysql-test/r/innodb.result
@@ -1664,6 +1664,24 @@ select count(*) from t1 where x = 18446744073709551601;
count(*)
1
drop table t1;
+create table t1 (c char(10), index (c,c)) engine=innodb;
+ERROR 42S21: Duplicate column name 'c'
+create table t1 (c1 char(10), c2 char(10), index (c1,c2,c1)) engine=innodb;
+ERROR 42S21: Duplicate column name 'c1'
+create table t1 (c1 char(10), c2 char(10), index (c1,c1,c2)) engine=innodb;
+ERROR 42S21: Duplicate column name 'c1'
+create table t1 (c1 char(10), c2 char(10), index (c2,c1,c1)) engine=innodb;
+ERROR 42S21: Duplicate column name 'c1'
+create table t1 (c1 char(10), c2 char(10)) engine=innodb;
+alter table t1 add key (c1,c1);
+ERROR 42S21: Duplicate column name 'c1'
+alter table t1 add key (c2,c1,c1);
+ERROR 42S21: Duplicate column name 'c1'
+alter table t1 add key (c1,c2,c1);
+ERROR 42S21: Duplicate column name 'c1'
+alter table t1 add key (c1,c1,c2);
+ERROR 42S21: Duplicate column name 'c1'
+drop table t1;
show status like "Innodb_buffer_pool_pages_total";
Variable_name Value
Innodb_buffer_pool_pages_total 512
diff --git a/mysql-test/r/key.result b/mysql-test/r/key.result
index b11969ab6d7..98e8851bb7e 100644
--- a/mysql-test/r/key.result
+++ b/mysql-test/r/key.result
@@ -311,3 +311,21 @@ test.t1 check status OK
drop table t1;
create table t1 (c char(10), index (c(0)));
ERROR HY000: Key part 'c' length cannot be 0
+create table t1 (c char(10), index (c,c));
+ERROR 42S21: Duplicate column name 'c'
+create table t1 (c1 char(10), c2 char(10), index (c1,c2,c1));
+ERROR 42S21: Duplicate column name 'c1'
+create table t1 (c1 char(10), c2 char(10), index (c1,c1,c2));
+ERROR 42S21: Duplicate column name 'c1'
+create table t1 (c1 char(10), c2 char(10), index (c2,c1,c1));
+ERROR 42S21: Duplicate column name 'c1'
+create table t1 (c1 char(10), c2 char(10));
+alter table t1 add key (c1,c1);
+ERROR 42S21: Duplicate column name 'c1'
+alter table t1 add key (c2,c1,c1);
+ERROR 42S21: Duplicate column name 'c1'
+alter table t1 add key (c1,c2,c1);
+ERROR 42S21: Duplicate column name 'c1'
+alter table t1 add key (c1,c1,c2);
+ERROR 42S21: Duplicate column name 'c1'
+drop table t1;
diff --git a/mysql-test/r/lowercase_table3.result b/mysql-test/r/lowercase_table3.result
index a645e46be9e..8182d07c26b 100644
--- a/mysql-test/r/lowercase_table3.result
+++ b/mysql-test/r/lowercase_table3.result
@@ -6,5 +6,5 @@ drop table t1;
flush tables;
CREATE TABLE t1 (a int) ENGINE=INNODB;
SELECT * from T1;
-ERROR HY000: Can't open file: 'T1.InnoDB' (errno: 1)
+ERROR HY000: Can't open file: 'T1.ibd' (errno: 1)
drop table t1;
diff --git a/mysql-test/r/lowercase_view.result b/mysql-test/r/lowercase_view.result
index 64b40389690..0644b32015c 100644
--- a/mysql-test/r/lowercase_view.result
+++ b/mysql-test/r/lowercase_view.result
@@ -22,3 +22,19 @@ insert into v2aA values ((select max(col1) from v1aA));
ERROR HY000: You can't specify target table 'v2aa' for update in FROM clause
drop view v2Aa,v1Aa;
drop table t1Aa,t2Aa;
+create table t1Aa (col1 int);
+create view v1Aa as select col1 from t1Aa as AaA;
+show create view v1AA;
+View Create View
+v1aa CREATE ALGORITHM=UNDEFINED VIEW `test`.`v1aa` AS select `aaa`.`col1` AS `col1` from `test`.`t1aa` `AaA`
+drop view v1AA;
+select Aaa.col1 from t1Aa as AaA;
+col1
+create view v1Aa as select Aaa.col1 from t1Aa as AaA;
+drop view v1AA;
+create view v1Aa as select AaA.col1 from t1Aa as AaA;
+show create view v1AA;
+View Create View
+v1aa CREATE ALGORITHM=UNDEFINED VIEW `test`.`v1aa` AS select `aaa`.`col1` AS `col1` from `test`.`t1aa` `AaA`
+drop view v1AA;
+drop table t1Aa;
diff --git a/mysql-test/r/ndb_index_unique.result b/mysql-test/r/ndb_index_unique.result
index af9b84022ed..31b258c0a6f 100644
--- a/mysql-test/r/ndb_index_unique.result
+++ b/mysql-test/r/ndb_index_unique.result
@@ -44,6 +44,51 @@ a b c
7 8 3
8 2 3
drop table t1;
+CREATE TABLE t1 (
+a int unsigned NOT NULL PRIMARY KEY,
+b int unsigned,
+c int unsigned,
+UNIQUE bc(b,c)
+) engine = ndb;
+insert into t1 values(1,1,1),(2,NULL,2),(3,NULL,NULL),(4,4,NULL);
+select * from t1 use index (bc) where b IS NULL order by a;
+a b c
+2 NULL 2
+3 NULL NULL
+select * from t1 use index (bc)order by a;
+a b c
+1 1 1
+2 NULL 2
+3 NULL NULL
+4 4 NULL
+select * from t1 use index (bc) order by a;
+a b c
+1 1 1
+2 NULL 2
+3 NULL NULL
+4 4 NULL
+select * from t1 use index (PRIMARY) where b IS NULL order by a;
+a b c
+2 NULL 2
+3 NULL NULL
+select * from t1 use index (bc) where b IS NULL order by a;
+a b c
+2 NULL 2
+3 NULL NULL
+select * from t1 use index (bc) where b IS NULL and c IS NULL order by a;
+a b c
+select * from t1 use index (bc) where b IS NULL and c = 2 order by a;
+a b c
+select * from t1 use index (bc) where b < 4 order by a;
+a b c
+1 1 1
+select * from t1 use index (bc) where b IS NOT NULL order by a;
+a b c
+1 1 1
+4 4 NULL
+insert into t1 values(5,1,1);
+ERROR 23000: Duplicate entry '5' for key 1
+drop table t1;
CREATE TABLE t2 (
a int unsigned NOT NULL PRIMARY KEY,
b int unsigned not null,
@@ -87,6 +132,13 @@ a b c
7 8 3
8 2 3
drop table t2;
+CREATE TABLE t2 (
+a int unsigned NOT NULL PRIMARY KEY,
+b int unsigned not null,
+c int unsigned,
+UNIQUE USING HASH (b, c)
+) engine=ndbcluster;
+ERROR 42000: Column 'c' is used with UNIQUE or INDEX but is not defined as NOT NULL
CREATE TABLE t3 (
a int unsigned NOT NULL,
b int unsigned not null,
diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result
index e0f230fa579..a4c16206b11 100644
--- a/mysql-test/r/ps.result
+++ b/mysql-test/r/ps.result
@@ -450,6 +450,27 @@ PREPARE stmt FROM 'UPDATE t1 AS P1 INNER JOIN (SELECT N FROM t1 GROUP BY N HAVIN
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
+prepare stmt from "select ? is null, ? is not null, ?";
+select @no_such_var is null, @no_such_var is not null, @no_such_var;
+@no_such_var is null @no_such_var is not null @no_such_var
+1 0 NULL
+execute stmt using @no_such_var, @no_such_var, @no_such_var;
+? is null ? is not null ?
+1 0 NULL
+set @var='abc';
+select @var is null, @var is not null, @var;
+@var is null @var is not null @var
+0 1 abc
+execute stmt using @var, @var, @var;
+? is null ? is not null ?
+0 1 abc
+set @var=null;
+select @var is null, @var is not null, @var;
+@var is null @var is not null @var
+1 0 NULL
+execute stmt using @var, @var, @var;
+? is null ? is not null ?
+1 0 NULL
create table t1 (a varchar(20));
insert into t1 values ('foo');
prepare stmt FROM 'SELECT char_length (a) FROM t1';
diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result
index 1374a8fd0ed..8a1d80e9f79 100644
--- a/mysql-test/r/range.result
+++ b/mysql-test/r/range.result
@@ -218,12 +218,14 @@ drop table t1;
create table t1 (x int, y int, index(x), index(y));
insert into t1 (x) values (1),(2),(3),(4),(5),(6),(7),(8),(9);
update t1 set y=x;
-explain select * from t1, t1 t2 where t1.y = 2 and t2.x between 7 and t1.y+0;
+explain select * from t1, t1 t2 where t1.y = 8 and t2.x between 7 and t1.y+0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
-explain select * from t1, t1 t2 where t1.y = 2 and t2.x >= 7 and t2.x <= t1.y+0;
+1 SIMPLE t1 ref y y 5 const 1 Using where
+1 SIMPLE t2 range x x 5 NULL 2 Using where
+explain select * from t1, t1 t2 where t1.y = 8 and t2.x >= 7 and t2.x <= t1.y+0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE t1 ref y y 5 const 1 Using where
+1 SIMPLE t2 range x x 5 NULL 2 Using where
explain select * from t1, t1 t2 where t1.y = 2 and t2.x between t1.y-1 and t1.y+1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref y y 5 const 1 Using where
diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result
index ceb8919b26a..20ab3ac8f80 100644
--- a/mysql-test/r/subselect.result
+++ b/mysql-test/r/subselect.result
@@ -1998,3 +1998,18 @@ ac
700
NULL
drop tables t1,t2;
+create table t1 (a int not null, b int not null, c int, primary key (a,b));
+insert into t1 values (1,1,1), (2,2,2), (3,3,3);
+set @b:= 0;
+explain select sum(a) from t1 where b > @b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 8 NULL 3 Using where; Using index
+set @a:= (select sum(a) from t1 where b > @b);
+explain select a from t1 where c=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
+do @a:= (select sum(a) from t1 where b > @b);
+explain select a from t1 where c=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
+drop table t1;
diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result
index b45aaea0cbe..4a85097cfce 100644
--- a/mysql-test/r/trigger.result
+++ b/mysql-test/r/trigger.result
@@ -150,6 +150,10 @@ create trigger trg before delete on t1 for each row set new.i:=1;
ERROR HY000: There is no NEW row in on DELETE trigger
create trigger trg after update on t1 for each row set new.i:=1;
ERROR HY000: Updating of NEW row is not allowed in after trigger
+create trigger trg before update on t1 for each row set new.j:=1;
+ERROR 42S22: Unknown column 'j' in 'NEW'
+create trigger trg before update on t1 for each row set @a:=old.j;
+ERROR 42S22: Unknown column 'j' in 'OLD'
create trigger trg before insert on t2 for each row set @a:=1;
ERROR 42S02: Table 'test.t2' doesn't exist
create trigger trg before insert on t1 for each row set @a:=1;
diff --git a/mysql-test/r/type_blob.result b/mysql-test/r/type_blob.result
index 2f564112e40..c9c15b4c293 100644
--- a/mysql-test/r/type_blob.result
+++ b/mysql-test/r/type_blob.result
@@ -682,8 +682,8 @@ id txt
3 NULL
1 Chevy
drop table t1;
-CREATE TABLE t1 ( i int(11) NOT NULL default '0', c text NOT NULL, PRIMARY KEY (i), KEY (c(1),c(1)));
-INSERT t1 VALUES (1,''),(2,''),(3,'asdfh'),(4,'');
+CREATE TABLE t1 ( i int(11) NOT NULL default '0', c text NOT NULL, d varchar(1) NOT NULL DEFAULT ' ', PRIMARY KEY (i), KEY (c(1),d));
+INSERT t1 (i, c) VALUES (1,''),(2,''),(3,'asdfh'),(4,'');
select max(i) from t1 where c = '';
max(i)
4
diff --git a/mysql-test/t/ctype_latin1.test b/mysql-test/t/ctype_latin1.test
index 14062437428..677acd9faa9 100644
--- a/mysql-test/t/ctype_latin1.test
+++ b/mysql-test/t/ctype_latin1.test
@@ -53,3 +53,10 @@ SELECT
hex(@l:=convert(@u using latin1)),
a=@l FROM t1;
DROP TABLE t1;
+
+#
+# Bug #6737: REGEXP gives wrong result with case sensitive collation
+#
+select 'a' regexp 'A' collate latin1_general_ci;
+select 'a' regexp 'A' collate latin1_general_cs;
+select 'a' regexp 'A' collate latin1_bin;
diff --git a/mysql-test/t/ctype_uca.test b/mysql-test/t/ctype_uca.test
index e640e6b53dc..11833ba9bc7 100644
--- a/mysql-test/t/ctype_uca.test
+++ b/mysql-test/t/ctype_uca.test
@@ -25,6 +25,11 @@ select 'a ' = 'a\t', 'a ' < 'a\t', 'a ' > 'a\t';
select 'a a' > 'a', 'a \t' < 'a';
#
+# Bug #6787 LIKE not working properly with _ and utf8 data
+#
+select 'c' like '\_' as want0;
+
+#
# Bug #5679 utf8_unicode_ci LIKE--trailing % doesn't equal zero characters
#
CREATE TABLE t (
diff --git a/mysql-test/t/ctype_utf8.test b/mysql-test/t/ctype_utf8.test
index c75b1dee63c..42031be8f3c 100644
--- a/mysql-test/t/ctype_utf8.test
+++ b/mysql-test/t/ctype_utf8.test
@@ -660,3 +660,9 @@ create table t2 select concat(a,_utf8'') as a, concat(b,_utf8'')as b from t1;
show create table t2;
drop table t2;
drop table t1;
+
+#
+# Bug #6787 LIKE not working properly with _ and utf8 data
+#
+select 'c' like '\_' as want0;
+
diff --git a/mysql-test/t/delete.test b/mysql-test/t/delete.test
index 5f60445d765..0bf7187865d 100644
--- a/mysql-test/t/delete.test
+++ b/mysql-test/t/delete.test
@@ -29,12 +29,34 @@ drop table t1;
# (This assumes a block size of 1024)
#
-create table t1 (a bigint not null, primary key (a,a,a,a,a,a,a,a,a,a));
-insert into t1 values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23);
+create table t1 (
+ a bigint not null,
+ b bigint not null default 0,
+ c bigint not null default 0,
+ d bigint not null default 0,
+ e bigint not null default 0,
+ f bigint not null default 0,
+ g bigint not null default 0,
+ h bigint not null default 0,
+ i bigint not null default 0,
+ j bigint not null default 0,
+ primary key (a,b,c,d,e,f,g,h,i,j));
+insert into t1 (a) values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23);
delete from t1 where a=26;
drop table t1;
-create table t1 (a bigint not null, primary key (a,a,a,a,a,a,a,a,a,a));
-insert into t1 values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23),(27);
+create table t1 (
+ a bigint not null,
+ b bigint not null default 0,
+ c bigint not null default 0,
+ d bigint not null default 0,
+ e bigint not null default 0,
+ f bigint not null default 0,
+ g bigint not null default 0,
+ h bigint not null default 0,
+ i bigint not null default 0,
+ j bigint not null default 0,
+ primary key (a,b,c,d,e,f,g,h,i,j));
+insert into t1 (a) values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23),(27);
delete from t1 where a=27;
drop table t1;
diff --git a/mysql-test/t/fulltext.test b/mysql-test/t/fulltext.test
index afbe8f8117c..41fbf3f27ac 100644
--- a/mysql-test/t/fulltext.test
+++ b/mysql-test/t/fulltext.test
@@ -98,6 +98,8 @@ select * from t1 where match a against ("+aaa* +bbb*" in boolean mode);
select * from t1 where match a against ("+aaa* +bbb1*" in boolean mode);
select * from t1 where match a against ("+aaa* +ccc*" in boolean mode);
select * from t1 where match a against ("+aaa10 +(bbb*)" in boolean mode);
+select * from t1 where match a against ("+(+aaa* +bbb1*)" in boolean mode);
+select * from t1 where match a against ("(+aaa* +bbb1*)" in boolean mode);
drop table t1;
#
diff --git a/mysql-test/t/heap.test b/mysql-test/t/heap.test
index e1776245d9e..2eff36f3317 100644
--- a/mysql-test/t/heap.test
+++ b/mysql-test/t/heap.test
@@ -174,3 +174,14 @@ CREATE TABLE t1 (a INT NOT NULL, B INT, KEY(B)) ENGINE=HEAP;
INSERT INTO t1 VALUES(1,1), (1,NULL);
SELECT * FROM t1 WHERE B is not null;
DROP TABLE t1;
+
+#
+# Bug #6748
+# heap_rfirst() doesn't work (and never did!)
+#
+CREATE TABLE t1 (pseudo char(35) PRIMARY KEY, date int(10) unsigned NOT NULL) ENGINE=HEAP;
+INSERT INTO t1 VALUES ('massecot',1101106491),('altec',1101106492),('stitch+',1101106304),('Seb Corgan',1101106305),('beerfilou',1101106263),('flaker',1101106529),('joce8',5),('M4vrick',1101106418),('gabay008',1101106525),('Vamp irX',1101106291),('ZoomZip',1101106546),('rip666',1101106502),('CBP ',1101106397),('guezpard',1101106496);
+DELETE FROM t1 WHERE date<1101106546;
+SELECT * FROM t1;
+DROP TABLE t1;
+
diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test
index ce8dc0290e9..411301445a3 100644
--- a/mysql-test/t/information_schema.test
+++ b/mysql-test/t/information_schema.test
@@ -162,16 +162,7 @@ select * from information_schema.TABLE_CONSTRAINTS where
TABLE_SCHEMA= "test";
select * from information_schema.KEY_COLUMN_USAGE where
TABLE_SCHEMA= "test";
-drop table t1;
-CREATE TABLE t1 (id INT NOT NULL, PRIMARY KEY (id)) ENGINE=INNODB;
-CREATE TABLE t2 (id INT PRIMARY KEY, t1_id INT, INDEX par_ind (t1_id),
-FOREIGN KEY (t1_id) REFERENCES t1(id) ON DELETE CASCADE,
-FOREIGN KEY (t1_id) REFERENCES t1(id) ON UPDATE CASCADE) ENGINE=INNODB;
-select * from information_schema.TABLE_CONSTRAINTS where
-TABLE_SCHEMA= "test";
-select * from information_schema.KEY_COLUMN_USAGE where
-TABLE_SCHEMA= "test";
connect (user1,localhost,mysqltest_1,,);
connection user1;
@@ -181,7 +172,6 @@ select ROUTINE_NAME from information_schema.ROUTINES;
disconnect user1;
connection default;
delete from mysql.user where user='mysqltest_1';
-drop table t2;
drop table t1;
drop procedure sel2;
drop function sub1;
@@ -218,6 +208,7 @@ call px5()//
call px5()//
delimiter ;//
select sql_mode from information_schema.ROUTINES;
+drop procedure px5;
create table t1 (a int not null auto_increment,b int, primary key (a));
insert into t1 values (1,1),(NULL,3),(NULL,4);
@@ -252,3 +243,20 @@ select COLUMN_NAME,COLUMN_TYPE, CHARACTER_MAXIMUM_LENGTH,
CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE
from information_schema.columns where table_name= 't1';
drop table t1;
+
+create table t115 as select table_name, column_name, column_type
+from information_schema.columns where table_name = 'proc';
+select * from t115;
+drop table t115;
+
+create view vk as select count(*) from information_schema.tables a;
+select * from vk;
+drop view vk;
+
+delimiter //;
+create procedure p108 () begin declare c cursor for select data_type
+from information_schema.columns; open c; open c; end;//
+--error 1325
+call p108()//
+delimiter ;//
+drop procedure p108;
diff --git a/mysql-test/t/information_schema_inno.test b/mysql-test/t/information_schema_inno.test
new file mode 100644
index 00000000000..15643ebe90c
--- /dev/null
+++ b/mysql-test/t/information_schema_inno.test
@@ -0,0 +1,16 @@
+-- source include/have_innodb.inc
+
+#
+# Test for KEY_COLUMN_USAGE & TABLE_CONSTRAINTS tables
+#
+
+CREATE TABLE t1 (id INT NOT NULL, PRIMARY KEY (id)) ENGINE=INNODB;
+CREATE TABLE t2 (id INT PRIMARY KEY, t1_id INT, INDEX par_ind (t1_id),
+FOREIGN KEY (t1_id) REFERENCES t1(id) ON DELETE CASCADE,
+FOREIGN KEY (t1_id) REFERENCES t1(id) ON UPDATE CASCADE) ENGINE=INNODB;
+select * from information_schema.TABLE_CONSTRAINTS where
+TABLE_SCHEMA= "test";
+select * from information_schema.KEY_COLUMN_USAGE where
+TABLE_SCHEMA= "test";
+
+drop table t2, t1;
diff --git a/mysql-test/t/innodb.test b/mysql-test/t/innodb.test
index 5ea6817bfba..47324f1b828 100644
--- a/mysql-test/t/innodb.test
+++ b/mysql-test/t/innodb.test
@@ -1180,6 +1180,28 @@ select count(*) from t1 where x = 18446744073709551601;
drop table t1;
+#
+# Bug #6126: Duplicate columns in keys gives misleading error message
+#
+--error 1060
+create table t1 (c char(10), index (c,c)) engine=innodb;
+--error 1060
+create table t1 (c1 char(10), c2 char(10), index (c1,c2,c1)) engine=innodb;
+--error 1060
+create table t1 (c1 char(10), c2 char(10), index (c1,c1,c2)) engine=innodb;
+--error 1060
+create table t1 (c1 char(10), c2 char(10), index (c2,c1,c1)) engine=innodb;
+create table t1 (c1 char(10), c2 char(10)) engine=innodb;
+--error 1060
+alter table t1 add key (c1,c1);
+--error 1060
+alter table t1 add key (c2,c1,c1);
+--error 1060
+alter table t1 add key (c1,c2,c1);
+--error 1060
+alter table t1 add key (c1,c1,c2);
+drop table t1;
+
# Test for testable InnoDB status variables. This test
# uses previous ones(pages_created, rows_deleted, ...).
show status like "Innodb_buffer_pool_pages_total";
diff --git a/mysql-test/t/key.test b/mysql-test/t/key.test
index 0a86c1cd145..af3509c8454 100644
--- a/mysql-test/t/key.test
+++ b/mysql-test/t/key.test
@@ -301,3 +301,26 @@ drop table t1;
--error 1391
create table t1 (c char(10), index (c(0)));
+
+#
+# Bug #6126: Duplicate columns in keys should fail
+# Bug #6252: (dup)
+#
+--error 1060
+create table t1 (c char(10), index (c,c));
+--error 1060
+create table t1 (c1 char(10), c2 char(10), index (c1,c2,c1));
+--error 1060
+create table t1 (c1 char(10), c2 char(10), index (c1,c1,c2));
+--error 1060
+create table t1 (c1 char(10), c2 char(10), index (c2,c1,c1));
+create table t1 (c1 char(10), c2 char(10));
+--error 1060
+alter table t1 add key (c1,c1);
+--error 1060
+alter table t1 add key (c2,c1,c1);
+--error 1060
+alter table t1 add key (c1,c2,c1);
+--error 1060
+alter table t1 add key (c1,c1,c2);
+drop table t1;
diff --git a/mysql-test/t/lowercase_view.test b/mysql-test/t/lowercase_view.test
index 2a2757650ae..4b688cfb922 100644
--- a/mysql-test/t/lowercase_view.test
+++ b/mysql-test/t/lowercase_view.test
@@ -32,3 +32,18 @@ delete from v2aA where col1 = (select max(col1) from v1aA);
insert into v2aA values ((select max(col1) from v1aA));
drop view v2Aa,v1Aa;
drop table t1Aa,t2Aa;
+
+#
+# aliases in VIEWs
+#
+create table t1Aa (col1 int);
+create view v1Aa as select col1 from t1Aa as AaA;
+show create view v1AA;
+drop view v1AA;
+select Aaa.col1 from t1Aa as AaA;
+create view v1Aa as select Aaa.col1 from t1Aa as AaA;
+drop view v1AA;
+create view v1Aa as select AaA.col1 from t1Aa as AaA;
+show create view v1AA;
+drop view v1AA;
+drop table t1Aa;
diff --git a/mysql-test/t/ndb_autodiscover.test b/mysql-test/t/ndb_autodiscover.test
index fd7fe0e60d8..6551732adba 100644
--- a/mysql-test/t/ndb_autodiscover.test
+++ b/mysql-test/t/ndb_autodiscover.test
@@ -199,7 +199,7 @@ insert into t4 values (1, "Automatic");
select * from t4;
# Remove the table from NDB
-system exec $NDB_TOOLS_DIR/ndb_drop_table -d test t4 > /dev/null ;
+system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t4 > /dev/null ;
#
# Test that correct error is returned
@@ -230,7 +230,7 @@ select * from t4;
flush tables;
# Remove the table from NDB
-system exec $NDB_TOOLS_DIR/ndb_drop_table -d test t4 > /dev/null ;
+system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t4 > /dev/null ;
SHOW TABLES;
@@ -264,8 +264,8 @@ insert into t8 values (8, "myisam table 8");
insert into t9 values (9);
# Remove t3, t5 from NDB
-system exec $NDB_TOOLS_DIR/ndb_drop_table -d test t3 > /dev/null ;
-system exec $NDB_TOOLS_DIR/ndb_drop_table -d test t5 > /dev/null ;
+system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t3 > /dev/null ;
+system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t5 > /dev/null ;
# Remove t6, t7 from disk
system rm var/master-data/test/t6.frm > /dev/null ;
system rm var/master-data/test/t7.frm > /dev/null ;
@@ -306,8 +306,8 @@ insert into t8 values (8, "myisam table 8");
insert into t9 values (9);
# Remove t3, t5 from NDB
-system exec $NDB_TOOLS_DIR/ndb_drop_table -d test t3 > /dev/null ;
-system exec $NDB_TOOLS_DIR/ndb_drop_table -d test t5 > /dev/null ;
+system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t3 > /dev/null ;
+system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t5 > /dev/null ;
# Remove t6, t7 from disk
system rm var/master-data/test/t6.frm > /dev/null ;
system rm var/master-data/test/t7.frm > /dev/null ;
@@ -479,4 +479,4 @@ create table t10 (
insert into t10 values (1, 'kalle');
---exec $NDB_TOOLS_DIR/ndb_drop_table -d test `$NDB_TOOLS_DIR/ndb_show_tables | grep BLOB` > /dev/null 2>&1 || true
+--exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test `$NDB_TOOLS_DIR/ndb_show_tables --no-defaults | grep BLOB` > /dev/null 2>&1 || true
diff --git a/mysql-test/t/ndb_index_unique.test b/mysql-test/t/ndb_index_unique.test
index bdb23949763..397a2c45a9f 100644
--- a/mysql-test/t/ndb_index_unique.test
+++ b/mysql-test/t/ndb_index_unique.test
@@ -30,6 +30,32 @@ select * from t1 order by a;
drop table t1;
+#
+# Indexing NULL values
+#
+
+CREATE TABLE t1 (
+ a int unsigned NOT NULL PRIMARY KEY,
+ b int unsigned,
+ c int unsigned,
+ UNIQUE bc(b,c)
+) engine = ndb;
+
+insert into t1 values(1,1,1),(2,NULL,2),(3,NULL,NULL),(4,4,NULL);
+select * from t1 use index (bc) where b IS NULL order by a;
+
+select * from t1 use index (bc)order by a;
+select * from t1 use index (bc) order by a;
+select * from t1 use index (PRIMARY) where b IS NULL order by a;
+select * from t1 use index (bc) where b IS NULL order by a;
+select * from t1 use index (bc) where b IS NULL and c IS NULL order by a;
+select * from t1 use index (bc) where b IS NULL and c = 2 order by a;
+select * from t1 use index (bc) where b < 4 order by a;
+select * from t1 use index (bc) where b IS NOT NULL order by a;
+-- error 1062
+insert into t1 values(5,1,1);
+drop table t1;
+
#
# Show use of UNIQUE USING HASH indexes
@@ -58,6 +84,14 @@ select * from t2 order by a;
drop table t2;
+-- error 1121
+CREATE TABLE t2 (
+ a int unsigned NOT NULL PRIMARY KEY,
+ b int unsigned not null,
+ c int unsigned,
+ UNIQUE USING HASH (b, c)
+) engine=ndbcluster;
+
#
# Show use of PRIMARY KEY USING HASH indexes
#
diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test
index 1d23a09a71e..157b125a13d 100644
--- a/mysql-test/t/ps.test
+++ b/mysql-test/t/ps.test
@@ -458,6 +458,20 @@ EXECUTE stmt;
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
+#
+# Bug#6297 "prepared statement, wrong handling of <parameter> IS NULL"
+# Test that placeholders work with IS NULL/IS NOT NULL clauses.
+#
+prepare stmt from "select ? is null, ? is not null, ?";
+select @no_such_var is null, @no_such_var is not null, @no_such_var;
+execute stmt using @no_such_var, @no_such_var, @no_such_var;
+set @var='abc';
+select @var is null, @var is not null, @var;
+execute stmt using @var, @var, @var;
+set @var=null;
+select @var is null, @var is not null, @var;
+execute stmt using @var, @var, @var;
+
#
# Bug#6102 "Server crash with prepared statement and blank after
# function name"
@@ -469,3 +483,4 @@ insert into t1 values ('foo');
--error 1305
prepare stmt FROM 'SELECT char_length (a) FROM t1';
drop table t1;
+
diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test
index 0b96f71d585..18cf614f338 100644
--- a/mysql-test/t/range.test
+++ b/mysql-test/t/range.test
@@ -181,8 +181,8 @@ create table t1 (x int, y int, index(x), index(y));
insert into t1 (x) values (1),(2),(3),(4),(5),(6),(7),(8),(9);
update t1 set y=x;
# between with only one end fixed
-explain select * from t1, t1 t2 where t1.y = 2 and t2.x between 7 and t1.y+0;
-explain select * from t1, t1 t2 where t1.y = 2 and t2.x >= 7 and t2.x <= t1.y+0;
+explain select * from t1, t1 t2 where t1.y = 8 and t2.x between 7 and t1.y+0;
+explain select * from t1, t1 t2 where t1.y = 8 and t2.x >= 7 and t2.x <= t1.y+0;
# between with both expressions on both ends
explain select * from t1, t1 t2 where t1.y = 2 and t2.x between t1.y-1 and t1.y+1;
explain select * from t1, t1 t2 where t1.y = 2 and t2.x >= t1.y-1 and t2.x <= t1.y+1;
diff --git a/mysql-test/t/rpl000001.test b/mysql-test/t/rpl000001.test
index 2e0ba2fff25..835af92186f 100644
--- a/mysql-test/t/rpl000001.test
+++ b/mysql-test/t/rpl000001.test
@@ -98,7 +98,7 @@ wait_for_slave_to_stop;
# The following test can't be done because the result of Pos will differ
# on different computers
-# --replace_result 9306 9999 3334 9999 3335 9999
+# --replace_result $MASTER_MYPORT MASTER_PORT
# show slave status;
set global sql_slave_skip_counter=1;
diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test
index 16556c4864c..9cf9f13cfea 100644
--- a/mysql-test/t/subselect.test
+++ b/mysql-test/t/subselect.test
@@ -1287,3 +1287,22 @@ INSERT INTO `t2` VALUES (6,5,12,7,'a'),(12,0,0,7,'a'),(12,1,0,7,'a'),(12,5,5,7,'
SELECT b.sc FROM (SELECT (SELECT a.access FROM t1 a WHERE a.map = op.map AND a.slave = op.pid AND a.master = 1) ac FROM t2 op WHERE op.id = 12 AND op.map = 0) b;
SELECT b.ac FROM (SELECT (SELECT a.access FROM t1 a WHERE a.map = op.map AND a.slave = op.pid AND a.master = 1) ac FROM t2 op WHERE op.id = 12 AND op.map = 0) b;
drop tables t1,t2;
+
+#
+# Test for bug #6462. "Same request on same data returns different
+# results." a.k.a. "Proper cleanup of subqueries is missing for
+# SET and DO statements".
+#
+create table t1 (a int not null, b int not null, c int, primary key (a,b));
+insert into t1 values (1,1,1), (2,2,2), (3,3,3);
+set @b:= 0;
+# Let us check that the subquery will use a covering index
+explain select sum(a) from t1 where b > @b;
+# This should not crash a -debug server due to a failing assertion
+set @a:= (select sum(a) from t1 where b > @b);
+# And this should not falsely report index usage
+explain select a from t1 where c=2;
+# Same for DO statement
+do @a:= (select sum(a) from t1 where b > @b);
+explain select a from t1 where c=2;
+drop table t1;
diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test
index 7dc976cf716..d4879b22bae 100644
--- a/mysql-test/t/trigger.test
+++ b/mysql-test/t/trigger.test
@@ -161,8 +161,10 @@ create trigger trg before update on t1 for each row set old.i:=1;
create trigger trg before delete on t1 for each row set new.i:=1;
--error 1362
create trigger trg after update on t1 for each row set new.i:=1;
-# TODO: We should also test wrong field names here, we don't do it now
-# because proper error handling is not in place yet.
+--error 1054
+create trigger trg before update on t1 for each row set new.j:=1;
+--error 1054
+create trigger trg before update on t1 for each row set @a:=old.j;
#
diff --git a/mysql-test/t/type_blob.test b/mysql-test/t/type_blob.test
index b67fa7a552d..f70193ddbe0 100644
--- a/mysql-test/t/type_blob.test
+++ b/mysql-test/t/type_blob.test
@@ -369,8 +369,8 @@ explain select * from t1 where txt='Chevy' or txt is NULL order by txt;
select * from t1 where txt='Chevy' or txt is NULL order by txt;
drop table t1;
-CREATE TABLE t1 ( i int(11) NOT NULL default '0', c text NOT NULL, PRIMARY KEY (i), KEY (c(1),c(1)));
-INSERT t1 VALUES (1,''),(2,''),(3,'asdfh'),(4,'');
+CREATE TABLE t1 ( i int(11) NOT NULL default '0', c text NOT NULL, d varchar(1) NOT NULL DEFAULT ' ', PRIMARY KEY (i), KEY (c(1),d));
+INSERT t1 (i, c) VALUES (1,''),(2,''),(3,'asdfh'),(4,'');
select max(i) from t1 where c = '';
drop table t1;
diff --git a/mysys/charset.c b/mysys/charset.c
index 1388fc40c6d..cb2379f8723 100644
--- a/mysys/charset.c
+++ b/mysys/charset.c
@@ -228,6 +228,7 @@ static int add_collation(CHARSET_INFO *cs)
}
else
{
+ uchar *sort_order= all_charsets[cs->number]->sort_order;
simple_cs_init_functions(all_charsets[cs->number]);
new->mbminlen= 1;
new->mbmaxlen= 1;
@@ -236,6 +237,16 @@ static int add_collation(CHARSET_INFO *cs)
all_charsets[cs->number]->state |= MY_CS_LOADED;
}
all_charsets[cs->number]->state|= MY_CS_AVAILABLE;
+
+ /*
+ Check for a case sensitive sort order: A < a < B.
+ We need this flag for the regex library, and as the
+ case sensitivity flag for the 5.0 client protocol,
+ to support the isCaseSensitive() method in the JDBC driver
+ */
+ if (sort_order && sort_order['A'] < sort_order['a'] &&
+ sort_order['a'] < sort_order['B'])
+ all_charsets[cs->number]->state|= MY_CS_CSSORT;
}
}
else
diff --git a/ndb/docs/wl2077.txt b/ndb/docs/wl2077.txt
new file mode 100644
index 00000000000..5a77c18aa2a
--- /dev/null
+++ b/ndb/docs/wl2077.txt
@@ -0,0 +1,35 @@
+
+100' * (select 1 from T1 (1M rows) where key = rand());
+1 host, 1 ndbd, api co-hosted
+results in 1000 rows / sec
+
+ wo/reset bounds w/ rb
+4.1-read committed a) 4.9 b) 7.4
+4.1-read hold lock c) 4.7 d) 6.7
+
+wl2077-read committed 6.4 (+30%) 10.8 (+45%)
+wl2077-read hold lock 4.6 (-1%) 6.7 (+ 0%)
+
+-- Comparison e)
+serial pk: 10.9'
+batched (1000): 59'
+serial uniq index: 8.4'
+batched (1000): 33'
+index range (1000): 186'
+
+----
+
+load) testScanPerf -c 1 -d 1 T1
+a) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 0 -r 2 -q 0 T1
+b) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 0 -r 2 -q 1 T1
+c) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 1 -r 2 -q 0 T1
+d) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 1 -r 2 -q 1 T1
+e) testReadPerf -i 25 -c 0 -d 0 T1
+
+--- music join 1db-co 2db-co
+
+4.1 13s 14s
+4.1 wo/ blobs 1.7s 3.2s
+
+wl2077 12s 14s
+wl2077 wo/ blobs 1.2s (-30%) 2.5s (-22%)
diff --git a/ndb/include/Makefile.am b/ndb/include/Makefile.am
index ca2e8152352..61f55cf9d61 100644
--- a/ndb/include/Makefile.am
+++ b/ndb/include/Makefile.am
@@ -28,15 +28,14 @@ ndbapi/NdbIndexScanOperation.hpp \
ndbapi/ndberror.h
mgmapiinclude_HEADERS = \
-mgmapi/LocalConfig.hpp \
mgmapi/mgmapi.h \
-mgmapi/mgmapi_debug.h
+mgmapi/mgmapi_debug.h \
+mgmapi/mgmapi_config_parameters.h \
+mgmapi/mgmapi_config_parameters_debug.h
noinst_HEADERS = \
ndb_global.h \
-ndb_net.h \
-mgmapi/mgmapi_config_parameters.h \
-mgmapi/mgmapi_config_parameters_debug.h
+ndb_net.h
EXTRA_DIST = debugger editline kernel logger mgmcommon \
portlib transporter util
diff --git a/ndb/include/mgmapi/mgmapi.h b/ndb/include/mgmapi/mgmapi.h
index f1ef357421b..a23417f153a 100644
--- a/ndb/include/mgmapi/mgmapi.h
+++ b/ndb/include/mgmapi/mgmapi.h
@@ -356,11 +356,26 @@ extern "C" {
/**
* Create a handle to a management server
*
- * @return A management handle<br>
- * or NULL if no management handle could be created.
+ * @return A management handle<br>
+ * or NULL if no management handle could be created.
*/
NdbMgmHandle ndb_mgm_create_handle();
+ /**
+ * Set connect string to the management server
+ *
+ * @param handle Management handle
+ * @param connect_string Connect string to the management server,
+ *
+ * @return -1 on error.
+ */
+ int ndb_mgm_set_connectstring(NdbMgmHandle handle,
+ const char *connect_string);
+
+ int ndb_mgm_get_configuration_nodeid(NdbMgmHandle handle);
+ int ndb_mgm_get_connected_port(NdbMgmHandle handle);
+ const char *ndb_mgm_get_connected_host(NdbMgmHandle handle);
+
/**
* Destroy a management server handle
*
@@ -378,11 +393,10 @@ extern "C" {
* Connect to a management server
*
* @param handle Management handle.
- * @param mgmsrv Hostname and port of the management server,
- * "hostname:port".
* @return -1 on error.
*/
- int ndb_mgm_connect(NdbMgmHandle handle, const char * mgmsrv);
+ int ndb_mgm_connect(NdbMgmHandle handle, int no_retries,
+ int retry_delay_in_seconds, int verbose);
/**
* Disconnect from a management server
@@ -709,9 +723,7 @@ extern "C" {
void ndb_mgm_destroy_configuration(struct ndb_mgm_configuration *);
int ndb_mgm_alloc_nodeid(NdbMgmHandle handle,
- unsigned version,
- unsigned *pnodeid,
- int nodetype);
+ unsigned version, int nodetype);
/**
* Config iterator
*/
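
A minimal usage sketch (not part of this patch) of the revised mgmapi connection sequence declared above; the wrapper name, the retry/delay/verbosity values, and the error handling are illustrative, and handle cleanup is omitted:

  #include <stdio.h>
  #include <mgmapi.h>

  static int mgm_connect_example(const char *connect_string)
  {
    NdbMgmHandle h = ndb_mgm_create_handle();
    if (h == NULL)
      return -1;

    /* the connect string is now set on the handle before connecting */
    if (ndb_mgm_set_connectstring(h, connect_string) == -1)
      return -1;

    /* new signature: number of retries, delay between retries (s), verbosity */
    if (ndb_mgm_connect(h, 3, 5, 1) == -1)
      return -1;

    printf("connected to %s:%d\n",
           ndb_mgm_get_connected_host(h),
           ndb_mgm_get_connected_port(h));
    return 0;
  }
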
diff --git a/ndb/include/mgmcommon/ConfigRetriever.hpp b/ndb/include/mgmcommon/ConfigRetriever.hpp
index 6c32255e921..80449628867 100644
--- a/ndb/include/mgmcommon/ConfigRetriever.hpp
+++ b/ndb/include/mgmcommon/ConfigRetriever.hpp
@@ -20,7 +20,6 @@
#include <ndb_types.h>
#include <mgmapi.h>
#include <BaseString.hpp>
-#include <LocalConfig.hpp>
/**
* @class ConfigRetriever
@@ -28,10 +27,11 @@
*/
class ConfigRetriever {
public:
- ConfigRetriever(LocalConfig &local_config, Uint32 version, Uint32 nodeType);
+ ConfigRetriever(const char * _connect_string,
+ Uint32 version, Uint32 nodeType);
~ConfigRetriever();
- int do_connect(int exit_on_connect_failure= false);
+ int do_connect(int no_retries, int retry_delay_in_seconds, int verbose);
/**
* Get configuration for current node.
@@ -46,12 +46,14 @@ public:
*/
struct ndb_mgm_configuration * getConfig();
+ void resetError();
+ int hasError();
const char * getErrorString();
/**
* @return Node id of this node (as stated in local config or connectString)
*/
- Uint32 allocNodeId();
+ Uint32 allocNodeId(int no_retries, int retry_delay_in_seconds);
/**
* Get config using socket
@@ -68,22 +70,26 @@ public:
*/
bool verifyConfig(const struct ndb_mgm_configuration *, Uint32 nodeid);
- Uint32 get_mgmd_port() const {return m_mgmd_port;};
- const char *get_mgmd_host() const {return m_mgmd_host;};
+ Uint32 get_mgmd_port() const;
+ const char *get_mgmd_host() const;
+
+ Uint32 get_configuration_nodeid() const;
private:
BaseString errorString;
enum ErrorType {
- CR_ERROR = 0,
- CR_RETRY = 1
+ CR_NO_ERROR = 0,
+ CR_ERROR = 1,
+ CR_RETRY = 2
};
ErrorType latestErrorType;
void setError(ErrorType, const char * errorMsg);
- struct LocalConfig& _localConfig;
- Uint32 _ownNodeId;
+ Uint32 _ownNodeId;
+ /*
Uint32 m_mgmd_port;
const char *m_mgmd_host;
+ */
Uint32 m_version;
Uint32 m_node_type;
diff --git a/ndb/include/ndbapi/NdbConnection.hpp b/ndb/include/ndbapi/NdbConnection.hpp
index 7af5d27b922..256199dced7 100644
--- a/ndb/include/ndbapi/NdbConnection.hpp
+++ b/ndb/include/ndbapi/NdbConnection.hpp
@@ -607,8 +607,8 @@ private:
NdbOperation* theLastExecOpInList; // Last executing operation in list.
- NdbOperation* theCompletedFirstOp; // First operation in completed
- // operation list.
+ NdbOperation* theCompletedFirstOp; // First & last operation in completed
+ NdbOperation* theCompletedLastOp; // operation list.
Uint32 theNoOfOpSent; // How many operations have been sent
Uint32 theNoOfOpCompleted; // How many operations have completed
diff --git a/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/ndb/include/ndbapi/NdbIndexScanOperation.hpp
index 66b3fc9d43b..a3388f62f58 100644
--- a/ndb/include/ndbapi/NdbIndexScanOperation.hpp
+++ b/ndb/include/ndbapi/NdbIndexScanOperation.hpp
@@ -113,7 +113,7 @@ public:
* Reset bounds and put operation in list that will be
* sent on next execute
*/
- int reset_bounds();
+ int reset_bounds(bool forceSend = false);
bool getSorted() const { return m_ordered; }
private:
@@ -127,8 +127,8 @@ private:
virtual NdbRecAttr* getValue_impl(const NdbColumnImpl*, char*);
void fix_get_values();
- int next_result_ordered(bool fetchAllowed);
- int send_next_scan_ordered(Uint32 idx);
+ int next_result_ordered(bool fetchAllowed, bool forceSend = false);
+ int send_next_scan_ordered(Uint32 idx, bool forceSend = false);
int compare(Uint32 key, Uint32 cols, const NdbReceiver*, const NdbReceiver*);
Uint32 m_sort_columns;
diff --git a/ndb/include/ndbapi/NdbResultSet.hpp b/ndb/include/ndbapi/NdbResultSet.hpp
index 478daf8aad2..dc0288a380c 100644
--- a/ndb/include/ndbapi/NdbResultSet.hpp
+++ b/ndb/include/ndbapi/NdbResultSet.hpp
@@ -89,17 +89,17 @@ public:
* - 1: if there are no more tuples to scan.
* - 2: if there are no more cached records in NdbApi
*/
- int nextResult(bool fetchAllowed = true);
+ int nextResult(bool fetchAllowed = true, bool forceSend = false);
/**
* Close result set (scan)
*/
- void close();
+ void close(bool forceSend = false);
/**
* Restart
*/
- int restart();
+ int restart(bool forceSend = false);
/**
* Transfer scan operation to an updating transaction. Use this function
diff --git a/ndb/include/ndbapi/NdbScanOperation.hpp b/ndb/include/ndbapi/NdbScanOperation.hpp
index 2e4d173ac75..3c95c79e776 100644
--- a/ndb/include/ndbapi/NdbScanOperation.hpp
+++ b/ndb/include/ndbapi/NdbScanOperation.hpp
@@ -90,11 +90,11 @@ protected:
NdbScanOperation(Ndb* aNdb);
virtual ~NdbScanOperation();
- int nextResult(bool fetchAllowed = true);
+ int nextResult(bool fetchAllowed = true, bool forceSend = false);
virtual void release();
- void closeScan();
- int close_impl(class TransporterFacade*);
+ void closeScan(bool forceSend = false);
+ int close_impl(class TransporterFacade*, bool forceSend = false);
// Overloaded methods from NdbCursorOperation
int executeCursor(int ProcessorId);
@@ -103,6 +103,7 @@ protected:
int init(const NdbTableImpl* tab, NdbConnection* myConnection);
int prepareSend(Uint32 TC_ConnectPtr, Uint64 TransactionId);
int doSend(int ProcessorId);
+ void checkForceSend(bool forceSend);
virtual void setErrorCode(int aErrorCode);
virtual void setErrorCodeAbort(int aErrorCode);
@@ -138,7 +139,7 @@ protected:
Uint32 m_sent_receivers_count; // NOTE needs mutex to access
NdbReceiver** m_sent_receivers; // receive thread puts them here
- int send_next_scan(Uint32 cnt, bool close);
+ int send_next_scan(Uint32 cnt, bool close, bool forceSend = false);
void receiver_delivered(NdbReceiver*);
void receiver_completed(NdbReceiver*);
void execCLOSE_SCAN_REP();
@@ -148,7 +149,7 @@ protected:
Uint32 m_ordered;
- int restart();
+ int restart(bool forceSend = false);
};
inline
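The scan interface grows an optional forceSend flag on nextResult(), close(),
restart() and the related internals; passing true asks the API to flush buffered
signals at once instead of leaving it to the adaptive send logic. A hedged
application-side sketch (transaction and scan setup are abbreviated, the variable
names are invented):

    NdbResultSet *rs = scanOp->readTuples();      // set up earlier, not shown
    int check;
    while ((check = rs->nextResult(true, /*forceSend=*/true)) == 0) {
      // ... consume the current row ...
    }
    rs->close(/*forceSend=*/true);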
diff --git a/ndb/include/ndbapi/ndb_cluster_connection.hpp b/ndb/include/ndbapi/ndb_cluster_connection.hpp
index f8e6f25ce73..59d5a038844 100644
--- a/ndb/include/ndbapi/ndb_cluster_connection.hpp
+++ b/ndb/include/ndbapi/ndb_cluster_connection.hpp
@@ -19,7 +19,6 @@
#define CLUSTER_CONNECTION_HPP
class TransporterFacade;
-class LocalConfig;
class ConfigRetriever;
class NdbThread;
@@ -38,7 +37,6 @@ private:
void connect_thread();
char *m_connect_string;
TransporterFacade *m_facade;
- LocalConfig *m_local_config;
ConfigRetriever *m_config_retriever;
NdbThread *m_connect_thread;
int (*m_connect_callback)(void);
diff --git a/ndb/include/util/ndb_opts.h b/ndb/include/util/ndb_opts.h
index 6cba9c04449..f7ae3b5489e 100644
--- a/ndb/include/util/ndb_opts.h
+++ b/ndb/include/util/ndb_opts.h
@@ -32,10 +32,13 @@
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, \
{ "version", 'V', "Output version information and exit.", 0, 0, 0, \
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, \
- { "connect-string", 'c', \
+ { "ndb-connectstring", 'c', \
"Set connect string for connecting to ndb_mgmd. " \
- "<constr>=\"host=<hostname:port>[;nodeid=<id>]\". " \
- "Overides specifying entries in NDB_CONNECTSTRING and config file", \
+ "Syntax: \"[nodeid=<id>;][host=]<hostname>[:<port>]\". " \
+ "Overides specifying entries in NDB_CONNECTSTRING and Ndb.cfg", \
+ (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0, \
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
+ { "connect-string", 'c', "same as --ndb-connectstring",\
(gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0, \
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }
#else
@@ -46,11 +49,14 @@
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, \
{ "version", 'V', "Output version information and exit.", 0, 0, 0, \
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, \
- { "connect-string", 'c', \
+ { "ndb-connectstring", 'c', \
"Set connect string for connecting to ndb_mgmd. " \
- "<constr>=\"host=<hostname:port>[;nodeid=<id>]\". " \
- "Overides specifying entries in NDB_CONNECTSTRING and config file", \
+ "Syntax: \"[nodeid=<id>;][host=]<hostname>[:<port>]\". " \
+ "Overides specifying entries in NDB_CONNECTSTRING and Ndb.cfg", \
(gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0, \
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
+ { "connect-string", 'c', "same as --ndb-connectstring",\
+ (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0,\
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }
#endif
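The -c option is renamed to --ndb-connectstring (--connect-string stays as an alias)
and the documented syntax becomes "[nodeid=<id>;][host=]<hostname>[:<port>]". The
same string is what the new ndb_mgm_set_connectstring() call accepts; a small sketch
with an invented host name, node id and port:

    NdbMgmHandle h = ndb_mgm_create_handle();
    if (ndb_mgm_set_connectstring(h, "nodeid=3;host=mgm-host:1186") != 0)
      fprintf(stderr, "bad connect string: %s\n",
              ndb_mgm_get_latest_error_desc(h));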
diff --git a/ndb/src/common/debugger/signaldata/ScanTab.cpp b/ndb/src/common/debugger/signaldata/ScanTab.cpp
index 72a4d9f94b9..0755ee0a856 100644
--- a/ndb/src/common/debugger/signaldata/ScanTab.cpp
+++ b/ndb/src/common/debugger/signaldata/ScanTab.cpp
@@ -30,13 +30,14 @@ printSCANTABREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiv
fprintf(output, " apiConnectPtr: H\'%.8x",
sig->apiConnectPtr);
fprintf(output, " requestInfo: H\'%.8x:\n", requestInfo);
- fprintf(output, " Parallellism: %u, Batch: %u LockMode: %u, Keyinfo: %u Holdlock: %u, RangeScan: %u\n",
+ fprintf(output, " Parallellism: %u, Batch: %u LockMode: %u, Keyinfo: %u Holdlock: %u, RangeScan: %u ReadCommitted: %u\n",
sig->getParallelism(requestInfo),
sig->getScanBatch(requestInfo),
sig->getLockMode(requestInfo),
+ sig->getKeyinfoFlag(requestInfo),
sig->getHoldLockFlag(requestInfo),
sig->getRangeScanFlag(requestInfo),
- sig->getKeyinfoFlag(requestInfo));
+ sig->getReadCommittedFlag(requestInfo));
Uint32 keyLen = (sig->attrLenKeyLen >> 16);
Uint32 attrLen = (sig->attrLenKeyLen & 0xFFFF);
diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp
index a1b979f62d8..0af5eb2f83c 100644
--- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp
+++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp
@@ -20,7 +20,6 @@
#include <ConfigRetriever.hpp>
#include <SocketServer.hpp>
-#include "LocalConfig.hpp"
#include <NdbSleep.h>
#include <NdbOut.hpp>
@@ -45,90 +44,62 @@
//****************************************************************************
//****************************************************************************
-ConfigRetriever::ConfigRetriever(LocalConfig &local_config,
+ConfigRetriever::ConfigRetriever(const char * _connect_string,
Uint32 version, Uint32 node_type)
- : _localConfig(local_config)
{
- m_handle= 0;
m_version = version;
m_node_type = node_type;
- _ownNodeId = _localConfig._ownNodeId;
-}
+ _ownNodeId= 0;
-ConfigRetriever::~ConfigRetriever(){
+ m_handle= ndb_mgm_create_handle();
+ if (m_handle == 0) {
+ setError(CR_ERROR, "Unable to allocate mgm handle");
+ return;
+ }
+
+ if (ndb_mgm_set_connectstring(m_handle, _connect_string))
+ {
+ setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle));
+ return;
+ }
+ resetError();
+}
+
+ConfigRetriever::~ConfigRetriever()
+{
if (m_handle) {
ndb_mgm_disconnect(m_handle);
ndb_mgm_destroy_handle(&m_handle);
}
}
+Uint32
+ConfigRetriever::get_configuration_nodeid() const
+{
+ return ndb_mgm_get_configuration_nodeid(m_handle);
+}
+
+Uint32 ConfigRetriever::get_mgmd_port() const
+{
+ return ndb_mgm_get_connected_port(m_handle);
+}
+
+const char *ConfigRetriever::get_mgmd_host() const
+{
+ return ndb_mgm_get_connected_host(m_handle);
+}
//****************************************************************************
//****************************************************************************
int
-ConfigRetriever::do_connect(int exit_on_connect_failure){
-
- m_mgmd_port= 0;
- m_mgmd_host= 0;
-
- if(!m_handle)
- m_handle= ndb_mgm_create_handle();
-
- if (m_handle == 0) {
- setError(CR_ERROR, "Unable to allocate mgm handle");
- return -1;
- }
-
- int retry = 1;
- int retry_max = 12; // Max number of retry attempts
- int retry_interval= 5; // Seconds between each retry
- while(retry < retry_max){
- Uint32 type = CR_ERROR;
- BaseString tmp;
- for (unsigned int i = 0; i<_localConfig.ids.size(); i++){
- MgmtSrvrId * m = &_localConfig.ids[i];
- DBUG_PRINT("info",("trying %s:%d",
- m->name.c_str(),
- m->port));
- switch(m->type){
- case MgmId_TCP:
- tmp.assfmt("%s:%d", m->name.c_str(), m->port);
- if (ndb_mgm_connect(m_handle, tmp.c_str()) == 0) {
- m_mgmd_port= m->port;
- m_mgmd_host= m->name.c_str();
- DBUG_PRINT("info",("connected to ndb_mgmd at %s:%d",
- m_mgmd_host,
- m_mgmd_port));
- return 0;
- }
- setError(CR_RETRY, ndb_mgm_get_latest_error_desc(m_handle));
- case MgmId_File:
- break;
- }
- }
- if(latestErrorType == CR_RETRY){
- DBUG_PRINT("info",("CR_RETRY"));
- if (exit_on_connect_failure)
- return 1;
- REPORT_WARNING("Failed to retrieve cluster configuration");
- ndbout << "(Cause of failure: " << getErrorString() << ")" << endl;
- ndbout << "Attempt " << retry << " of " << retry_max << ". "
- << "Trying again in "<< retry_interval <<" seconds..."
- << endl << endl;
- NdbSleep_SecSleep(retry_interval);
- } else {
- break;
- }
- retry++;
- }
-
- ndb_mgm_destroy_handle(&m_handle);
- m_handle= 0;
- m_mgmd_port= 0;
- m_mgmd_host= 0;
- return -1;
+ConfigRetriever::do_connect(int no_retries,
+ int retry_delay_in_seconds, int verbose)
+{
+ return
+ (ndb_mgm_connect(m_handle,no_retries,retry_delay_in_seconds,verbose)==0) ?
+ 0 : -1;
}
//****************************************************************************
@@ -140,22 +111,9 @@ ConfigRetriever::getConfig() {
struct ndb_mgm_configuration * p = 0;
- if(m_handle != 0){
+ if(m_handle != 0)
p = getConfig(m_handle);
- } else {
- for (unsigned int i = 0; i<_localConfig.ids.size(); i++){
- MgmtSrvrId * m = &_localConfig.ids[i];
- switch(m->type){
- case MgmId_File:
- p = getConfig(m->name.c_str());
- break;
- case MgmId_TCP:
- break;
- }
- if(p)
- break;
- }
- }
+
if(p == 0)
return 0;
@@ -227,6 +185,16 @@ ConfigRetriever::setError(ErrorType et, const char * s){
latestErrorType = et;
}
+void
+ConfigRetriever::resetError(){
+ setError(CR_NO_ERROR,0);
+}
+
+int
+ConfigRetriever::hasError()
+{
+ return latestErrorType != CR_NO_ERROR;
+}
const char *
ConfigRetriever::getErrorString(){
@@ -341,16 +309,23 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32
}
Uint32
-ConfigRetriever::allocNodeId(){
- unsigned nodeid= _ownNodeId;
-
- if(m_handle != 0){
- int res= ndb_mgm_alloc_nodeid(m_handle, m_version, &nodeid, m_node_type);
- if(res != 0) {
- setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle));
- return 0;
+ConfigRetriever::allocNodeId(int no_retries, int retry_delay_in_seconds)
+{
+ _ownNodeId= 0;
+ if(m_handle != 0)
+ {
+ while (1)
+ {
+ int res= ndb_mgm_alloc_nodeid(m_handle, m_version, m_node_type);
+ if(res >= 0)
+ return _ownNodeId= (Uint32)res;
+ if (no_retries == 0)
+ break;
+ no_retries--;
+ NdbSleep_SecSleep(retry_delay_in_seconds);
}
- }
-
- return _ownNodeId= nodeid;
+ setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle));
+ } else
+ setError(CR_ERROR, "management server handle not initialized");
+ return 0;
}
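With the retry loop moved inside allocNodeId(), a call boils down to a bounded
polling loop: one request, then up to no_retries further attempts separated by
retry_delay_in_seconds sleeps, returning 0 (with getErrorString() set) if all of
them fail. With the illustrative values ndbd now passes, the worst case is three
requests and about six seconds of waiting:

    Uint32 id = cr.allocNodeId(2 /*no_retries*/, 3 /*retry_delay_in_seconds*/);
    // request -> sleep 3s -> request -> sleep 3s -> request -> give up (returns 0)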
diff --git a/ndb/src/common/util/version.c b/ndb/src/common/util/version.c
index 965d0a735e1..7a537297861 100644
--- a/ndb/src/common/util/version.c
+++ b/ndb/src/common/util/version.c
@@ -70,7 +70,6 @@ struct NdbUpGradeCompatible {
#ifndef TEST_VERSION
struct NdbUpGradeCompatible ndbCompatibleTable_full[] = {
{ MAKE_VERSION(3,5,2), MAKE_VERSION(3,5,1), UG_Exact },
- { MAKE_VERSION(4,1,8), MAKE_VERSION(3,5,4), UG_Exact }, /* Aligned version with MySQL */
{ 0, 0, UG_Null }
};
diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt
index 7ff03684cff..5193d3eae9d 100644
--- a/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -1,7 +1,7 @@
Next QMGR 1
Next NDBCNTR 1000
Next NDBFS 2000
-Next DBACC 3001
+Next DBACC 3002
Next DBTUP 4013
Next DBLQH 5042
Next DBDICT 6006
@@ -393,6 +393,7 @@ Failed Create Table:
--------------------
7173: Create table failed due to not sufficient number of fragment or
replica records.
+3001: Fail create 1st fragment
4007 12001: Fail create 1st fragment
4008 12002: Fail create 2nd fragment
4009 12003: Fail create 1st attribute in 1st fragment
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index c275e5382f7..5c7cc597672 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -1062,7 +1062,21 @@ void Dbacc::execACCFRAGREQ(Signal* signal)
{
const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
jamEntry();
+ if (ERROR_INSERTED(3001)) {
+ jam();
+ addFragRefuse(signal, 1);
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
tabptr.i = req->tableId;
+#ifndef VM_TRACE
+ // config mismatch - do not crash if release compiled
+ if (tabptr.i >= ctablesize) {
+ jam();
+ addFragRefuse(signal, 800);
+ return;
+ }
+#endif
ptrCheckGuard(tabptr, ctablesize, tabrec);
ndbrequire((req->reqInfo & 0xF) == ZADDFRAG);
ndbrequire(!getrootfragmentrec(signal, rootfragrecptr, req->fragId));
diff --git a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index 14fa262f871..0a2d50cb876 100644
--- a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -147,7 +147,6 @@ public:
Uint32 nfConnect;
Uint32 table;
Uint32 userpointer;
- Uint32 nodeCount;
BlockReference userblockref;
};
typedef Ptr<ConnectRecord> ConnectRecordPtr;
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 76aa745c3e0..4592b121c7e 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -7080,24 +7080,22 @@ void Dbdih::execDIGETPRIMREQ(Signal* signal)
ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
connectPtr.i = signal->theData[0];
- if(connectPtr.i != RNIL){
+ if(connectPtr.i != RNIL)
+ {
jam();
ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
- ndbrequire(connectPtr.p->connectState == ConnectRecord::INUSE);
- getFragstore(tabPtr.p, fragId, fragPtr);
- connectPtr.p->nodeCount = extractNodeInfo(fragPtr.p, connectPtr.p->nodes);
signal->theData[0] = connectPtr.p->userpointer;
- signal->theData[1] = passThrough;
- signal->theData[2] = connectPtr.p->nodes[0];
- sendSignal(connectPtr.p->userblockref, GSN_DIGETPRIMCONF, signal, 3, JBB);
- return;
- }//if
- //connectPtr.i == RNIL -> question without connect record
+ }
+ else
+ {
+ jam();
+ signal->theData[0] = RNIL;
+ }
+
Uint32 nodes[MAX_REPLICAS];
getFragstore(tabPtr.p, fragId, fragPtr);
Uint32 count = extractNodeInfo(fragPtr.p, nodes);
- signal->theData[0] = RNIL;
signal->theData[1] = passThrough;
signal->theData[2] = nodes[0];
signal->theData[3] = nodes[1];
diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 739c3c741fb..0c63cb5fe17 100644
--- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -550,6 +550,11 @@ public:
UintR scanErrorCounter;
UintR scanLocalFragid;
UintR scanSchemaVersion;
+
+ /**
+ * This is _always_ the main table, even in a range scan,
+ * in which case scanTcrec->fragmentptr is different
+ */
Uint32 fragPtrI;
UintR scanStoredProcId;
ScanState scanState;
@@ -2925,4 +2930,23 @@ Dblqh::ScanRecord::check_scan_batch_completed() const
(max_bytes > 0 && (m_curr_batch_size_bytes >= max_bytes));
}
+inline
+void
+Dblqh::i_get_acc_ptr(ScanRecord* scanP, Uint32* &acc_ptr, Uint32 index)
+{
+ if (index == 0) {
+ acc_ptr= (Uint32*)&scanP->scan_acc_op_ptr[0];
+ } else {
+ Uint32 attr_buf_index, attr_buf_rec;
+
+ AttrbufPtr regAttrPtr;
+ jam();
+ attr_buf_rec= (index + 31) / 32;
+ attr_buf_index= (index - 1) & 31;
+ regAttrPtr.i= scanP->scan_acc_op_ptr[attr_buf_rec];
+ ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
+ acc_ptr= (Uint32*)&regAttrPtr.p->attrbuf[attr_buf_index];
+ }
+}
+
#endif
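i_get_acc_ptr() moves into the header so it can be inlined. Entry 0 is stored
directly in scan_acc_op_ptr[0]; every further entry lives in an attrbuf record of
32 words reached through scan_acc_op_ptr[attr_buf_rec]. A standalone sketch of just
the index arithmetic used above:

    // index 1..32  -> attr_buf_rec 1, attr_buf_index 0..31
    // index 33..64 -> attr_buf_rec 2, attr_buf_index 0..31
    Uint32 attr_buf_rec   = (index + 31) / 32;   // which attrbuf record holds it
    Uint32 attr_buf_index = (index - 1) & 31;    // word inside that record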
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 5622706a96c..88e8f25b004 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -3084,6 +3084,7 @@ void Dblqh::execATTRINFO(Signal* signal)
return;
break;
default:
+ ndbout_c("%d", regTcPtr->transactionState);
ndbrequire(false);
break;
}//switch
@@ -7161,10 +7162,7 @@ void Dblqh::continueScanNextReqLab(Signal* signal)
// Update timer on tcConnectRecord
tcConnectptr.p->tcTimer = cLqhTimeOutCount;
-
init_acc_ptr_list(scanptr.p);
- scanptr.p->m_curr_batch_size_rows = 0;
- scanptr.p->m_curr_batch_size_bytes= 0;
scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT;
scanNextLoopLab(signal);
}//Dblqh::continueScanNextReqLab()
@@ -7363,22 +7361,32 @@ void Dblqh::scanLockReleasedLab(Signal* signal)
tcConnectptr.i = scanptr.p->scanTcrec;
ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
releaseActiveFrag(signal);
+
if (scanptr.p->scanReleaseCounter == scanptr.p->m_curr_batch_size_rows) {
if ((scanptr.p->scanErrorCounter > 0) ||
(scanptr.p->scanCompletedStatus == ZTRUE)) {
jam();
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes = 0;
closeScanLab(signal);
} else if (scanptr.p->check_scan_batch_completed() &&
scanptr.p->scanLockHold != ZTRUE) {
jam();
scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
sendScanFragConf(signal, ZFALSE);
+ } else if (scanptr.p->m_last_row && !scanptr.p->scanLockHold) {
+ jam();
+ closeScanLab(signal);
+ return;
} else {
jam();
/*
- We came here after releasing locks after receiving SCAN_NEXTREQ from TC. We only
- come here when scanHoldLock == ZTRUE
- */
+ * We came here after releasing locks after
+ * receiving SCAN_NEXTREQ from TC. We only come here
+ * when scanHoldLock == ZTRUE
+ */
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes = 0;
continueScanNextReqLab(signal);
}//if
} else if (scanptr.p->scanReleaseCounter < scanptr.p->m_curr_batch_size_rows) {
@@ -7465,25 +7473,6 @@ Dblqh::init_acc_ptr_list(ScanRecord* scanP)
scanP->scan_acc_index = 0;
}
-inline
-void
-Dblqh::i_get_acc_ptr(ScanRecord* scanP, Uint32* &acc_ptr, Uint32 index)
-{
- if (index == 0) {
- acc_ptr= (Uint32*)&scanP->scan_acc_op_ptr[0];
- } else {
- Uint32 attr_buf_index, attr_buf_rec;
-
- AttrbufPtr regAttrPtr;
- jam();
- attr_buf_rec= (index + 31) / 32;
- attr_buf_index= (index - 1) & 31;
- regAttrPtr.i= scanP->scan_acc_op_ptr[attr_buf_rec];
- ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
- acc_ptr= (Uint32*)&regAttrPtr.p->attrbuf[attr_buf_index];
- }
-}
-
Uint32
Dblqh::get_acc_ptr_from_scan_record(ScanRecord* scanP,
Uint32 index,
@@ -7714,6 +7703,9 @@ void Dblqh::abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode){
jam();
scanptr.i = scan_ptr_i;
c_scanRecordPool.getPtr(scanptr);
+
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
finishScanrec(signal);
releaseScanrec(signal);
tcConnectptr.p->transactionState = TcConnectionrec::IDLE;
@@ -8007,6 +7999,13 @@ void Dblqh::nextScanConfScanLab(Signal* signal)
/*************************************************************
* STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
************************************************************ */
+ if (!scanptr.p->scanLockHold)
+ {
+ jam();
+ closeScanLab(signal);
+ return;
+ }
+
if (scanptr.p->scanCompletedStatus == ZTRUE) {
if ((scanptr.p->scanLockHold == ZTRUE) &&
(scanptr.p->m_curr_batch_size_rows > 0)) {
@@ -8507,8 +8506,6 @@ void Dblqh::tupScanCloseConfLab(Signal* signal)
ScanFragRef::SignalLength, JBB);
} else {
jam();
- scanptr.p->m_curr_batch_size_rows = 0;
- scanptr.p->m_curr_batch_size_bytes= 0;
sendScanFragConf(signal, ZSCAN_FRAG_CLOSED);
}//if
finishScanrec(signal);
@@ -8576,10 +8573,12 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
/**
* Used for scan take over
*/
- FragrecordPtr tFragPtr;
- tFragPtr.i = fragptr.p->tableFragptr;
- ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
- scanptr.p->fragPtrI = fragptr.p->tableFragptr;
+ {
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = fragptr.p->tableFragptr;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+ scanptr.p->fragPtrI = fragptr.p->tableFragptr;
+ }
/**
* !idx uses 1 - (MAX_PARALLEL_SCANS_PER_FRAG - 1) = 1-11
@@ -8588,8 +8587,8 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
Uint32 start = (idx ? MAX_PARALLEL_SCANS_PER_FRAG : 1 );
Uint32 stop = (idx ? MAX_PARALLEL_INDEX_SCANS_PER_FRAG : MAX_PARALLEL_SCANS_PER_FRAG - 1);
stop += start;
- Uint32 free = tFragPtr.p->m_scanNumberMask.find(start);
-
+ Uint32 free = fragptr.p->m_scanNumberMask.find(start);
+
if(free == Fragrecord::ScanNumberMask::NotFound || free >= stop){
jam();
@@ -8603,16 +8602,16 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
*/
scanptr.p->scanState = ScanRecord::IN_QUEUE;
LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
- tFragPtr.p->m_queuedScans);
+ fragptr.p->m_queuedScans);
queue.add(scanptr);
return ZOK;
}
-
+
scanptr.p->scanNumber = free;
- tFragPtr.p->m_scanNumberMask.clear(free);// Update mask
+ fragptr.p->m_scanNumberMask.clear(free);// Update mask
- LocalDLList<ScanRecord> active(c_scanRecordPool, tFragPtr.p->m_activeScans);
+ LocalDLList<ScanRecord> active(c_scanRecordPool, fragptr.p->m_activeScans);
active.add(scanptr);
if(scanptr.p->scanKeyinfoFlag){
jam();
@@ -8672,12 +8671,8 @@ void Dblqh::finishScanrec(Signal* signal)
{
release_acc_ptr_list(scanptr.p);
- FragrecordPtr tFragPtr;
- tFragPtr.i = scanptr.p->fragPtrI;
- ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
-
LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
- tFragPtr.p->m_queuedScans);
+ fragptr.p->m_queuedScans);
if(scanptr.p->scanState == ScanRecord::IN_QUEUE){
jam();
@@ -8695,11 +8690,11 @@ void Dblqh::finishScanrec(Signal* signal)
ndbrequire(tmp.p == scanptr.p);
}
- LocalDLList<ScanRecord> scans(c_scanRecordPool, tFragPtr.p->m_activeScans);
+ LocalDLList<ScanRecord> scans(c_scanRecordPool, fragptr.p->m_activeScans);
scans.release(scanptr);
const Uint32 scanNumber = scanptr.p->scanNumber;
- ndbrequire(!tFragPtr.p->m_scanNumberMask.get(scanNumber));
+ ndbrequire(!fragptr.p->m_scanNumberMask.get(scanNumber));
ScanRecordPtr restart;
/**
@@ -8707,13 +8702,13 @@ void Dblqh::finishScanrec(Signal* signal)
*/
if(scanNumber == NR_ScanNo || !queue.first(restart)){
jam();
- tFragPtr.p->m_scanNumberMask.set(scanNumber);
+ fragptr.p->m_scanNumberMask.set(scanNumber);
return;
}
if(ERROR_INSERTED(5034)){
jam();
- tFragPtr.p->m_scanNumberMask.set(scanNumber);
+ fragptr.p->m_scanNumberMask.set(scanNumber);
return;
}
@@ -8724,7 +8719,7 @@ void Dblqh::finishScanrec(Signal* signal)
ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
restart.p->scanNumber = scanNumber;
restart.p->scanState = ScanRecord::WAIT_ACC_SCAN;
-
+
queue.remove(restart);
scans.add(restart);
if(restart.p->scanKeyinfoFlag){
@@ -8912,6 +8907,13 @@ void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted)
conf->total_len= total_len;
sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGCONF,
signal, ScanFragConf::SignalLength, JBB);
+
+ if(!scanptr.p->scanLockHold)
+ {
+ jam();
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes= 0;
+ }
}//Dblqh::sendScanFragConf()
/* ######################################################################### */
diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index a209df24c44..fb90ccc8c90 100644
--- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -1054,9 +1054,8 @@ public:
// Id of the ScanRecord this fragment scan belongs to
Uint32 scanRec;
- // The maximum number of operations that can be scanned before
- // returning to TC
- Uint16 scanFragConcurrency;
+ // The value of fragmentCompleted in the last received SCAN_FRAGCONF
+ Uint8 m_scan_frag_conf_status;
inline void startFragTimer(Uint32 timeVal){
scanFragTimer = timeVal;
@@ -1193,8 +1192,10 @@ public:
// Number of operation records per scanned fragment
// Number of operations in first batch
// Max number of bytes per batch
- Uint16 noOprecPerFrag;
- Uint16 first_batch_size;
+ union {
+ Uint16 first_batch_size_rows;
+ Uint16 batch_size_rows;
+ };
Uint32 batch_byte_size;
Uint32 scanRequestInfo; // ScanFrag format
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index d8b3ee10532..07dbb370ec6 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -8646,9 +8646,9 @@ void Dbtc::initScanrec(ScanRecordPtr scanptr,
scanptr.p->scanTableref = tabptr.i;
scanptr.p->scanSchemaVersion = scanTabReq->tableSchemaVersion;
scanptr.p->scanParallel = scanParallel;
- scanptr.p->noOprecPerFrag = noOprecPerFrag;
- scanptr.p->first_batch_size= scanTabReq->first_batch_size;
- scanptr.p->batch_byte_size= scanTabReq->batch_byte_size;
+ scanptr.p->first_batch_size_rows = scanTabReq->first_batch_size;
+ scanptr.p->batch_byte_size = scanTabReq->batch_byte_size;
+ scanptr.p->batch_size_rows = noOprecPerFrag;
Uint32 tmp = 0;
const UintR ri = scanTabReq->requestInfo;
@@ -8672,7 +8672,6 @@ void Dbtc::initScanrec(ScanRecordPtr scanptr,
ndbrequire(list.seize(ptr));
ptr.p->scanRec = scanptr.i;
ptr.p->scanFragId = 0;
- ptr.p->scanFragConcurrency = noOprecPerFrag;
ptr.p->m_apiPtr = cdata[i];
}//for
@@ -8945,6 +8944,25 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal)
scanptr.i = scanFragptr.p->scanRec;
ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ /**
+ * This must be false; otherwise select count(*) can
+ * "pass" committing on backup fragments and
+ * get an incorrect row count
+ */
+ if(false && ScanFragReq::getReadCommittedFlag(scanptr.p->scanRequestInfo))
+ {
+ jam();
+ Uint32 max = 3+signal->theData[6];
+ Uint32 nodeid = getOwnNodeId();
+ for(Uint32 i = 3; i<max; i++)
+ if(signal->theData[i] == nodeid)
+ {
+ jam();
+ tnodeid = nodeid;
+ break;
+ }
+ }
+
{
/**
* Check table
@@ -9141,6 +9159,7 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
const ScanFragConf * const conf = (ScanFragConf*)&signal->theData[0];
const Uint32 noCompletedOps = conf->completedOps;
+ const Uint32 status = conf->fragmentCompleted;
scanFragptr.i = conf->senderData;
c_scan_frag_pool.getPtr(scanFragptr);
@@ -9163,11 +9182,9 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
- const Uint32 status = conf->fragmentCompleted;
-
if(scanptr.p->scanState == ScanRecord::CLOSING_SCAN){
jam();
- if(status == ZFALSE){
+ if(status == 0){
/**
* We have started closing = we sent a close -> ignore this
*/
@@ -9184,11 +9201,11 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
return;
}
- if(status == ZCLOSED && scanptr.p->scanNextFragId < scanptr.p->scanNoFrag){
+ if(noCompletedOps == 0 && status != 0 &&
+ scanptr.p->scanNextFragId < scanptr.p->scanNoFrag){
/**
* Start on next fragment
*/
- ndbrequire(noCompletedOps == 0);
scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
scanFragptr.p->startFragTimer(ctcTimer);
@@ -9218,6 +9235,7 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
scanptr.p->m_queued_count++;
}
+ scanFragptr.p->m_scan_frag_conf_status = status;
scanFragptr.p->m_ops = noCompletedOps;
scanFragptr.p->m_totalLen = total_len;
scanFragptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY;
@@ -9311,7 +9329,6 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal)
/*********************************************************************
* APPLICATION IS CLOSING THE SCAN.
**********************************************************************/
- ndbrequire(len == 0);
close_scan_req(signal, scanptr, true);
return;
}//if
@@ -9330,11 +9347,12 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal)
// Copy op ptrs so I dont overwrite them when sending...
memcpy(signal->getDataPtrSend()+25, signal->getDataPtr()+4, 4 * len);
- ScanFragNextReq * nextReq = (ScanFragNextReq*)&signal->theData[0];
- nextReq->closeFlag = ZFALSE;
- nextReq->transId1 = apiConnectptr.p->transid[0];
- nextReq->transId2 = apiConnectptr.p->transid[1];
- nextReq->batch_size_bytes= scanP->batch_byte_size;
+ ScanFragNextReq tmp;
+ tmp.closeFlag = ZFALSE;
+ tmp.transId1 = apiConnectptr.p->transid[0];
+ tmp.transId2 = apiConnectptr.p->transid[1];
+ tmp.batch_size_rows = scanP->batch_size_rows;
+ tmp.batch_size_bytes = scanP->batch_byte_size;
ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags);
ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
@@ -9344,15 +9362,37 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal)
c_scan_frag_pool.getPtr(scanFragptr);
ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::DELIVERED);
- scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
scanFragptr.p->startFragTimer(ctcTimer);
-
scanFragptr.p->m_ops = 0;
- nextReq->senderData = scanFragptr.i;
- nextReq->batch_size_rows= scanFragptr.p->scanFragConcurrency;
- sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
- ScanFragNextReq::SignalLength, JBB);
+ if(scanFragptr.p->m_scan_frag_conf_status)
+ {
+ /**
+ * last scan was complete
+ */
+ jam();
+ ndbrequire(scanptr.p->scanNextFragId < scanptr.p->scanNoFrag);
+ scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
+
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++;
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = scanFragptr.i;
+ signal->theData[2] = scanptr.p->scanTableref;
+ signal->theData[3] = scanFragptr.p->scanFragId;
+ sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ }
+ else
+ {
+ jam();
+ scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ ScanFragNextReq * req = (ScanFragNextReq*)signal->getDataPtrSend();
+ * req = tmp;
+ req->senderData = scanFragptr.i;
+ sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+ }
delivered.remove(scanFragptr);
running.add(scanFragptr);
}//for
@@ -9416,7 +9456,7 @@ Dbtc::close_scan_req(Signal* signal, ScanRecordPtr scanPtr, bool req_received){
ndbrequire(curr.p->scanFragState == ScanFragRec::DELIVERED);
delivered.remove(curr);
- if(curr.p->m_ops > 0){
+ if(curr.p->m_ops > 0 && curr.p->m_scan_frag_conf_status == 0){
jam();
running.add(curr);
curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
@@ -9551,7 +9591,7 @@ void Dbtc::sendScanFragReq(Signal* signal,
req->transId1 = apiConnectptr.p->transid[0];
req->transId2 = apiConnectptr.p->transid[1];
req->clientOpPtr = scanFragP->m_apiPtr;
- req->batch_size_rows= scanFragP->scanFragConcurrency;
+ req->batch_size_rows= scanP->batch_size_rows;
req->batch_size_bytes= scanP->batch_byte_size;
sendSignal(scanFragP->lqhBlockref, GSN_SCAN_FRAGREQ, signal,
ScanFragReq::SignalLength, JBB);
@@ -9573,6 +9613,8 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
jam();
ops += 21;
}
+
+ Uint32 left = scanPtr.p->scanNoFrag - scanPtr.p->scanNextFragId;
ScanTabConf * conf = (ScanTabConf*)&signal->theData[0];
conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
@@ -9588,24 +9630,25 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
ScanFragRecPtr curr = ptr; // Remove while iterating...
queued.next(ptr);
+ bool done = curr.p->m_scan_frag_conf_status && --left;
+
* ops++ = curr.p->m_apiPtr;
- * ops++ = curr.i;
+ * ops++ = done ? RNIL : curr.i;
* ops++ = (curr.p->m_totalLen << 10) + curr.p->m_ops;
queued.remove(curr);
- if(curr.p->m_ops > 0){
+ if(!done){
delivered.add(curr);
curr.p->scanFragState = ScanFragRec::DELIVERED;
curr.p->stopFragTimer();
} else {
- (* --ops) = ScanTabConf::EndOfData; ops++;
c_scan_frag_pool.release(curr);
curr.p->scanFragState = ScanFragRec::COMPLETED;
curr.p->stopFragTimer();
}
}
}
-
+
if(scanPtr.p->m_delivered_scan_frags.isEmpty() &&
scanPtr.p->m_running_scan_frags.isEmpty()){
conf->requestInfo = op_count | ScanTabConf::EndOfData;
@@ -10424,9 +10467,8 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
sfp.i,
sfp.p->scanFragState,
sfp.p->scanFragId);
- infoEvent(" nodeid=%d, concurr=%d, timer=%d",
+ infoEvent(" nodeid=%d, timer=%d",
refToNode(sfp.p->lqhBlockref),
- sfp.p->scanFragConcurrency,
sfp.p->scanFragTimer);
}
@@ -10504,7 +10546,7 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
sp.p->scanAiLength,
sp.p->scanParallel,
sp.p->scanReceivedOperations,
- sp.p->noOprecPerFrag);
+ sp.p->batch_size_rows);
infoEvent(" schv=%d, tab=%d, sproc=%d",
sp.p->scanSchemaVersion,
sp.p->scanTableref,
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index 914dba00674..405f790954e 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -69,6 +69,17 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
Uint32 noOfAttributeGroups = signal->theData[12];
Uint32 globalCheckpointIdIndicator = signal->theData[13];
+#ifndef VM_TRACE
+ // config mismatch - do not crash if release compiled
+ if (regTabPtr.i >= cnoOfTablerec) {
+ ljam();
+ signal->theData[0] = userptr;
+ signal->theData[1] = 800;
+ sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
+ return;
+ }
+#endif
+
ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
if (cfirstfreeFragopr == RNIL) {
ljam();
diff --git a/ndb/src/kernel/blocks/suma/Suma.cpp b/ndb/src/kernel/blocks/suma/Suma.cpp
index d11d5f7176a..f6d9a0ac35a 100644
--- a/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -1888,7 +1888,7 @@ SumaParticipant::SyncRecord::nextScan(Signal* signal){
req->requestInfo = 0;
req->savePointId = 0;
ScanFragReq::setLockMode(req->requestInfo, 0);
- ScanFragReq::setHoldLockFlag(req->requestInfo, 0);
+ ScanFragReq::setHoldLockFlag(req->requestInfo, 1);
ScanFragReq::setKeyinfoFlag(req->requestInfo, 0);
ScanFragReq::setAttrLen(req->requestInfo, attrLen);
req->fragmentNoKeyLen = fd.m_fragDesc.m_fragmentNo;
diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp
index 926647838c9..f34e16318cd 100644
--- a/ndb/src/kernel/main.cpp
+++ b/ndb/src/kernel/main.cpp
@@ -19,7 +19,6 @@
#include <ndb_version.h>
#include "Configuration.hpp"
-#include <LocalConfig.hpp>
#include <TransporterRegistry.hpp>
#include "vm/SimBlockList.hpp"
@@ -69,16 +68,9 @@ int main(int argc, char** argv)
return NRT_Default;
}
- LocalConfig local_config;
- if (!local_config.init(theConfig->getConnectString(),0)){
- local_config.printError();
- local_config.printUsage();
- return NRT_Default;
- }
-
{ // Do configuration
signal(SIGPIPE, SIG_IGN);
- theConfig->fetch_configuration(local_config);
+ theConfig->fetch_configuration();
}
chdir(NdbConfig_get_path(0));
@@ -141,7 +133,7 @@ int main(int argc, char** argv)
exit(0);
}
g_eventLogger.info("Ndb has terminated (pid %d) restarting", child);
- theConfig->fetch_configuration(local_config);
+ theConfig->fetch_configuration();
}
g_eventLogger.info("Angel pid: %d ndb pid: %d", getppid(), getpid());
diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp
index aac035fe1b7..931b4da5a17 100644
--- a/ndb/src/kernel/vm/Configuration.cpp
+++ b/ndb/src/kernel/vm/Configuration.cpp
@@ -17,7 +17,6 @@
#include <ndb_global.h>
#include <ndb_opts.h>
-#include <LocalConfig.hpp>
#include "Configuration.hpp"
#include <ErrorHandlingMacros.hpp>
#include "GlobalData.hpp"
@@ -35,6 +34,7 @@
#include <kernel_types.h>
#include <ndb_limits.h>
+#include <ndbapi_limits.h>
#include "pc.hpp"
#include <LogLevel.hpp>
#include <NdbSleep.h>
@@ -108,7 +108,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
bool
Configuration::init(int argc, char** argv)
{
- const char *load_default_groups[]= { "ndbd",0 };
+ const char *load_default_groups[]= { "mysql_cluster","ndbd",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
@@ -189,7 +189,7 @@ Configuration::closeConfiguration(){
}
void
-Configuration::fetch_configuration(LocalConfig &local_config){
+Configuration::fetch_configuration(){
/**
* Fetch configuration from management server
*/
@@ -199,8 +199,17 @@ Configuration::fetch_configuration(LocalConfig &local_config){
m_mgmd_port= 0;
m_mgmd_host= 0;
- m_config_retriever= new ConfigRetriever(local_config, NDB_VERSION, NODE_TYPE_DB);
- if(m_config_retriever->do_connect() == -1){
+ m_config_retriever= new ConfigRetriever(getConnectString(),
+ NDB_VERSION, NODE_TYPE_DB);
+
+ if (m_config_retriever->hasError())
+ {
+ ERROR_SET(fatal, ERR_INVALID_CONFIG,
+ "Could not connect initialize handle to management server",
+ m_config_retriever->getErrorString());
+ }
+
+ if(m_config_retriever->do_connect(12,5,1) == -1){
const char * s = m_config_retriever->getErrorString();
if(s == 0)
s = "No error given!";
@@ -215,13 +224,7 @@ Configuration::fetch_configuration(LocalConfig &local_config){
ConfigRetriever &cr= *m_config_retriever;
- if((globalData.ownId = cr.allocNodeId()) == 0){
- for(Uint32 i = 0; i<3; i++){
- NdbSleep_SecSleep(3);
- if((globalData.ownId = cr.allocNodeId()) != 0)
- break;
- }
- }
+ globalData.ownId = cr.allocNodeId(2 /*retry*/,3 /*delay*/);
if(globalData.ownId == 0){
ERROR_SET(fatal, ERR_INVALID_CONFIG,
@@ -452,6 +455,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
unsigned int noOfTables = 0;
unsigned int noOfUniqueHashIndexes = 0;
unsigned int noOfOrderedIndexes = 0;
+ unsigned int noOfTriggers = 0;
unsigned int noOfReplicas = 0;
unsigned int noOfDBNodes = 0;
unsigned int noOfAPINodes = 0;
@@ -476,6 +480,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
{ CFG_DB_NO_TABLES, &noOfTables, false },
{ CFG_DB_NO_ORDERED_INDEXES, &noOfOrderedIndexes, false },
{ CFG_DB_NO_UNIQUE_HASH_INDEXES, &noOfUniqueHashIndexes, false },
+ { CFG_DB_NO_TRIGGERS, &noOfTriggers, true },
{ CFG_DB_NO_REPLICAS, &noOfReplicas, false },
{ CFG_DB_NO_ATTRIBUTES, &noOfAttributes, false },
{ CFG_DB_NO_OPS, &noOfOperations, false },
@@ -584,6 +589,18 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
ConfigValues::Iterator it2(*ownConfig, db.m_config);
it2.set(CFG_DB_NO_TABLES, noOfTables);
it2.set(CFG_DB_NO_ATTRIBUTES, noOfAttributes);
+ {
+ Uint32 neededNoOfTriggers = /* types: Insert/Update/Delete/Custom */
+ 3 * noOfUniqueHashIndexes + /* for unique hash indexes, I/U/D */
+ 3 * NDB_MAX_ACTIVE_EVENTS + /* for events in suma, I/U/D */
+ 3 * noOfTables + /* for backup, I/U/D */
+ noOfOrderedIndexes; /* for ordered indexes, C */
+ if (noOfTriggers < neededNoOfTriggers)
+ {
+ noOfTriggers= neededNoOfTriggers;
+ it2.set(CFG_DB_NO_TRIGGERS, noOfTriggers);
+ }
+ }
/**
* Do size calculations
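The trigger record count is now raised automatically when the configured value
cannot cover the metadata the node must support. A worked example with made-up
table counts (NDB_MAX_ACTIVE_EVENTS is a compile-time constant, presumably from the
ndbapi_limits.h include added above; its value is not shown in this patch):

    // noOfTables = 20, noOfUniqueHashIndexes = 5, noOfOrderedIndexes = 10
    Uint32 needed = 3*5                        // unique hash indexes: I/U/D
                  + 3*NDB_MAX_ACTIVE_EVENTS    // suma events: I/U/D
                  + 3*20                       // backup: I/U/D per table
                  + 10;                        // ordered indexes: custom
    // = 85 + 3*NDB_MAX_ACTIVE_EVENTS; CFG_DB_NO_TRIGGERS is bumped to this
    // value whenever the configured noOfTriggers is smaller.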
diff --git a/ndb/src/kernel/vm/Configuration.hpp b/ndb/src/kernel/vm/Configuration.hpp
index e4cd64f5ca8..acf0e163a84 100644
--- a/ndb/src/kernel/vm/Configuration.hpp
+++ b/ndb/src/kernel/vm/Configuration.hpp
@@ -21,7 +21,6 @@
#include <ndb_types.h>
class ConfigRetriever;
-class LocalConfig;
class Configuration {
public:
@@ -33,7 +32,7 @@ public:
*/
bool init(int argc, char** argv);
- void fetch_configuration(LocalConfig &local_config);
+ void fetch_configuration();
void setupConfiguration();
void closeConfiguration();
diff --git a/ndb/src/mgmapi/LocalConfig.cpp b/ndb/src/mgmapi/LocalConfig.cpp
index d0ff97cdedf..8f1e2ee8100 100644
--- a/ndb/src/mgmapi/LocalConfig.cpp
+++ b/ndb/src/mgmapi/LocalConfig.cpp
@@ -14,7 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include <LocalConfig.hpp>
+#include "LocalConfig.hpp"
#include <NdbEnv.h>
#include <NdbConfig.h>
#include <NdbAutoPtr.hpp>
@@ -294,4 +294,19 @@ LocalConfig::readConnectString(const char * connectString,
return return_value;
}
+char *
+LocalConfig::makeConnectString(char *buf, int sz)
+{
+ int p= BaseString::snprintf(buf,sz,"nodeid=%d", _ownNodeId);
+ for (int i = 0; (i < ids.size()) && (sz-p > 0); i++)
+ {
+ if (ids[i].type != MgmId_TCP)
+ continue;
+ p+=BaseString::snprintf(buf+p,sz-p,",%s:%d",
+ ids[i].name.c_str(), ids[i].port);
+ }
+ buf[sz-1]=0;
+ return buf;
+}
+
template class Vector<MgmtSrvrId>;
diff --git a/ndb/include/mgmapi/LocalConfig.hpp b/ndb/src/mgmapi/LocalConfig.hpp
index 9ceeffdba36..c415ec1be91 100644
--- a/ndb/include/mgmapi/LocalConfig.hpp
+++ b/ndb/src/mgmapi/LocalConfig.hpp
@@ -61,6 +61,7 @@ struct LocalConfig {
bool parseHostName(const char *buf);
bool parseFileName(const char *buf);
bool parseString(const char *buf, BaseString &err);
+ char * makeConnectString(char *buf, int sz);
};
#endif // LocalConfig_H
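makeConnectString() serializes a parsed LocalConfig back into connect-string form:
the own node id first, then every TCP management server as host:port, comma
separated. For a config with node id 3 and two management servers (names and port
invented for the example):

    char buf[256];
    local_config.makeConnectString(buf, sizeof(buf));
    // buf now holds e.g. "nodeid=3,mgm-a:1186,mgm-b:1186"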
diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp
index 51f2d7cee01..ca3a2a2186d 100644
--- a/ndb/src/mgmapi/mgmapi.cpp
+++ b/ndb/src/mgmapi/mgmapi.cpp
@@ -20,6 +20,7 @@
#include <LocalConfig.hpp>
#include <NdbAutoPtr.hpp>
+#include <NdbSleep.h>
#include <NdbTCP.h>
#include "mgmapi.h"
#include "mgmapi_debug.h"
@@ -83,8 +84,8 @@ typedef Parser<ParserDummy> Parser_t;
#define NDB_MGM_MAX_ERR_DESC_SIZE 256
struct ndb_mgm_handle {
- char * hostname;
- unsigned short port;
+ char * connectstring;
+ int cfg_i;
int connected;
int last_error;
@@ -95,7 +96,7 @@ struct ndb_mgm_handle {
NDB_SOCKET_TYPE socket;
- char cfg_ptr[sizeof(LocalConfig)];
+ LocalConfig cfg;
#ifdef MGMAPI_LOG
FILE* logfile;
@@ -148,14 +149,16 @@ ndb_mgm_create_handle()
h->connected = 0;
h->last_error = 0;
h->last_error_line = 0;
- h->hostname = 0;
h->socket = NDB_INVALID_SOCKET;
h->read_timeout = 50000;
h->write_timeout = 100;
-
- new (h->cfg_ptr) LocalConfig;
+ h->cfg_i = 0;
strncpy(h->last_error_desc, "No error", NDB_MGM_MAX_ERR_DESC_SIZE);
+
+ new (&(h->cfg)) LocalConfig;
+ h->cfg.init(0, 0);
+
#ifdef MGMAPI_LOG
h->logfile = 0;
#endif
@@ -163,6 +166,23 @@ ndb_mgm_create_handle()
return h;
}
+extern "C"
+int
+ndb_mgm_set_connectstring(NdbMgmHandle handle, const char * mgmsrv)
+{
+ new (&(handle->cfg)) LocalConfig;
+ if (!handle->cfg.init(mgmsrv, 0) ||
+ handle->cfg.ids.size() == 0)
+ {
+ new (&(handle->cfg)) LocalConfig;
+ handle->cfg.init(0, 0); /* reset the LocalConfig */
+ SET_ERROR(handle, NDB_MGM_ILLEGAL_CONNECT_STRING, "");
+ return -1;
+ }
+ handle->cfg_i= 0;
+ return 0;
+}
+
/**
* Destroy a handle
*/
@@ -175,14 +195,13 @@ ndb_mgm_destroy_handle(NdbMgmHandle * handle)
if((* handle)->connected){
ndb_mgm_disconnect(* handle);
}
- my_free((* handle)->hostname,MYF(MY_ALLOW_ZERO_PTR));
#ifdef MGMAPI_LOG
if ((* handle)->logfile != 0){
fclose((* handle)->logfile);
(* handle)->logfile = 0;
}
#endif
- ((LocalConfig*)((*handle)->cfg_ptr))->~LocalConfig();
+ (*handle)->cfg.~LocalConfig();
my_free((char*)* handle,MYF(MY_ALLOW_ZERO_PTR));
* handle = 0;
}
@@ -314,7 +333,8 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply,
*/
extern "C"
int
-ndb_mgm_connect(NdbMgmHandle handle, const char * mgmsrv)
+ndb_mgm_connect(NdbMgmHandle handle, int no_retries,
+ int retry_delay_in_seconds, int verbose)
{
SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_connect");
CHECK_HANDLE(handle, -1);
@@ -331,36 +351,48 @@ ndb_mgm_connect(NdbMgmHandle handle, const char * mgmsrv)
/**
* Do connect
*/
- LocalConfig *cfg= (LocalConfig*)(handle->cfg_ptr);
- new (cfg) LocalConfig;
- if (!cfg->init(mgmsrv, 0) ||
- cfg->ids.size() == 0)
- {
- SET_ERROR(handle, NDB_MGM_ILLEGAL_CONNECT_STRING, "");
- return -1;
- }
-
+ LocalConfig &cfg= handle->cfg;
NDB_SOCKET_TYPE sockfd= NDB_INVALID_SOCKET;
Uint32 i;
- for (i = 0; i < cfg->ids.size(); i++)
+ while (sockfd == NDB_INVALID_SOCKET)
{
- if (cfg->ids[i].type != MgmId_TCP)
- continue;
- SocketClient s(cfg->ids[i].name.c_str(), cfg->ids[i].port);
- sockfd = s.connect();
+ // do all the mgmt servers
+ for (i = 0; i < cfg.ids.size(); i++)
+ {
+ if (cfg.ids[i].type != MgmId_TCP)
+ continue;
+ SocketClient s(cfg.ids[i].name.c_str(), cfg.ids[i].port);
+ sockfd = s.connect();
+ if (sockfd != NDB_INVALID_SOCKET)
+ break;
+ }
if (sockfd != NDB_INVALID_SOCKET)
break;
- }
- if (sockfd == NDB_INVALID_SOCKET)
- {
- setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__,
- "Unable to connect using connectstring %s", mgmsrv);
- return -1;
+ if (verbose > 0) {
+ char buf[1024];
+ ndbout_c("Unable to connect with connect string: %s",
+ cfg.makeConnectString(buf,sizeof(buf)));
+ verbose= -1;
+ }
+ if (no_retries == 0) {
+ char buf[1024];
+ setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__,
+ "Unable to connect with connect string: %s",
+ cfg.makeConnectString(buf,sizeof(buf)));
+ return -1;
+ }
+ if (verbose == -1) {
+ ndbout << "retrying every " << retry_delay_in_seconds << " seconds:";
+ verbose= -2;
+ }
+ NdbSleep_SecSleep(retry_delay_in_seconds);
+ if (verbose == -2) {
+ ndbout << " " << no_retries;
+ }
+ no_retries--;
}
- my_free(handle->hostname,MYF(MY_ALLOW_ZERO_PTR));
- handle->hostname = my_strdup(cfg->ids[i].name.c_str(),MYF(MY_WME));
- handle->port = cfg->ids[i].port;
+ handle->cfg_i = i;
handle->socket = sockfd;
handle->connected = 1;
@@ -1068,7 +1100,9 @@ ndb_mgm_listen_event(NdbMgmHandle handle, int filter[])
};
CHECK_HANDLE(handle, -1);
- SocketClient s(handle->hostname, handle->port);
+ const char *hostname= ndb_mgm_get_connected_host(handle);
+ int port= ndb_mgm_get_connected_port(handle);
+ SocketClient s(hostname, port);
const NDB_SOCKET_TYPE sockfd = s.connect();
if (sockfd < 0) {
setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__,
@@ -1613,16 +1647,37 @@ ndb_mgm_destroy_configuration(struct ndb_mgm_configuration *cfg)
extern "C"
int
-ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, unsigned *pnodeid, int nodetype)
+ndb_mgm_get_configuration_nodeid(NdbMgmHandle handle)
{
+ CHECK_HANDLE(handle, 0);
+ return handle->cfg._ownNodeId;
+}
+
+extern "C"
+int ndb_mgm_get_connected_port(NdbMgmHandle handle)
+{
+ return handle->cfg.ids[handle->cfg_i].port;
+}
+extern "C"
+const char *ndb_mgm_get_connected_host(NdbMgmHandle handle)
+{
+ return handle->cfg.ids[handle->cfg_i].name.c_str();
+}
+
+extern "C"
+int
+ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, int nodetype)
+{
CHECK_HANDLE(handle, 0);
CHECK_CONNECTED(handle, 0);
+ int nodeid= handle->cfg._ownNodeId;
+
Properties args;
args.put("version", version);
args.put("nodetype", nodetype);
- args.put("nodeid", *pnodeid);
+ args.put("nodeid", nodeid);
args.put("user", "mysqld");
args.put("password", "mysqld");
args.put("public key", "a public key");
@@ -1638,26 +1693,29 @@ ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, unsigned *pnodei
prop= ndb_mgm_call(handle, reply, "get nodeid", &args);
CHECK_REPLY(prop, -1);
- int res= -1;
+ nodeid= -1;
do {
const char * buf;
if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){
+ const char *hostname= ndb_mgm_get_connected_host(handle);
+ unsigned port= ndb_mgm_get_connected_port(handle);
BaseString err;
err.assfmt("Could not alloc node id at %s port %d: %s",
- handle->hostname, handle->port, buf);
+ hostname, port, buf);
setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__,
err.c_str());
break;
}
- if(!prop->get("nodeid", pnodeid) != 0){
+ Uint32 _nodeid;
+ if(!prop->get("nodeid", &_nodeid) != 0){
ndbout_c("ERROR Message: <nodeid Unspecified>\n");
break;
}
- res= 0;
+ nodeid= _nodeid;
}while(0);
delete prop;
- return res;
+ return nodeid;
}
/*****************************************************************************
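ndb_mgm_connect() now owns the retry policy: each pass tries every management
server from the connect string, sleeps retry_delay_in_seconds between passes, and
gives up with NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET once no_retries reaches zero (a
negative no_retries, as the management client passes, therefore retries
indefinitely). A hedged client sketch continuing the handle set up earlier; the
retry numbers are arbitrary:

    if (ndb_mgm_connect(h, 12 /*no_retries*/, 5 /*delay s*/, 1 /*verbose*/) != 0) {
      fprintf(stderr, "connect failed: %s\n", ndb_mgm_get_latest_error_desc(h));
      return -1;
    }
    printf("connected to %s:%d\n",
           ndb_mgm_get_connected_host(h), ndb_mgm_get_connected_port(h));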
diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp
index bdeb885ed8b..54beaa49d3f 100644
--- a/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/ndb/src/mgmclient/CommandInterpreter.cpp
@@ -153,7 +153,6 @@ private:
NdbMgmHandle m_mgmsrv;
bool connected;
- const char *host;
int try_reconnect;
#ifdef HAVE_GLOBAL_REPLICATION
NdbRepHandle m_repserver;
@@ -379,15 +378,16 @@ CommandInterpreter::CommandInterpreter(const char *_host)
m_mgmsrv = ndb_mgm_create_handle();
if(m_mgmsrv == NULL) {
ndbout_c("Cannot create handle to management server.");
+ exit(-1);
+ }
+ if (ndb_mgm_set_connectstring(m_mgmsrv, _host))
+ {
printError();
+ exit(-1);
}
connected = false;
try_reconnect = 0;
- if (_host)
- host= my_strdup(_host,MYF(MY_WME));
- else
- host= 0;
#ifdef HAVE_GLOBAL_REPLICATION
rep_host = NULL;
m_repserver = NULL;
@@ -402,8 +402,6 @@ CommandInterpreter::~CommandInterpreter()
{
connected = false;
ndb_mgm_destroy_handle(&m_mgmsrv);
- my_free((char *)host,MYF(MY_ALLOW_ZERO_PTR));
- host = NULL;
}
static bool
@@ -438,18 +436,8 @@ bool
CommandInterpreter::connect()
{
if(!connected) {
- int tries = try_reconnect; // tries == 0 => infinite
- while(!connected) {
- if(ndb_mgm_connect(m_mgmsrv, host) == -1) {
- ndbout << "Cannot connect to management server (" << host << ").";
- tries--;
- if (tries == 0)
- break;
- ndbout << "Retrying in 5 seconds." << endl;
- NdbSleep_SecSleep(5);
- } else
- connected = true;
- }
+ if(!ndb_mgm_connect(m_mgmsrv, try_reconnect-1, 5, 1))
+ connected = true;
}
return connected;
}
diff --git a/ndb/src/mgmclient/main.cpp b/ndb/src/mgmclient/main.cpp
index 401a9198f30..f32cc683296 100644
--- a/ndb/src/mgmclient/main.cpp
+++ b/ndb/src/mgmclient/main.cpp
@@ -30,9 +30,10 @@ extern "C" int add_history(const char *command); /* From readline directory */
#include <NdbMain.h>
#include <NdbHost.h>
+#include <BaseString.hpp>
+#include <NdbOut.hpp>
#include <mgmapi.h>
#include <ndb_version.h>
-#include <LocalConfig.hpp>
#include "ndb_mgmclient.hpp"
@@ -138,7 +139,7 @@ int main(int argc, char** argv){
NDB_INIT(argv[0]);
const char *_host = 0;
int _port = 0;
- const char *load_default_groups[]= { "ndb_mgm",0 };
+ const char *load_default_groups[]= { "mysql_cluster","ndb_mgm",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index a49b29af275..81b5eb9dfb3 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -399,16 +399,20 @@ MgmtSrvr::getPort() const {
}
/* Constructor */
-MgmtSrvr::MgmtSrvr(NodeId nodeId,
- SocketServer *socket_server,
- const BaseString &configFilename,
- LocalConfig &local_config,
- Config * config):
+int MgmtSrvr::init()
+{
+ if ( _ownNodeId > 0)
+ return 0;
+ return -1;
+}
+
+MgmtSrvr::MgmtSrvr(SocketServer *socket_server,
+ const char *config_filename,
+ const char *connect_string) :
_blockNumber(1), // Hard coded block number since it makes it easy to send
// signals to other management servers.
m_socket_server(socket_server),
_ownReference(0),
- m_local_config(local_config),
theSignalIdleList(NULL),
theWaitState(WAIT_SUBSCRIBE_CONF),
m_statisticsListner(this)
@@ -416,6 +420,8 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId,
DBUG_ENTER("MgmtSrvr::MgmtSrvr");
+ _ownNodeId= 0;
+
_config = NULL;
_isStopThread = false;
@@ -426,12 +432,43 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId,
theFacade = 0;
m_newConfig = NULL;
- m_configFilename = configFilename;
+ m_configFilename.assign(config_filename);
m_nextConfigGenerationNumber = 0;
- _config = (config == 0 ? readConfig() : config);
-
+ m_config_retriever= new ConfigRetriever(connect_string,
+ NDB_VERSION, NDB_MGM_NODE_TYPE_MGM);
+
+ // first try to allocate nodeid from another management server
+ if(m_config_retriever->do_connect(0,0,0) == 0)
+ {
+ int tmp_nodeid= 0;
+ tmp_nodeid= m_config_retriever->allocNodeId(0 /*retry*/,0 /*delay*/);
+ if (tmp_nodeid == 0)
+ {
+ ndbout_c(m_config_retriever->getErrorString());
+ exit(-1);
+ }
+ // read config from other management server
+ _config= fetchConfig();
+ if (_config == 0)
+ {
+ ndbout << m_config_retriever->getErrorString() << endl;
+ exit(-1);
+ }
+ _ownNodeId= tmp_nodeid;
+ }
+
+ if (_ownNodeId == 0)
+ {
+ // read config locally
+ _config= readConfig();
+ if (_config == 0) {
+ ndbout << "Unable to read config file" << endl;
+ exit(-1);
+ }
+ }
+
theMgmtWaitForResponseCondPtr = NdbCondition_Create();
m_configMutex = NdbMutex_Create();
@@ -443,9 +480,11 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId,
nodeTypes[i] = (enum ndb_mgm_node_type)-1;
m_connect_address[i].s_addr= 0;
}
+
{
- ndb_mgm_configuration_iterator * iter = ndb_mgm_create_configuration_iterator
- (config->m_configValues, CFG_SECTION_NODE);
+ ndb_mgm_configuration_iterator
+ *iter = ndb_mgm_create_configuration_iterator(_config->m_configValues,
+ CFG_SECTION_NODE);
for(ndb_mgm_first(iter); ndb_mgm_valid(iter); ndb_mgm_next(iter)){
unsigned type, id;
if(ndb_mgm_get_int_parameter(iter, CFG_TYPE_OF_SECTION, &type) != 0)
@@ -478,8 +517,6 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId,
}
_props = NULL;
- _ownNodeId= 0;
- NodeId tmp= nodeId;
BaseString error_string;
if ((m_node_id_mutex = NdbMutex_Create()) == 0)
@@ -488,43 +525,25 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId,
exit(-1);
}
-#if 0
- char my_hostname[256];
- struct sockaddr_in tmp_addr;
- SOCKET_SIZE_TYPE addrlen= sizeof(tmp_addr);
- if (!g_no_nodeid_checks) {
- if (gethostname(my_hostname, sizeof(my_hostname))) {
- ndbout << "error: gethostname() - " << strerror(errno) << endl;
- exit(-1);
- }
- if (Ndb_getInAddr(&(((sockaddr_in*)&tmp_addr)->sin_addr),my_hostname)) {
- ndbout << "error: Ndb_getInAddr(" << my_hostname << ") - "
- << strerror(errno) << endl;
+ if (_ownNodeId == 0) // we did not get node id from other server
+ {
+ NodeId tmp= m_config_retriever->get_configuration_nodeid();
+
+ if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM,
+ 0, 0, error_string)){
+ ndbout << "Unable to obtain requested nodeid: "
+ << error_string.c_str() << endl;
exit(-1);
}
+ _ownNodeId = tmp;
}
- if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM,
- (struct sockaddr *)&tmp_addr,
- &addrlen, error_string)){
- ndbout << "Unable to obtain requested nodeid: "
- << error_string.c_str() << endl;
- exit(-1);
- }
-#else
- if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM,
- 0, 0, error_string)){
- ndbout << "Unable to obtain requested nodeid: "
- << error_string.c_str() << endl;
- exit(-1);
- }
-#endif
- _ownNodeId = tmp;
{
DBUG_PRINT("info", ("verifyConfig"));
- ConfigRetriever cr(m_local_config, NDB_VERSION, NDB_MGM_NODE_TYPE_MGM);
- if (!cr.verifyConfig(config->m_configValues, _ownNodeId)) {
- ndbout << cr.getErrorString() << endl;
+ if (!m_config_retriever->verifyConfig(_config->m_configValues,
+ _ownNodeId))
+ {
+ ndbout << m_config_retriever->getErrorString() << endl;
exit(-1);
}
}
@@ -657,6 +676,8 @@ MgmtSrvr::~MgmtSrvr()
NdbThread_WaitFor(m_signalRecvThread, &res);
NdbThread_Destroy(&m_signalRecvThread);
}
+ if (m_config_retriever)
+ delete m_config_retriever;
}
//****************************************************************************
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp
index b3257491123..2ab11250d81 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -175,11 +175,10 @@ public:
/* Constructor */
- MgmtSrvr(NodeId nodeId, /* Local nodeid */
- SocketServer *socket_server,
- const BaseString &config_filename, /* Where to save config */
- LocalConfig &local_config, /* Ndb.cfg filename */
- Config * config);
+ MgmtSrvr(SocketServer *socket_server,
+ const char *config_filename, /* Where to save config */
+ const char *connect_string);
+ int init();
NodeId getOwnNodeId() const {return _ownNodeId;};
/**
@@ -538,7 +537,6 @@ private:
NdbMutex *m_configMutex;
const Config * _config;
Config * m_newConfig;
- LocalConfig &m_local_config;
BaseString m_configFilename;
Uint32 m_nextConfigGenerationNumber;
@@ -755,6 +753,9 @@ private:
Config *_props;
int send(class NdbApiSignal* signal, Uint32 node, Uint32 node_type);
+
+ ConfigRetriever *m_config_retriever;
+
public:
/**
* This method does not exist
diff --git a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
index 1d51061e909..6c4b4e9ae3c 100644
--- a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
@@ -272,30 +272,20 @@ MgmtSrvr::saveConfig(const Config *conf) {
Config *
MgmtSrvr::readConfig() {
- Config *conf = NULL;
- if(m_configFilename.length() != 0) {
- /* Use config file */
- InitConfigFileParser parser;
- conf = parser.parseConfig(m_configFilename.c_str());
-
- if(conf == NULL) {
- /* Try to get configuration from other MGM server */
- return fetchConfig();
- }
- }
+ Config *conf;
+ InitConfigFileParser parser;
+ conf = parser.parseConfig(m_configFilename.c_str());
return conf;
}
Config *
MgmtSrvr::fetchConfig() {
- ConfigRetriever cr(m_local_config, NDB_VERSION, NODE_TYPE_MGM);
- struct ndb_mgm_configuration * tmp = cr.getConfig();
+ struct ndb_mgm_configuration * tmp = m_config_retriever->getConfig();
if(tmp != 0){
Config * conf = new Config();
conf->m_configValues = tmp;
return conf;
}
-
return 0;
}
diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp
index b588a2d0933..84ff98626e5 100644
--- a/ndb/src/mgmsrv/main.cpp
+++ b/ndb/src/mgmsrv/main.cpp
@@ -62,7 +62,6 @@ struct MgmGlobals {
int non_interactive;
int interactive;
const char * config_filename;
- const char * local_config_filename;
/** Stuff found in environment or in local config */
NodeId localNodeId;
@@ -70,9 +69,6 @@ struct MgmGlobals {
char * interface_name;
int port;
- /** The configuration of the cluster */
- Config * cluster_config;
-
/** The Mgmt Server */
MgmtSrvr * mgmObject;
@@ -86,9 +82,6 @@ static MgmGlobals glob;
/******************************************************************************
* Function prototypes
******************************************************************************/
-static bool readLocalConfig();
-static bool readGlobalConfig();
-
/**
* Global variables
*/
@@ -100,16 +93,28 @@ static char *opt_connect_str= 0;
static struct my_option my_long_options[] =
{
- NDB_STD_OPTS("ndb_mgm"),
+#ifndef DBUG_OFF
+ { "debug", '#', "Output debug log. Often this is 'd:t:o,filename'.",
+ 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0 },
+#endif
+ { "usage", '?', "Display this help and exit.",
+ 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "help", '?', "Display this help and exit.",
+ 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "version", 'V', "Output version information and exit.", 0, 0, 0,
+ GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "connect-string", 1023,
+ "Set connect string for connecting to ndb_mgmd. "
+ "<constr>=\"host=<hostname:port>[;nodeid=<id>]\". "
+ "Overides specifying entries in NDB_CONNECTSTRING and config file",
+ (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "config-file", 'f', "Specify cluster configuration file",
(gptr*) &glob.config_filename, (gptr*) &glob.config_filename, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "daemon", 'd', "Run ndb_mgmd in daemon mode (default)",
(gptr*) &glob.daemon, (gptr*) &glob.daemon, 0,
GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 },
- { "l", 'l', "Specify configuration file connect string (default Ndb.cfg if available)",
- (gptr*) &glob.local_config_filename, (gptr*) &glob.local_config_filename, 0,
- GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "interactive", 256, "Run interactive. Not supported but provided for testing purposes",
(gptr*) &glob.interactive, (gptr*) &glob.interactive, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
@@ -173,7 +178,7 @@ int main(int argc, char** argv)
global_mgmt_server_check = 1;
glob.config_filename= "config.ini";
- const char *load_default_groups[]= { "ndb_mgmd",0 };
+ const char *load_default_groups[]= { "mysql_cluster","ndb_mgmd",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
@@ -189,29 +194,16 @@ int main(int argc, char** argv)
MgmApiService * mapi = new MgmApiService();
- /****************************
- * Read configuration files *
- ****************************/
- LocalConfig local_config;
- if(!local_config.init(opt_connect_str,glob.local_config_filename)){
- local_config.printError();
- goto error_end;
- }
- glob.localNodeId = local_config._ownNodeId;
+ glob.mgmObject = new MgmtSrvr(glob.socketServer,
+ glob.config_filename,
+ opt_connect_str);
- if (!readGlobalConfig())
+ if (glob.mgmObject->init())
goto error_end;
- glob.mgmObject = new MgmtSrvr(glob.localNodeId, glob.socketServer,
- BaseString(glob.config_filename),
- local_config,
- glob.cluster_config);
-
chdir(NdbConfig_get_path(0));
- glob.cluster_config = 0;
glob.localNodeId= glob.mgmObject->getOwnNodeId();
-
if (glob.localNodeId == 0) {
goto error_end;
}
@@ -322,9 +314,7 @@ MgmGlobals::MgmGlobals(){
// Default values
port = 0;
config_filename = NULL;
- local_config_filename = NULL;
interface_name = 0;
- cluster_config = 0;
daemon = 1;
non_interactive = 0;
interactive = 0;
@@ -337,27 +327,6 @@ MgmGlobals::~MgmGlobals(){
delete socketServer;
if (mgmObject)
delete mgmObject;
- if (cluster_config)
- delete cluster_config;
if (interface_name)
free(interface_name);
}
-
-/**
- * @fn readGlobalConfig
- * @param glob : Global variables
- * @return true if success, false otherwise.
- */
-static bool
-readGlobalConfig() {
- if(glob.config_filename == NULL)
- return false;
-
- /* Use config file */
- InitConfigFileParser parser;
- glob.cluster_config = parser.parseConfig(glob.config_filename);
- if(glob.cluster_config == 0){
- return false;
- }
- return true;
-}
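
The hunk above replaces the LocalConfig / readGlobalConfig startup path with a two-step MgmtSrvr construction followed by init(). A minimal sketch of the new startup sequence, not part of the patch: the wrapper name and the error handling are illustrative assumptions, only the constructor, init() and getOwnNodeId() signatures are taken from the headers changed above.

// Illustrative sketch of the new ndb_mgmd startup flow (not part of the patch).
#include "MgmtSrvr.hpp"          // declares MgmtSrvr and (indirectly) SocketServer

static MgmtSrvr* start_mgmt_server(SocketServer* socket_server,
                                   const char* config_filename,  // e.g. "config.ini"
                                   const char* connect_string)   // may be NULL
{
  MgmtSrvr* srvr = new MgmtSrvr(socket_server, config_filename, connect_string);
  if (srvr->init() != 0) {         // non-zero is treated as failure, as in main()
    delete srvr;
    return 0;
  }
  if (srvr->getOwnNodeId() == 0) { // node id is only known after init()
    delete srvr;
    return 0;
  }
  return srvr;
}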
diff --git a/ndb/src/ndbapi/NdbConnection.cpp b/ndb/src/ndbapi/NdbConnection.cpp
index 4f6468eb4ae..f4bb000300a 100644
--- a/ndb/src/ndbapi/NdbConnection.cpp
+++ b/ndb/src/ndbapi/NdbConnection.cpp
@@ -55,6 +55,7 @@ NdbConnection::NdbConnection( Ndb* aNdb ) :
theFirstExecOpInList(NULL),
theLastExecOpInList(NULL),
theCompletedFirstOp(NULL),
+ theCompletedLastOp(NULL),
theNoOfOpSent(0),
theNoOfOpCompleted(0),
theNoOfOpFetched(0),
@@ -124,6 +125,7 @@ NdbConnection::init()
theLastExecOpInList = NULL;
theCompletedFirstOp = NULL;
+ theCompletedLastOp = NULL;
theGlobalCheckpointId = 0;
theCommitStatus = Started;
@@ -256,6 +258,8 @@ NdbConnection::handleExecuteCompletion()
if (tLastExecOp != NULL) {
tLastExecOp->next(theCompletedFirstOp);
theCompletedFirstOp = tFirstExecOp;
+ if (theCompletedLastOp == NULL)
+ theCompletedLastOp = tLastExecOp;
theFirstExecOpInList = NULL;
theLastExecOpInList = NULL;
}//if
@@ -292,6 +296,8 @@ NdbConnection::execute(ExecType aTypeOfExec,
ExecType tExecType;
NdbOperation* tPrepOp;
+ NdbOperation* tCompletedFirstOp = NULL;
+ NdbOperation* tCompletedLastOp = NULL;
int ret = 0;
do {
@@ -314,6 +320,7 @@ NdbConnection::execute(ExecType aTypeOfExec,
}
tPrepOp = tPrepOp->next();
}
+
// save rest of prepared ops if batch
NdbOperation* tRestOp= 0;
NdbOperation* tLastOp= 0;
@@ -323,6 +330,7 @@ NdbConnection::execute(ExecType aTypeOfExec,
tLastOp = theLastOpInList;
theLastOpInList = tPrepOp;
}
+
if (tExecType == Commit) {
NdbOperation* tOp = theCompletedFirstOp;
while (tOp != NULL) {
@@ -338,6 +346,19 @@ NdbConnection::execute(ExecType aTypeOfExec,
}
}
+ // completed ops are in unspecified order
+ if (theCompletedFirstOp != NULL) {
+ if (tCompletedFirstOp == NULL) {
+ tCompletedFirstOp = theCompletedFirstOp;
+ tCompletedLastOp = theCompletedLastOp;
+ } else {
+ tCompletedLastOp->next(theCompletedFirstOp);
+ tCompletedLastOp = theCompletedLastOp;
+ }
+ theCompletedFirstOp = NULL;
+ theCompletedLastOp = NULL;
+ }
+
if (executeNoBlobs(tExecType, abortOption, forceSend) == -1)
ret = -1;
#ifndef VM_TRACE
@@ -362,6 +383,7 @@ NdbConnection::execute(ExecType aTypeOfExec,
tOp = tOp->next();
}
}
+
// add saved prepared ops if batch
if (tPrepOp != NULL && tRestOp != NULL) {
if (theFirstOpInList == NULL)
@@ -373,6 +395,18 @@ NdbConnection::execute(ExecType aTypeOfExec,
assert(theFirstOpInList == NULL || tExecType == NoCommit);
} while (theFirstOpInList != NULL || tExecType != aTypeOfExec);
+ if (tCompletedFirstOp != NULL) {
+ tCompletedLastOp->next(theCompletedFirstOp);
+ theCompletedFirstOp = tCompletedFirstOp;
+ if (theCompletedLastOp == NULL)
+ theCompletedLastOp = tCompletedLastOp;
+ }
+#if ndb_api_count_completed_ops_after_blob_execute
+ { NdbOperation* tOp; unsigned n = 0;
+ for (tOp = theCompletedFirstOp; tOp != NULL; tOp = tOp->next()) n++;
+ ndbout << "completed ops: " << n << endl;
+ }
+#endif
DBUG_RETURN(ret);
}
@@ -894,6 +928,7 @@ NdbConnection::releaseOperations()
releaseOps(theFirstExecOpInList);
theCompletedFirstOp = NULL;
+ theCompletedLastOp = NULL;
theFirstOpInList = NULL;
theFirstExecOpInList = NULL;
theLastOpInList = NULL;
@@ -909,6 +944,7 @@ NdbConnection::releaseCompletedOperations()
{
releaseOps(theCompletedFirstOp);
theCompletedFirstOp = NULL;
+ theCompletedLastOp = NULL;
}//NdbConnection::releaseOperations()
/******************************************************************************
@@ -1086,8 +1122,11 @@ NdbConnection::getNdbIndexScanOperation(const NdbIndexImpl* index,
if (indexTable != 0){
NdbIndexScanOperation* tOp =
getNdbScanOperation((NdbTableImpl *) indexTable);
- tOp->m_currentTable = table;
- if(tOp) tOp->m_cursor_type = NdbScanOperation::IndexCursor;
+ if(tOp)
+ {
+ tOp->m_currentTable = table;
+ tOp->m_cursor_type = NdbScanOperation::IndexCursor;
+ }
return tOp;
} else {
setOperationErrorCodeAbort(theNdb->theError.code);
@@ -1582,9 +1621,6 @@ from other transactions.
/**
* There's always a TCKEYCONF when using IgnoreError
*/
-#ifdef VM_TRACE
- ndbout_c("Not completing transaction 2");
-#endif
return -1;
}
/**********************************************************************/
@@ -1836,9 +1872,6 @@ NdbConnection::OpCompleteFailure(Uint8 abortOption, bool setFailure)
/**
* There's always a TCKEYCONF when using IgnoreError
*/
-#ifdef VM_TRACE
- ndbout_c("Not completing transaction");
-#endif
return -1;
}
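
The execute() changes above track both ends of the completed-operation list (theCompletedFirstOp / theCompletedLastOp) so that operations finished by earlier blob batches can be put back in front of the final list without walking it. A standalone sketch of that constant-time splice, using a made-up Node type in place of NdbOperation:

// Standalone illustration only; Node stands in for NdbOperation and the
// next()/next(Node*) pair mirrors how the list is linked above.
struct Node {
  Node* nxt;
  Node* next() const { return nxt; }
  void  next(Node* n) { nxt = n; }
};

// Prepend the saved list (head2, tail2) in front of (head1, tail1) in O(1).
static void splice_front(Node*& head1, Node*& tail1, Node* head2, Node* tail2)
{
  if (head2 == 0)
    return;               // nothing was saved from the earlier batch
  tail2->next(head1);     // saved ops keep their order, followed by the new list
  if (head1 == 0)
    tail1 = tail2;        // list was empty: the saved tail becomes the tail
  head1 = head2;
}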
diff --git a/ndb/src/ndbapi/NdbConnectionScan.cpp b/ndb/src/ndbapi/NdbConnectionScan.cpp
index 3fe8993a42b..a1a220caacf 100644
--- a/ndb/src/ndbapi/NdbConnectionScan.cpp
+++ b/ndb/src/ndbapi/NdbConnectionScan.cpp
@@ -57,12 +57,18 @@ NdbConnection::receiveSCAN_TABREF(NdbApiSignal* aSignal){
if(checkState_TransId(&ref->transId1)){
theScanningOp->theError.code = ref->errorCode;
+ theScanningOp->execCLOSE_SCAN_REP();
if(!ref->closeNeeded){
- theScanningOp->execCLOSE_SCAN_REP();
return 0;
}
- assert(theScanningOp->m_sent_receivers_count);
+
+ /**
+ * Set up so that close_impl will actually perform a close
+ * and not "close scan"-optimize it away
+ */
theScanningOp->m_conf_receivers_count++;
+ theScanningOp->m_conf_receivers[0] = theScanningOp->m_receivers[0];
+ theScanningOp->m_conf_receivers[0]->m_tcPtrI = ~0;
return 0;
} else {
#ifdef NDB_NO_DROPPED_SIGNAL
@@ -97,7 +103,7 @@ NdbConnection::receiveSCAN_TABCONF(NdbApiSignal* aSignal,
theScanningOp->execCLOSE_SCAN_REP();
return 0;
}
-
+
for(Uint32 i = 0; i<len; i += 3){
Uint32 opCount, totalLen;
Uint32 ptrI = * ops++;
@@ -109,24 +115,12 @@ NdbConnection::receiveSCAN_TABCONF(NdbApiSignal* aSignal,
void * tPtr = theNdb->int2void(ptrI);
assert(tPtr); // For now
NdbReceiver* tOp = theNdb->void2rec(tPtr);
- if (tOp && tOp->checkMagicNumber()){
- if(tOp->execSCANOPCONF(tcPtrI, totalLen, opCount)){
- /**
- *
- */
- theScanningOp->receiver_delivered(tOp);
- } else if(info == ScanTabConf::EndOfData){
+ if (tOp && tOp->checkMagicNumber())
+ {
+ if (tcPtrI == RNIL && opCount == 0)
theScanningOp->receiver_completed(tOp);
- }
- }
- }
- if (conf->requestInfo & ScanTabConf::EndOfData) {
- if(theScanningOp->m_ordered)
- theScanningOp->m_api_receivers_count = 0;
- if(theScanningOp->m_api_receivers_count +
- theScanningOp->m_conf_receivers_count +
- theScanningOp->m_sent_receivers_count){
- abort();
+ else if (tOp->execSCANOPCONF(tcPtrI, totalLen, opCount))
+ theScanningOp->receiver_delivered(tOp);
}
}
return 0;
diff --git a/ndb/src/ndbapi/NdbResultSet.cpp b/ndb/src/ndbapi/NdbResultSet.cpp
index f270584d227..d9d71464026 100644
--- a/ndb/src/ndbapi/NdbResultSet.cpp
+++ b/ndb/src/ndbapi/NdbResultSet.cpp
@@ -44,10 +44,10 @@ void NdbResultSet::init()
{
}
-int NdbResultSet::nextResult(bool fetchAllowed)
+int NdbResultSet::nextResult(bool fetchAllowed, bool forceSend)
{
int res;
- if ((res = m_operation->nextResult(fetchAllowed)) == 0) {
+ if ((res = m_operation->nextResult(fetchAllowed, forceSend)) == 0) {
// handle blobs
NdbBlob* tBlob = m_operation->theBlobList;
while (tBlob != 0) {
@@ -67,9 +67,9 @@ int NdbResultSet::nextResult(bool fetchAllowed)
return res;
}
-void NdbResultSet::close()
+void NdbResultSet::close(bool forceSend)
{
- m_operation->closeScan();
+ m_operation->closeScan(forceSend);
}
NdbOperation*
@@ -98,6 +98,6 @@ NdbResultSet::deleteTuple(NdbConnection * takeOverTrans){
}
int
-NdbResultSet::restart(){
- return m_operation->restart();
+NdbResultSet::restart(bool forceSend){
+ return m_operation->restart(forceSend);
}
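
NdbResultSet::nextResult(), close() and restart() now take a forceSend flag that is passed down to the scan layer. A hedged usage sketch of a plain table scan with the new signatures; the umbrella header, table name and error handling are assumptions, only the NdbResultSet calls follow the patch:

// Sketch only. Passing forceSend = false keeps the old adaptive-send behaviour.
#include <NdbApi.hpp>

static int scan_count_rows(Ndb* ndb, const char* table, bool forceSend)
{
  NdbConnection* trans = ndb->startTransaction();
  if (trans == 0)
    return -1;
  NdbScanOperation* op = trans->getNdbScanOperation(table);
  NdbResultSet* rs = op ? op->readTuples(NdbScanOperation::LM_Read) : 0;
  if (rs == 0 || op->interpret_exit_ok() == -1 || trans->execute(NoCommit) == -1) {
    ndb->closeTransaction(trans);
    return -1;
  }
  int rows = 0, res;
  while ((res = rs->nextResult(true, forceSend)) == 0)  // fetchAllowed, forceSend
    rows++;
  rs->close(forceSend);                                 // close also takes forceSend
  ndb->closeTransaction(trans);
  return res == 1 ? rows : -1;                          // 1 means end of data
}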
diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp
index 4b10ebb10cd..db0c294708d 100644
--- a/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -35,6 +35,8 @@
#include <signaldata/AttrInfo.hpp>
#include <signaldata/TcKeyReq.hpp>
+#define DEBUG_NEXT_RESULT 0
+
NdbScanOperation::NdbScanOperation(Ndb* aNdb) :
NdbOperation(aNdb),
m_resultSet(0),
@@ -275,6 +277,9 @@ NdbScanOperation::fix_receivers(Uint32 parallel){
void
NdbScanOperation::receiver_delivered(NdbReceiver* tRec){
if(theError.code == 0){
+ if(DEBUG_NEXT_RESULT)
+ ndbout_c("receiver_delivered");
+
Uint32 idx = tRec->m_list_index;
Uint32 last = m_sent_receivers_count - 1;
if(idx != last){
@@ -298,6 +303,9 @@ NdbScanOperation::receiver_delivered(NdbReceiver* tRec){
void
NdbScanOperation::receiver_completed(NdbReceiver* tRec){
if(theError.code == 0){
+ if(DEBUG_NEXT_RESULT)
+ ndbout_c("receiver_completed");
+
Uint32 idx = tRec->m_list_index;
Uint32 last = m_sent_receivers_count - 1;
if(idx != last){
@@ -445,12 +453,12 @@ NdbScanOperation::executeCursor(int nodeId){
return -1;
}
-#define DEBUG_NEXT_RESULT 0
-int NdbScanOperation::nextResult(bool fetchAllowed)
+int NdbScanOperation::nextResult(bool fetchAllowed, bool forceSend)
{
if(m_ordered)
- return ((NdbIndexScanOperation*)this)->next_result_ordered(fetchAllowed);
+ return ((NdbIndexScanOperation*)this)->next_result_ordered(fetchAllowed,
+ forceSend);
/**
* Check current receiver
@@ -487,7 +495,8 @@ int NdbScanOperation::nextResult(bool fetchAllowed)
TransporterFacade* tp = TransporterFacade::instance();
Guard guard(tp->theMutexPtr);
Uint32 seq = theNdbCon->theNodeSequence;
- if(seq == tp->getNodeSequence(nodeId) && send_next_scan(idx, false) == 0){
+ if(seq == tp->getNodeSequence(nodeId) && send_next_scan(idx, false,
+ forceSend) == 0){
idx = m_current_api_receiver;
last = m_api_receivers_count;
@@ -578,8 +587,9 @@ int NdbScanOperation::nextResult(bool fetchAllowed)
}
int
-NdbScanOperation::send_next_scan(Uint32 cnt, bool stopScanFlag){
- if(cnt > 0 || stopScanFlag){
+NdbScanOperation::send_next_scan(Uint32 cnt, bool stopScanFlag,
+ bool forceSend){
+ if(cnt > 0){
NdbApiSignal tSignal(theNdb->theMyRef);
tSignal.setSignal(GSN_SCAN_NEXTREQ);
@@ -595,38 +605,57 @@ NdbScanOperation::send_next_scan(Uint32 cnt, bool stopScanFlag){
*/
Uint32 last = m_sent_receivers_count;
Uint32 * prep_array = (cnt > 21 ? m_prepared_receivers : theData + 4);
+ Uint32 sent = 0;
for(Uint32 i = 0; i<cnt; i++){
NdbReceiver * tRec = m_api_receivers[i];
- m_sent_receivers[last+i] = tRec;
- tRec->m_list_index = last+i;
- prep_array[i] = tRec->m_tcPtrI;
- tRec->prepareSend();
+ if((prep_array[sent] = tRec->m_tcPtrI) != RNIL)
+ {
+ m_sent_receivers[last+sent] = tRec;
+ tRec->m_list_index = last+sent;
+ tRec->prepareSend();
+ sent++;
+ }
}
- memcpy(&m_api_receivers[0], &m_api_receivers[cnt], cnt * sizeof(char*));
+ memmove(m_api_receivers, m_api_receivers+cnt,
+ (theParallelism-cnt) * sizeof(char*));
- Uint32 nodeId = theNdbCon->theDBnode;
- TransporterFacade * tp = TransporterFacade::instance();
- int ret;
- if(cnt > 21){
- tSignal.setLength(4);
- LinearSectionPtr ptr[3];
- ptr[0].p = prep_array;
- ptr[0].sz = cnt;
- ret = tp->sendSignal(&tSignal, nodeId, ptr, 1);
- } else {
- tSignal.setLength(4+cnt);
- ret = tp->sendSignal(&tSignal, nodeId);
+ int ret = 0;
+ if(sent)
+ {
+ Uint32 nodeId = theNdbCon->theDBnode;
+ TransporterFacade * tp = TransporterFacade::instance();
+ if(cnt > 21){
+ tSignal.setLength(4);
+ LinearSectionPtr ptr[3];
+ ptr[0].p = prep_array;
+ ptr[0].sz = sent;
+ ret = tp->sendSignal(&tSignal, nodeId, ptr, 1);
+ } else {
+ tSignal.setLength(4+sent);
+ ret = tp->sendSignal(&tSignal, nodeId);
+ }
}
+
+ if (!ret) checkForceSend(forceSend);
- m_sent_receivers_count = last + cnt + stopScanFlag;
+ m_sent_receivers_count = last + sent;
m_api_receivers_count -= cnt;
m_current_api_receiver = 0;
-
+
return ret;
}
return 0;
}
+void NdbScanOperation::checkForceSend(bool forceSend)
+{
+ if (forceSend) {
+ TransporterFacade::instance()->forceSend(theNdb->theNdbBlockNumber);
+ } else {
+ TransporterFacade::instance()->checkForceSend(theNdb->theNdbBlockNumber);
+ }//if
+}
+
int
NdbScanOperation::prepareSend(Uint32 TC_ConnectPtr, Uint64 TransactionId)
{
@@ -642,7 +671,7 @@ NdbScanOperation::doSend(int ProcessorId)
return 0;
}
-void NdbScanOperation::closeScan()
+void NdbScanOperation::closeScan(bool forceSend)
{
if(m_transConnection){
if(DEBUG_NEXT_RESULT)
@@ -657,7 +686,7 @@ void NdbScanOperation::closeScan()
TransporterFacade* tp = TransporterFacade::instance();
Guard guard(tp->theMutexPtr);
- close_impl(tp);
+ close_impl(tp, forceSend);
} while(0);
@@ -673,6 +702,7 @@ NdbScanOperation::execCLOSE_SCAN_REP(){
m_api_receivers_count = 0;
m_conf_receivers_count = 0;
m_sent_receivers_count = 0;
+ m_current_api_receiver = m_ordered ? theParallelism : 0;
}
void NdbScanOperation::release()
@@ -1293,7 +1323,8 @@ NdbIndexScanOperation::compare(Uint32 skip, Uint32 cols,
}
int
-NdbIndexScanOperation::next_result_ordered(bool fetchAllowed){
+NdbIndexScanOperation::next_result_ordered(bool fetchAllowed,
+ bool forceSend){
Uint32 u_idx = 0, u_last = 0;
Uint32 s_idx = m_current_api_receiver; // first sorted
@@ -1319,7 +1350,8 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed){
Guard guard(tp->theMutexPtr);
Uint32 seq = theNdbCon->theNodeSequence;
Uint32 nodeId = theNdbCon->theDBnode;
- if(seq == tp->getNodeSequence(nodeId) && !send_next_scan_ordered(s_idx)){
+ if(seq == tp->getNodeSequence(nodeId) &&
+ !send_next_scan_ordered(s_idx, forceSend)){
Uint32 tmp = m_sent_receivers_count;
s_idx = m_current_api_receiver;
while(m_sent_receivers_count > 0 && !theError.code){
@@ -1408,14 +1440,26 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed){
}
int
-NdbIndexScanOperation::send_next_scan_ordered(Uint32 idx){
+NdbIndexScanOperation::send_next_scan_ordered(Uint32 idx, bool forceSend){
if(idx == theParallelism)
return 0;
+ NdbReceiver* tRec = m_api_receivers[idx];
NdbApiSignal tSignal(theNdb->theMyRef);
tSignal.setSignal(GSN_SCAN_NEXTREQ);
+ Uint32 last = m_sent_receivers_count;
Uint32* theData = tSignal.getDataPtrSend();
+ Uint32* prep_array = theData + 4;
+
+ m_current_api_receiver = idx + 1;
+ if((prep_array[0] = tRec->m_tcPtrI) == RNIL)
+ {
+ if(DEBUG_NEXT_RESULT)
+ ndbout_c("receiver completed, don't send");
+ return 0;
+ }
+
theData[0] = theNdbCon->theTCConPtr;
theData[1] = 0;
Uint64 transId = theNdbCon->theTransactionId;
@@ -1425,35 +1469,35 @@ NdbIndexScanOperation::send_next_scan_ordered(Uint32 idx){
/**
* Prepare ops
*/
- Uint32 last = m_sent_receivers_count;
- Uint32 * prep_array = theData + 4;
-
- NdbReceiver * tRec = m_api_receivers[idx];
m_sent_receivers[last] = tRec;
tRec->m_list_index = last;
- prep_array[0] = tRec->m_tcPtrI;
tRec->prepareSend();
-
m_sent_receivers_count = last + 1;
- m_current_api_receiver = idx + 1;
Uint32 nodeId = theNdbCon->theDBnode;
TransporterFacade * tp = TransporterFacade::instance();
tSignal.setLength(4+1);
- return tp->sendSignal(&tSignal, nodeId);
+ int ret= tp->sendSignal(&tSignal, nodeId);
+ if (!ret) checkForceSend(forceSend);
+ return ret;
}
int
-NdbScanOperation::close_impl(TransporterFacade* tp){
+NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend){
Uint32 seq = theNdbCon->theNodeSequence;
Uint32 nodeId = theNdbCon->theDBnode;
- if(seq != tp->getNodeSequence(nodeId)){
+ if(seq != tp->getNodeSequence(nodeId))
+ {
theNdbCon->theReleaseOnClose = true;
return -1;
}
- while(theError.code == 0 && m_sent_receivers_count){
+ /**
+ * Wait for outstanding receivers
+ */
+ while(theError.code == 0 && m_sent_receivers_count)
+ {
theNdb->theWaiter.m_node = nodeId;
theNdb->theWaiter.m_state = WAIT_SCAN;
int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
@@ -1471,18 +1515,52 @@ NdbScanOperation::close_impl(TransporterFacade* tp){
}
}
- if(m_api_receivers_count+m_conf_receivers_count){
- // Send close scan
- if(send_next_scan(0, true) == -1){ // Close scan
- theNdbCon->theReleaseOnClose = true;
- return -1;
- }
+ /**
+ * Move all conf'ed receivers into the api list
+ * so that send_next_scan can check whether they need to be closed
+ */
+ Uint32 api = m_api_receivers_count;
+ Uint32 conf = m_conf_receivers_count;
+
+ if(m_ordered)
+ {
+ /**
+ * Ordered scan, keep the m_api_receivers "to the right"
+ */
+ memmove(m_api_receivers, m_api_receivers+m_current_api_receiver,
+ (theParallelism - m_current_api_receiver) * sizeof(char*));
+ api = (theParallelism - m_current_api_receiver);
+ m_api_receivers_count = api;
+ }
+
+ if(DEBUG_NEXT_RESULT)
+ ndbout_c("close_impl: [order api conf sent curr parr] %d %d %d %d %d %d",
+ m_ordered, api, conf,
+ m_sent_receivers_count, m_current_api_receiver, theParallelism);
+
+ if(api+conf)
+ {
+ /**
+ * There's something to close:
+ * set up m_api_receivers (for send_next_scan)
+ */
+ memcpy(m_api_receivers+api, m_conf_receivers, conf * sizeof(char*));
+ m_api_receivers_count = api + conf;
+ m_conf_receivers_count = 0;
+ }
+
+ // Send close scan
+ if(send_next_scan(api+conf, true, forceSend) == -1)
+ {
+ theNdbCon->theReleaseOnClose = true;
+ return -1;
}
/**
* wait for close scan conf
*/
- while(m_sent_receivers_count+m_api_receivers_count+m_conf_receivers_count){
+ while(m_sent_receivers_count+m_api_receivers_count+m_conf_receivers_count)
+ {
theNdb->theWaiter.m_node = nodeId;
theNdb->theWaiter.m_state = WAIT_SCAN;
int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
@@ -1499,6 +1577,7 @@ NdbScanOperation::close_impl(TransporterFacade* tp){
return -1;
}
}
+
return 0;
}
@@ -1520,7 +1599,7 @@ NdbScanOperation::reset_receivers(Uint32 parallell, Uint32 ordered){
}
int
-NdbScanOperation::restart()
+NdbScanOperation::restart(bool forceSend)
{
TransporterFacade* tp = TransporterFacade::instance();
@@ -1529,7 +1608,7 @@ NdbScanOperation::restart()
{
int res;
- if((res= close_impl(tp)))
+ if((res= close_impl(tp, forceSend)))
{
return res;
}
@@ -1548,13 +1627,13 @@ NdbScanOperation::restart()
}
int
-NdbIndexScanOperation::reset_bounds(){
+NdbIndexScanOperation::reset_bounds(bool forceSend){
int res;
{
TransporterFacade* tp = TransporterFacade::instance();
Guard guard(tp->theMutexPtr);
- res= close_impl(tp);
+ res= close_impl(tp, forceSend);
}
if(!res)
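
restart() and reset_bounds() now forward the forceSend flag to close_impl(), which is what lets the testScanPerf change further down reuse a single index scan operation across iterations. A sketch of that reuse pattern; the helper name and the single equality bound are illustrative, the reset_bounds()/setBound()/nextResult() calls follow the signatures used in this patch:

// Sketch: re-run an already prepared ordered index scan with a new bound
// instead of building a new operation for every iteration.
static int rescan_eq(NdbConnection* trans, NdbIndexScanOperation* iop,
                     NdbResultSet* rs, Uint32 key, bool forceSend)
{
  if (iop->reset_bounds(forceSend) != 0)   // flushes any outstanding scan state
    return -1;
  if (iop->setBound((Uint32)0, NdbIndexScanOperation::BoundEQ, &key) != 0)
    return -1;
  if (trans->execute(NoCommit) == -1)
    return -1;
  int res;
  while ((res = rs->nextResult(true, forceSend)) == 0)
    ;                                      // consume the rows
  return res == 1 ? 0 : -1;
}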
diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp
index 4c42fe1aeef..b2043b2c2c1 100644
--- a/ndb/src/ndbapi/ndb_cluster_connection.cpp
+++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp
@@ -45,7 +45,6 @@ Ndb_cluster_connection::Ndb_cluster_connection(const char *connect_string)
else
m_connect_string= 0;
m_config_retriever= 0;
- m_local_config= 0;
m_connect_thread= 0;
m_connect_callback= 0;
@@ -125,38 +124,31 @@ int Ndb_cluster_connection::connect(int reconnect)
do {
if (m_config_retriever == 0)
{
- if (m_local_config == 0) {
- m_local_config= new LocalConfig();
- if (!m_local_config->init(m_connect_string,0)) {
- ndbout_c("Configuration error: Unable to retrieve local config");
- m_local_config->printError();
- m_local_config->printUsage();
- DBUG_RETURN(-1);
- }
- }
m_config_retriever=
- new ConfigRetriever(*m_local_config, NDB_VERSION, NODE_TYPE_API);
+ new ConfigRetriever(m_connect_string, NDB_VERSION, NODE_TYPE_API);
+ if (m_config_retriever->hasError())
+ {
+ printf("Could not connect initialize handle to management server",
+ m_config_retriever->getErrorString());
+ DBUG_RETURN(-1);
+ }
}
else
if (reconnect == 0)
DBUG_RETURN(0);
if (reconnect)
{
- int r= m_config_retriever->do_connect(1);
+ int r= m_config_retriever->do_connect(0,0,0);
if (r == 1)
DBUG_RETURN(1); // mgmt server not up yet
if (r == -1)
break;
}
else
- if(m_config_retriever->do_connect() == -1)
+ if(m_config_retriever->do_connect(12,5,1) == -1)
break;
- Uint32 nodeId = m_config_retriever->allocNodeId();
- for(Uint32 i = 0; nodeId == 0 && i<5; i++){
- NdbSleep_SecSleep(3);
- nodeId = m_config_retriever->allocNodeId();
- }
+ Uint32 nodeId = m_config_retriever->allocNodeId(4/*retries*/,3/*delay*/);
if(nodeId == 0)
break;
ndb_mgm_configuration * props = m_config_retriever->getConfig();
@@ -200,8 +192,6 @@ Ndb_cluster_connection::~Ndb_cluster_connection()
my_free(m_connect_string,MYF(MY_ALLOW_ZERO_PTR));
if (m_config_retriever)
delete m_config_retriever;
- if (m_local_config)
- delete m_local_config;
DBUG_VOID_RETURN;
}
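
A sketch of the connect-string driven ConfigRetriever flow that replaces LocalConfig here; the wrapper is illustrative, the retry and delay values are the ones the patched Ndb_cluster_connection::connect() uses:

// Sketch only. The caller constructs the retriever, e.g.
//   ConfigRetriever cr(connect_string, NDB_VERSION, NODE_TYPE_API);
// and keeps it alive for as long as the returned configuration is used,
// as the patch does.
#include <ConfigRetriever.hpp>

static Uint32 connect_and_alloc_nodeid(ConfigRetriever& cr)
{
  if (cr.hasError())                       // e.g. connect string did not parse
    return 0;
  if (cr.do_connect(12, 5, 1) != 0)        // 12 retries, 5 s apart, verbose
    return 0;
  Uint32 nodeId = cr.allocNodeId(4 /*retries*/, 3 /*delay*/);
  if (nodeId == 0)
    return 0;
  return cr.getConfig() != 0 ? nodeId : 0;
}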
diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c
index e08b80f2433..bc49358cc63 100644
--- a/ndb/src/ndbapi/ndberror.c
+++ b/ndb/src/ndbapi/ndberror.c
@@ -426,7 +426,8 @@ ErrorBundle ErrorCodes[] = {
{ 4267, IE, "Corrupted blob value" },
{ 4268, IE, "Error in blob head update forced rollback of transaction" },
{ 4268, IE, "Unknown blob error" },
- { 4269, IE, "No connection to ndb management server" }
+ { 4269, IE, "No connection to ndb management server" },
+ { 4335, AE, "Only one autoincrement column allowed per table. Having a table without a primary key uses an autoincremented hidden key, i.e. a table without a primary key cannot have an autoincremented column" }
};
static
diff --git a/ndb/test/include/HugoTransactions.hpp b/ndb/test/include/HugoTransactions.hpp
index 19e4cb43336..b833f2ac629 100644
--- a/ndb/test/include/HugoTransactions.hpp
+++ b/ndb/test/include/HugoTransactions.hpp
@@ -36,15 +36,21 @@ public:
bool allowConstraintViolation = true,
int doSleep = 0,
bool oneTrans = false);
+
int scanReadRecords(Ndb*,
int records,
int abort = 0,
int parallelism = 0,
- bool committed = false);
- int scanReadCommittedRecords(Ndb*,
- int records,
- int abort = 0,
- int parallelism = 0);
+ NdbOperation::LockMode = NdbOperation::LM_Read);
+
+ int scanReadRecords(Ndb*,
+ const NdbDictionary::Index*,
+ int records,
+ int abort = 0,
+ int parallelism = 0,
+ NdbOperation::LockMode = NdbOperation::LM_Read,
+ bool sorted = false);
+
int pkReadRecords(Ndb*,
int records,
int batchsize = 1,
diff --git a/ndb/test/include/NdbRestarter.hpp b/ndb/test/include/NdbRestarter.hpp
index 114726f6a2b..19a88b4f8ad 100644
--- a/ndb/test/include/NdbRestarter.hpp
+++ b/ndb/test/include/NdbRestarter.hpp
@@ -87,8 +87,6 @@ protected:
bool connected;
BaseString addr;
- BaseString host;
- int port;
NdbMgmHandle handle;
ndb_mgm_configuration * m_config;
protected:
diff --git a/ndb/test/include/UtilTransactions.hpp b/ndb/test/include/UtilTransactions.hpp
index 37cd99550a5..23902f3b317 100644
--- a/ndb/test/include/UtilTransactions.hpp
+++ b/ndb/test/include/UtilTransactions.hpp
@@ -53,11 +53,11 @@ public:
int selectCount(Ndb*,
int parallelism = 0,
int* count_rows = NULL,
- ScanLock lock = SL_Read,
+ NdbOperation::LockMode lm = NdbOperation::LM_CommittedRead,
NdbConnection* pTrans = NULL);
int scanReadRecords(Ndb*,
int parallelism,
- bool exclusive,
+ NdbOperation::LockMode lm,
int records,
int noAttribs,
int* attrib_list,
diff --git a/ndb/test/ndbapi/testBlobs.cpp b/ndb/test/ndbapi/testBlobs.cpp
index efa0811aa39..4b532856709 100644
--- a/ndb/test/ndbapi/testBlobs.cpp
+++ b/ndb/test/ndbapi/testBlobs.cpp
@@ -22,6 +22,7 @@
#include <NdbMain.h>
#include <NdbOut.hpp>
#include <NdbTest.hpp>
+#include <NdbTick.h>
struct Bcol {
bool m_nullable;
@@ -59,6 +60,9 @@ struct Opt {
bool m_oneblob;
Bcol m_blob1;
Bcol m_blob2;
+ // perf
+ const char* m_tnameperf;
+ unsigned m_rowsperf;
// bugs
int m_bug;
int (*m_bugtest)();
@@ -84,6 +88,9 @@ struct Opt {
m_oneblob(false),
m_blob1(false, 7, 1137, 10),
m_blob2(true, 99, 55, 1),
+ // perf
+ m_tnameperf("TBLOB2"),
+ m_rowsperf(10000),
// bugs
m_bug(0),
m_bugtest(0) {
@@ -107,6 +114,7 @@ printusage()
<< " -loop N loop N times 0=forever [" << d.m_loop << "]" << endl
<< " -parts N max parts in blob value [" << d.m_parts << "]" << endl
<< " -rows N number of rows [" << d.m_rows << "]" << endl
+ << " -rowsperf N rows for performace test [" << d.m_rowsperf << "]" << endl
<< " -seed N random seed 0=loop number [" << d.m_seed << "]" << endl
<< " -skip xxx skip given tests (see list) [no tests]" << endl
<< " -test xxx only given tests (see list) [all tests]" << endl
@@ -118,6 +126,7 @@ printusage()
<< " i hash index ops" << endl
<< " s table scans" << endl
<< " r ordered index scans" << endl
+ << " p performance test" << endl
<< "additional flags for test/skip" << endl
<< " u update existing blob value" << endl
<< " n normal insert and update" << endl
@@ -1381,6 +1390,292 @@ testmain()
return 0;
}
+// separate performance test
+
+struct Tmr { // stolen from testOIBasic
+ Tmr() {
+ clr();
+ }
+ void clr() {
+ m_on = m_ms = m_cnt = m_time[0] = m_text[0] = 0;
+ }
+ void on() {
+ assert(m_on == 0);
+ m_on = NdbTick_CurrentMillisecond();
+ }
+ void off(unsigned cnt = 0) {
+ NDB_TICKS off = NdbTick_CurrentMillisecond();
+ assert(m_on != 0 && off >= m_on);
+ m_ms += off - m_on;
+ m_cnt += cnt;
+ m_on = 0;
+ }
+ const char* time() {
+ if (m_cnt == 0)
+ sprintf(m_time, "%u ms", m_ms);
+ else
+ sprintf(m_time, "%u ms per %u ( %u ms per 1000 )", m_ms, m_cnt, (1000 * m_ms) / m_cnt);
+ return m_time;
+ }
+ const char* pct (const Tmr& t1) {
+ if (0 < t1.m_ms)
+ sprintf(m_text, "%u pct", (100 * m_ms) / t1.m_ms);
+ else
+ sprintf(m_text, "[cannot measure]");
+ return m_text;
+ }
+ const char* over(const Tmr& t1) {
+ if (0 < t1.m_ms) {
+ if (t1.m_ms <= m_ms)
+ sprintf(m_text, "%u pct", (100 * (m_ms - t1.m_ms)) / t1.m_ms);
+ else
+ sprintf(m_text, "-%u pct", (100 * (t1.m_ms - m_ms)) / t1.m_ms);
+ } else
+ sprintf(m_text, "[cannot measure]");
+ return m_text;
+ }
+ NDB_TICKS m_on;
+ unsigned m_ms;
+ unsigned m_cnt;
+ char m_time[100];
+ char m_text[100];
+};
+
+static int
+testperf()
+{
+ if (! testcase('p'))
+ return 0;
+ DBG("=== perf test ===");
+ g_ndb = new Ndb("TEST_DB");
+ CHK(g_ndb->init() == 0);
+ CHK(g_ndb->waitUntilReady() == 0);
+ g_dic = g_ndb->getDictionary();
+ NdbDictionary::Table tab(g_opt.m_tnameperf);
+ if (g_dic->getTable(tab.getName()) != 0)
+ CHK(g_dic->dropTable(tab) == 0);
+ // col A - pk
+ { NdbDictionary::Column col("A");
+ col.setType(NdbDictionary::Column::Unsigned);
+ col.setPrimaryKey(true);
+ tab.addColumn(col);
+ }
+ // col B - char 20
+ { NdbDictionary::Column col("B");
+ col.setType(NdbDictionary::Column::Char);
+ col.setLength(20);
+ col.setNullable(true);
+ tab.addColumn(col);
+ }
+ // col C - text
+ { NdbDictionary::Column col("C");
+ col.setType(NdbDictionary::Column::Text);
+ col.setInlineSize(20);
+ col.setPartSize(512);
+ col.setStripeSize(1);
+ col.setNullable(true);
+ tab.addColumn(col);
+ }
+ // create
+ CHK(g_dic->createTable(tab) == 0);
+ Uint32 cA = 0, cB = 1, cC = 2;
+ // timers
+ Tmr t1;
+ Tmr t2;
+ // insert char (one trans)
+ {
+ DBG("--- insert char ---");
+ t1.on();
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
+ CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0);
+ CHK(g_opr->insertTuple() == 0);
+ CHK(g_opr->equal(cA, (char*)&k) == 0);
+ CHK(g_opr->setValue(cB, "b") == 0);
+ CHK(g_con->execute(NoCommit) == 0);
+ }
+ t1.off(g_opt.m_rowsperf);
+ CHK(g_con->execute(Rollback) == 0);
+ DBG(t1.time());
+ g_opr = 0;
+ g_con = 0;
+ }
+ // insert text (one trans)
+ {
+ DBG("--- insert text ---");
+ t2.on();
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
+ CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0);
+ CHK(g_opr->insertTuple() == 0);
+ CHK(g_opr->equal(cA, (char*)&k) == 0);
+ CHK((g_bh1 = g_opr->getBlobHandle(cC)) != 0);
+ CHK((g_bh1->setValue("c", 1) == 0));
+ CHK(g_con->execute(NoCommit) == 0);
+ }
+ t2.off(g_opt.m_rowsperf);
+ CHK(g_con->execute(Rollback) == 0);
+ DBG(t2.time());
+ g_bh1 = 0;
+ g_opr = 0;
+ g_con = 0;
+ }
+ // insert overhead
+ DBG("insert overhead: " << t2.over(t1));
+ t1.clr();
+ t2.clr();
+ // insert
+ {
+ DBG("--- insert for read test ---");
+ unsigned n = 0;
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
+ CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0);
+ CHK(g_opr->insertTuple() == 0);
+ CHK(g_opr->equal(cA, (char*)&k) == 0);
+ CHK(g_opr->setValue(cB, "b") == 0);
+ CHK((g_bh1 = g_opr->getBlobHandle(cC)) != 0);
+ CHK((g_bh1->setValue("c", 1) == 0));
+ if (++n == g_opt.m_batch) {
+ CHK(g_con->execute(Commit) == 0);
+ g_ndb->closeTransaction(g_con);
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ n = 0;
+ }
+ }
+ if (n != 0) {
+ CHK(g_con->execute(Commit) == 0);
+ n = 0;
+ }
+ g_bh1 = 0;
+ g_opr = 0;
+ g_con = 0;
+ }
+ // pk read char (one trans)
+ {
+ DBG("--- pk read char ---");
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ Uint32 a;
+ char b[20];
+ t1.on();
+ for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
+ CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0);
+ CHK(g_opr->readTuple() == 0);
+ CHK(g_opr->equal(cA, (char*)&k) == 0);
+ CHK(g_opr->getValue(cA, (char*)&a) != 0);
+ CHK(g_opr->getValue(cB, b) != 0);
+ a = (Uint32)-1;
+ b[0] = 0;
+ CHK(g_con->execute(NoCommit) == 0);
+ CHK(a == k && strcmp(b, "b") == 0);
+ }
+ CHK(g_con->execute(Commit) == 0);
+ t1.off(g_opt.m_rowsperf);
+ DBG(t1.time());
+ g_opr = 0;
+ g_con = 0;
+ }
+ // pk read text (one trans)
+ {
+ DBG("--- pk read text ---");
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ Uint32 a;
+ char c[20];
+ t2.on();
+ for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
+ CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0);
+ CHK(g_opr->readTuple() == 0);
+ CHK(g_opr->equal(cA, (char*)&k) == 0);
+ CHK(g_opr->getValue(cA, (char*)&a) != 0);
+ CHK((g_bh1 = g_opr->getBlobHandle(cC)) != 0);
+ a = (Uint32)-1;
+ c[0] = 0;
+ CHK(g_con->execute(NoCommit) == 0);
+ Uint32 m = 20;
+ CHK(g_bh1->readData(c, m) == 0);
+ CHK(a == k && m == 1 && strcmp(c, "c") == 0);
+ }
+ CHK(g_con->execute(Commit) == 0);
+ t2.off(g_opt.m_rowsperf);
+ DBG(t2.time());
+ g_opr = 0;
+ g_con = 0;
+ }
+ // pk read overhead
+ DBG("pk read overhead: " << t2.over(t1));
+ t1.clr();
+ t2.clr();
+ // scan read char
+ {
+ DBG("--- scan read char ---");
+ NdbResultSet* rs;
+ Uint32 a;
+ char b[20];
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ CHK((g_ops = g_con->getNdbScanOperation(tab.getName())) != 0);
+ CHK((rs = g_ops->readTuples(NdbScanOperation::LM_Read)) != 0);
+ CHK(g_ops->getValue(cA, (char*)&a) != 0);
+ CHK(g_ops->getValue(cB, b) != 0);
+ CHK(g_con->execute(NoCommit) == 0);
+ unsigned n = 0;
+ t1.on();
+ while (1) {
+ a = (Uint32)-1;
+ b[0] = 0;
+ int ret;
+ CHK((ret = rs->nextResult(true)) == 0 || ret == 1);
+ if (ret == 1)
+ break;
+ CHK(a < g_opt.m_rowsperf && strcmp(b, "b") == 0);
+ n++;
+ }
+ CHK(n == g_opt.m_rowsperf);
+ t1.off(g_opt.m_rowsperf);
+ DBG(t1.time());
+ g_ops = 0;
+ g_con = 0;
+ }
+ // scan read text
+ {
+ DBG("--- read text ---");
+ NdbResultSet* rs;
+ Uint32 a;
+ char c[20];
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ CHK((g_ops = g_con->getNdbScanOperation(tab.getName())) != 0);
+ CHK((rs = g_ops->readTuples(NdbScanOperation::LM_Read)) != 0);
+ CHK(g_ops->getValue(cA, (char*)&a) != 0);
+ CHK((g_bh1 = g_ops->getBlobHandle(cC)) != 0);
+ CHK(g_con->execute(NoCommit) == 0);
+ unsigned n = 0;
+ t2.on();
+ while (1) {
+ a = (Uint32)-1;
+ c[0] = 0;
+ int ret;
+ CHK((ret = rs->nextResult(true)) == 0 || ret == 1);
+ if (ret == 1)
+ break;
+ Uint32 m = 20;
+ CHK(g_bh1->readData(c, m) == 0);
+ CHK(a < g_opt.m_rowsperf && m == 1 && strcmp(c, "c") == 0);
+ n++;
+ }
+ CHK(n == g_opt.m_rowsperf);
+ t2.off(g_opt.m_rowsperf);
+ DBG(t2.time());
+ g_bh1 = 0;
+ g_ops = 0;
+ g_con = 0;
+ }
+ // scan read overhead
+ DBG("scan read overhead: " << t2.over(t1));
+ t1.clr();
+ t2.clr();
+ delete g_ndb;
+ return 0;
+}
+
// bug tests
static int
@@ -1498,6 +1793,12 @@ NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)
continue;
}
}
+ if (strcmp(arg, "-rowsperf") == 0) {
+ if (++argv, --argc > 0) {
+ g_opt.m_rowsperf = atoi(argv[0]);
+ continue;
+ }
+ }
if (strcmp(arg, "-seed") == 0) {
if (++argv, --argc > 0) {
g_opt.m_seed = atoi(argv[0]);
@@ -1558,7 +1859,7 @@ NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)
strcat(b, "r");
g_opt.m_skip = strdup(b);
}
- if (testmain() == -1) {
+ if (testmain() == -1 || testperf() == -1) {
ndbout << "line " << __LINE__ << " FAIL loop=" << g_loop << endl;
return NDBT_ProgramExit(NDBT_FAILED);
}
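
The timings in testperf() come from the small Tmr helper shown above (borrowed from testOIBasic). A compressed usage sketch, assuming it is compiled inside testBlobs.cpp where Tmr and ndbout are in scope; the function and the work it brackets are placeholders:

// Usage sketch for Tmr: bracket two comparable workloads, then report
// per-row time and the relative overhead of the second one.
static void report_overhead(unsigned rows)
{
  Tmr t1, t2;
  t1.on();
  /* ... perform <rows> operations on the char column ... */
  t1.off(rows);
  t2.on();
  /* ... perform <rows> operations on the blob column ... */
  t2.off(rows);
  ndbout << "char: " << t1.time() << endl;         // "X ms per N ( Y ms per 1000 )"
  ndbout << "text: " << t2.time() << endl;
  ndbout << "overhead: " << t2.over(t1) << endl;   // percent t2 is slower than t1
}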
diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp
index 712ab2e4d25..0a43bb02fff 100644
--- a/ndb/test/ndbapi/testDict.cpp
+++ b/ndb/test/ndbapi/testDict.cpp
@@ -1480,8 +1480,10 @@ runTestDictionaryPerf(NDBT_Context* ctx, NDBT_Step* step){
}
int runFailAddFragment(NDBT_Context* ctx, NDBT_Step* step){
+ static int acclst[] = { 3001 };
static int tuplst[] = { 4007, 4008, 4009, 4010, 4011, 4012 };
static int tuxlst[] = { 12001, 12002, 12003, 12004, 12005, 12006 };
+ static unsigned acccnt = sizeof(acclst)/sizeof(acclst[0]);
static unsigned tupcnt = sizeof(tuplst)/sizeof(tuplst[0]);
static unsigned tuxcnt = sizeof(tuxlst)/sizeof(tuxlst[0]);
@@ -1509,6 +1511,19 @@ int runFailAddFragment(NDBT_Context* ctx, NDBT_Step* step){
(void)pDic->dropTable(tab.getName());
for (int l = 0; l < loops; l++) {
+ for (unsigned i0 = 0; i0 < acccnt; i0++) {
+ unsigned j = (l == 0 ? i0 : myRandom48(acccnt));
+ int errval = acclst[j];
+ g_info << "insert error node=" << nodeId << " value=" << errval << endl;
+ CHECK2(restarter.insertErrorInNode(nodeId, errval) == 0,
+ "failed to set error insert");
+ CHECK2(pDic->createTable(tab) != 0,
+ "failed to fail after error insert " << errval);
+ CHECK2(pDic->createTable(tab) == 0,
+ pDic->getNdbError());
+ CHECK2(pDic->dropTable(tab.getName()) == 0,
+ pDic->getNdbError());
+ }
for (unsigned i1 = 0; i1 < tupcnt; i1++) {
unsigned j = (l == 0 ? i1 : myRandom48(tupcnt));
int errval = tuplst[j];
@@ -1638,7 +1653,7 @@ TESTCASE("DictionaryPerf",
INITIALIZER(runTestDictionaryPerf);
}
TESTCASE("FailAddFragment",
- "Fail add fragment or attribute in TUP or TUX\n"){
+ "Fail add fragment or attribute in ACC or TUP or TUX\n"){
INITIALIZER(runFailAddFragment);
}
NDBT_TESTSUITE_END(testDict);
@@ -1650,5 +1665,3 @@ int main(int argc, const char** argv){
myRandom48Init(NdbTick_CurrentMillisecond());
return testDict.execute(argc, argv);
}
-
-
diff --git a/ndb/test/ndbapi/testReadPerf.cpp b/ndb/test/ndbapi/testReadPerf.cpp
index 380a809ad00..3adcb5a2d9b 100644
--- a/ndb/test/ndbapi/testReadPerf.cpp
+++ b/ndb/test/ndbapi/testReadPerf.cpp
@@ -391,8 +391,15 @@ run_read(){
void
print_result(){
+ int tmp = 1;
+ tmp *= g_paramters[P_RANGE].value;
+ tmp *= g_paramters[P_LOOPS].value;
+
+ int t, t2;
for(int i = 0; i<P_OP_TYPES; i++){
- g_err.println("%s avg: %u us/row", g_ops[i],
- (1000*g_times[i])/(g_paramters[P_RANGE].value*g_paramters[P_LOOPS].value));
+ g_err << g_ops[i] << " avg: "
+ << (int)((1000*g_times[i])/tmp)
+ << " us/row ("
+ << (1000 * tmp)/g_times[i] << " rows / sec)" << endl;
}
}
diff --git a/ndb/test/ndbapi/testScan.cpp b/ndb/test/ndbapi/testScan.cpp
index 0cd30dfefde..51913e8fbf9 100644
--- a/ndb/test/ndbapi/testScan.cpp
+++ b/ndb/test/ndbapi/testScan.cpp
@@ -242,8 +242,9 @@ int runScanReadCommitted(NDBT_Context* ctx, NDBT_Step* step){
HugoTransactions hugoTrans(*ctx->getTab());
while (i<loops && !ctx->isTestStopped()) {
g_info << i << ": ";
- if (hugoTrans.scanReadCommittedRecords(GETNDB(step), records,
- abort, parallelism) != 0){
+ if (hugoTrans.scanReadRecords(GETNDB(step), records,
+ abort, parallelism,
+ NdbOperation::LM_CommittedRead) != 0){
return NDBT_FAILED;
}
i++;
@@ -639,7 +640,7 @@ int runCheckGetValue(NDBT_Context* ctx, NDBT_Step* step){
g_info << (unsigned)i << endl;
if(utilTrans.scanReadRecords(GETNDB(step),
parallelism,
- false,
+ NdbOperation::LM_Read,
records,
alist.attriblist[i]->numAttribs,
alist.attriblist[i]->attribs) != 0){
@@ -647,7 +648,7 @@ int runCheckGetValue(NDBT_Context* ctx, NDBT_Step* step){
}
if(utilTrans.scanReadRecords(GETNDB(step),
parallelism,
- true,
+ NdbOperation::LM_Read,
records,
alist.attriblist[i]->numAttribs,
alist.attriblist[i]->attribs) != 0){
diff --git a/ndb/test/ndbapi/testScanPerf.cpp b/ndb/test/ndbapi/testScanPerf.cpp
index 003fc67179f..ee2a92e88a9 100644
--- a/ndb/test/ndbapi/testScanPerf.cpp
+++ b/ndb/test/ndbapi/testScanPerf.cpp
@@ -39,8 +39,9 @@ struct Parameter {
#define P_LOOPS 8
#define P_CREATE 9
#define P_LOAD 10
+#define P_RESET 11
-#define P_MAX 11
+#define P_MAX 12
static
Parameter
@@ -55,7 +56,8 @@ g_paramters[] = {
{ "size", 1000000, 1, ~0 },
{ "iterations", 3, 1, ~0 },
{ "create_drop", 1, 0, 1 },
- { "data", 1, 0, 1 }
+ { "data", 1, 0, 1 },
+ { "q-reset bounds", 0, 1, 0 }
};
static Ndb* g_ndb = 0;
@@ -219,21 +221,29 @@ run_scan(){
NDB_TICKS start1, stop;
int sum_time= 0;
+ int sample_rows = 0;
+ NDB_TICKS sample_start = NdbTick_CurrentMillisecond();
+
Uint32 tot = g_paramters[P_ROWS].value;
+ if(g_paramters[P_BOUND].value == 2 || g_paramters[P_FILT].value == 2)
+ iter *= g_paramters[P_ROWS].value;
+
+ NdbScanOperation * pOp = 0;
+ NdbIndexScanOperation * pIOp = 0;
+ NdbConnection * pTrans = 0;
+ NdbResultSet * rs = 0;
+ int check = 0;
+
for(int i = 0; i<iter; i++){
start1 = NdbTick_CurrentMillisecond();
- NdbConnection * pTrans = g_ndb->startTransaction();
+ pTrans = pTrans ? pTrans : g_ndb->startTransaction();
if(!pTrans){
g_err << "Failed to start transaction" << endl;
err(g_ndb->getNdbError());
return -1;
}
- NdbScanOperation * pOp;
- NdbIndexScanOperation * pIOp;
-
- NdbResultSet * rs;
int par = g_paramters[P_PARRA].value;
int bat = g_paramters[P_BATCH].value;
NdbScanOperation::LockMode lm;
@@ -256,9 +266,17 @@ run_scan(){
assert(pOp);
rs = pOp->readTuples(lm, bat, par);
} else {
- pOp = pIOp = pTrans->getNdbIndexScanOperation(g_indexname, g_tablename);
- bool ord = g_paramters[P_ACCESS].value == 2;
- rs = pIOp->readTuples(lm, bat, par, ord);
+ if(g_paramters[P_RESET].value == 0 || pIOp == 0)
+ {
+ pOp= pIOp= pTrans->getNdbIndexScanOperation(g_indexname, g_tablename);
+ bool ord = g_paramters[P_ACCESS].value == 2;
+ rs = pIOp->readTuples(lm, bat, par, ord);
+ }
+ else
+ {
+ pIOp->reset_bounds();
+ }
+
switch(g_paramters[P_BOUND].value){
case 0: // All
break;
@@ -268,20 +286,22 @@ run_scan(){
case 2: { // 1 row
default:
assert(g_table->getNoOfPrimaryKeys() == 1); // only impl. so far
- abort();
-#if 0
int tot = g_paramters[P_ROWS].value;
int row = rand() % tot;
+#if 0
fix_eq_bound(pIOp, row);
+#else
+ pIOp->setBound((Uint32)0, NdbIndexScanOperation::BoundEQ, &row);
#endif
break;
}
}
+ if(g_paramters[P_RESET].value == 1)
+ goto execute;
}
assert(pOp);
assert(rs);
- int check = 0;
switch(g_paramters[P_FILT].value){
case 0: // All
check = pOp->interpret_exit_ok();
@@ -313,7 +333,7 @@ run_scan(){
for(int i = 0; i<g_table->getNoOfColumns(); i++){
pOp->getValue(i);
}
-
+execute:
int rows = 0;
check = pTrans->execute(NoCommit);
assert(check == 0);
@@ -334,19 +354,29 @@ run_scan(){
return -1;
}
assert(check == 1);
- g_info << "Found " << rows << " rows" << endl;
-
- pTrans->close();
-
+ if(g_paramters[P_RESET].value == 0)
+ {
+ pTrans->close();
+ pTrans = 0;
+ }
stop = NdbTick_CurrentMillisecond();
+
int time_passed= (int)(stop - start1);
- g_err.println("Time: %d ms = %u rows/sec", time_passed,
- (1000*tot)/time_passed);
+ sample_rows += rows;
sum_time+= time_passed;
+
+ if(sample_rows >= tot)
+ {
+ int sample_time = (int)(stop - sample_start);
+ g_info << "Found " << sample_rows << " rows" << endl;
+ g_err.println("Time: %d ms = %u rows/sec", sample_time,
+ (1000*sample_rows)/sample_time);
+ sample_rows = 0;
+ sample_start = stop;
+ }
}
- sum_time= sum_time / iter;
-
- g_err.println("Avg time: %d ms = %u rows/sec", sum_time,
- (1000*tot)/sum_time);
+
+ g_err.println("Avg time: %d ms = %u rows/sec", sum_time/iter,
+ (1000*tot*iter)/sum_time);
return 0;
}
diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt
index 8d7e8a06c72..aa38fb4763c 100644
--- a/ndb/test/run-test/daily-basic-tests.txt
+++ b/ndb/test/run-test/daily-basic-tests.txt
@@ -222,6 +222,10 @@ max-time: 500
cmd: testScan
args: -n ScanRead488 -l 10 T6
+max-time: 500
+cmd: testScan
+args: -n ScanRead488Timeout -l 10 T6
+
max-time: 600
cmd: testScan
args: -n ScanRead40 -l 100 T2
diff --git a/ndb/test/run-test/main.cpp b/ndb/test/run-test/main.cpp
index e5f73bc6a5c..ac7710d9546 100644
--- a/ndb/test/run-test/main.cpp
+++ b/ndb/test/run-test/main.cpp
@@ -538,15 +538,19 @@ connect_ndb_mgm(atrt_process & proc){
}
BaseString tmp = proc.m_hostname;
tmp.appfmt(":%d", proc.m_ndb_mgm_port);
- time_t start = time(0);
- const time_t max_connect_time = 30;
- do {
- if(ndb_mgm_connect(handle, tmp.c_str()) != -1){
- proc.m_ndb_mgm_handle = handle;
- return true;
- }
- sleep(1);
- } while(time(0) < (start + max_connect_time));
+
+ if (ndb_mgm_set_connectstring(handle,tmp.c_str()))
+ {
+ g_logger.critical("Unable to parse connectstring");
+ return false;
+ }
+
+ if(ndb_mgm_connect(handle, 30, 1, 0) != -1)
+ {
+ proc.m_ndb_mgm_handle = handle;
+ return true;
+ }
+
g_logger.critical("Unable to connect to ndb mgm %s", tmp.c_str());
return false;
}
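
The same mgmapi change shows up in the test tools below: the connect string is set on the handle first, then ndb_mgm_connect() performs its own bounded retries. A sketch with the retry values used here; the wrapper and its cleanup are illustrative:

// Sketch only: connect to ndb_mgmd with 30 retries, 1 second apart, quiet.
#include <mgmapi.h>
#include <BaseString.hpp>

static NdbMgmHandle connect_mgm(const char* host, int port)
{
  NdbMgmHandle handle = ndb_mgm_create_handle();
  if (handle == 0)
    return 0;
  BaseString tmp;
  tmp.assfmt("%s:%d", host, port);
  // a non-zero return means the connect string did not parse
  if (ndb_mgm_set_connectstring(handle, tmp.c_str()) != 0 ||
      ndb_mgm_connect(handle, 30 /*retries*/, 1 /*delay s*/, 0 /*verbose*/) == -1)
  {
    ndb_mgm_destroy_handle(&handle);
    return 0;
  }
  return handle;
}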
diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp
index 456bfffbb77..096f5406bbf 100644
--- a/ndb/test/src/HugoTransactions.cpp
+++ b/ndb/test/src/HugoTransactions.cpp
@@ -29,26 +29,175 @@ HugoTransactions::~HugoTransactions(){
deallocRows();
}
-
-int HugoTransactions::scanReadCommittedRecords(Ndb* pNdb,
+int
+HugoTransactions::scanReadRecords(Ndb* pNdb,
int records,
int abortPercent,
- int parallelism){
- return scanReadRecords(pNdb, records, abortPercent, parallelism, true);
+ int parallelism,
+ NdbOperation::LockMode lm)
+{
+
+ int retryAttempt = 0;
+ const int retryMax = 100;
+ int check, a;
+ NdbConnection *pTrans;
+ NdbScanOperation *pOp;
+
+ while (true){
+
+ if (retryAttempt >= retryMax){
+ g_err << "ERROR: has retried this operation " << retryAttempt
+ << " times, failing!" << endl;
+ return NDBT_FAILED;
+ }
+
+ pTrans = pNdb->startTransaction();
+ if (pTrans == NULL) {
+ const NdbError err = pNdb->getNdbError();
+
+ if (err.status == NdbError::TemporaryError){
+ ERR(err);
+ NdbSleep_MilliSleep(50);
+ retryAttempt++;
+ continue;
+ }
+ ERR(err);
+ return NDBT_FAILED;
+ }
+
+ pOp = pTrans->getNdbScanOperation(tab.getName());
+ if (pOp == NULL) {
+ ERR(pTrans->getNdbError());
+ pNdb->closeTransaction(pTrans);
+ return NDBT_FAILED;
+ }
+
+ NdbResultSet * rs;
+ rs = pOp ->readTuples(lm);
+
+ if( rs == 0 ) {
+ ERR(pTrans->getNdbError());
+ pNdb->closeTransaction(pTrans);
+ return NDBT_FAILED;
+ }
+
+ check = pOp->interpret_exit_ok();
+ if( check == -1 ) {
+ ERR(pTrans->getNdbError());
+ pNdb->closeTransaction(pTrans);
+ return NDBT_FAILED;
+ }
+
+ for(a = 0; a<tab.getNoOfColumns(); a++){
+ if((row.attributeStore(a) =
+ pOp->getValue(tab.getColumn(a)->getName())) == 0) {
+ ERR(pTrans->getNdbError());
+ pNdb->closeTransaction(pTrans);
+ return NDBT_FAILED;
+ }
+ }
+
+ check = pTrans->execute(NoCommit);
+ if( check == -1 ) {
+ const NdbError err = pTrans->getNdbError();
+ if (err.status == NdbError::TemporaryError){
+ ERR(err);
+ pNdb->closeTransaction(pTrans);
+ NdbSleep_MilliSleep(50);
+ retryAttempt++;
+ continue;
+ }
+ ERR(err);
+ pNdb->closeTransaction(pTrans);
+ return NDBT_FAILED;
+ }
+
+ // Abort after 1-100 or 1-records rows
+ int ranVal = rand();
+ int abortCount = ranVal % (records == 0 ? 100 : records);
+ bool abortTrans = false;
+ if (abort > 0){
+ // Abort if abortCount is less then abortPercent
+ if (abortCount < abortPercent)
+ abortTrans = true;
+ }
+
+ int eof;
+ int rows = 0;
+ while((eof = rs->nextResult(true)) == 0){
+ rows++;
+ if (calc.verifyRowValues(&row) != 0){
+ pNdb->closeTransaction(pTrans);
+ return NDBT_FAILED;
+ }
+
+ if (abortCount == rows && abortTrans == true){
+ ndbout << "Scan is aborted" << endl;
+ g_info << "Scan is aborted" << endl;
+ rs->close();
+ if( check == -1 ) {
+ ERR(pTrans->getNdbError());
+ pNdb->closeTransaction(pTrans);
+ return NDBT_FAILED;
+ }
+
+ pNdb->closeTransaction(pTrans);
+ return NDBT_OK;
+ }
+ }
+ if (eof == -1) {
+ const NdbError err = pTrans->getNdbError();
+
+ if (err.status == NdbError::TemporaryError){
+ ERR_INFO(err);
+ pNdb->closeTransaction(pTrans);
+ NdbSleep_MilliSleep(50);
+ switch (err.code){
+ case 488:
+ case 245:
+ case 490:
+ // Too many active scans, no limit on number of retry attempts
+ break;
+ default:
+ retryAttempt++;
+ }
+ continue;
+ }
+ ERR(err);
+ pNdb->closeTransaction(pTrans);
+ return NDBT_FAILED;
+ }
+
+ pNdb->closeTransaction(pTrans);
+
+ g_info << rows << " rows have been read" << endl;
+ if (records != 0 && rows != records){
+ g_err << "Check expected number of records failed" << endl
+ << " expected=" << records <<", " << endl
+ << " read=" << rows << endl;
+ return NDBT_FAILED;
+ }
+
+ return NDBT_OK;
+ }
+ return NDBT_FAILED;
}
int
HugoTransactions::scanReadRecords(Ndb* pNdb,
+ const NdbDictionary::Index * pIdx,
int records,
int abortPercent,
int parallelism,
- bool committed){
+ NdbOperation::LockMode lm,
+ bool sorted)
+{
int retryAttempt = 0;
const int retryMax = 100;
int check, a;
NdbConnection *pTrans;
- NdbScanOperation *pOp;
+ NdbIndexScanOperation *pOp;
while (true){
@@ -72,7 +221,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- pOp = pTrans->getNdbScanOperation(tab.getName());
+ pOp = pTrans->getNdbIndexScanOperation(pIdx->getName(), tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -80,8 +229,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
}
NdbResultSet * rs;
- rs = pOp ->readTuples(committed ? NdbScanOperation::LM_CommittedRead :
- NdbScanOperation::LM_Read);
+ rs = pOp ->readTuples(lm, 0, parallelism, sorted);
if( rs == 0 ) {
ERR(pTrans->getNdbError());
diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp
index 367223f8c98..1434617c988 100644
--- a/ndb/test/src/NDBT_Test.cpp
+++ b/ndb/test/src/NDBT_Test.cpp
@@ -839,9 +839,9 @@ void NDBT_TestSuite::execute(Ndb* ndb, const NdbDictionary::Table* pTab,
continue;
}
pTab2 = pDict->getTable(pTab->getName());
- } else {
+ } else if(!pTab2) {
pTab2 = pTab;
- }
+ }
ctx = new NDBT_Context();
ctx->setTab(pTab2);
diff --git a/ndb/test/src/NdbBackup.cpp b/ndb/test/src/NdbBackup.cpp
index 1ce48d495a5..09f52bf0bed 100644
--- a/ndb/test/src/NdbBackup.cpp
+++ b/ndb/test/src/NdbBackup.cpp
@@ -69,28 +69,19 @@ NdbBackup::getBackupDataDirForNode(int _node_id){
/**
* Fetch configuration from management server
*/
- LocalConfig lc;
- if (!lc.init(0,0)) {
- abort();
- }
- ConfigRetriever cr(lc, 0, NODE_TYPE_API);
- ndb_mgm_configuration * p = 0;
+ ndb_mgm_configuration *p;
+ if (connect())
+ return NULL;
- BaseString tmp; tmp.assfmt("%s:%d", host.c_str(), port);
- NdbMgmHandle handle = ndb_mgm_create_handle();
- if(handle == 0 || ndb_mgm_connect(handle, tmp.c_str()) != 0 ||
- (p = ndb_mgm_get_configuration(handle, 0)) == 0){
-
- const char * s = 0;
- if(p == 0 && handle != 0){
- s = ndb_mgm_get_latest_error_msg(handle);
- if(s == 0)
- s = "No error given!";
+ if ((p = ndb_mgm_get_configuration(handle, 0)) == 0)
+ {
+ const char * s= ndb_mgm_get_latest_error_msg(handle);
+ if(s == 0)
+ s = "No error given!";
- ndbout << "Could not fetch configuration" << endl;
- ndbout << s << endl;
- return NULL;
- }
+ ndbout << "Could not fetch configuration" << endl;
+ ndbout << s << endl;
+ return NULL;
}
/**
@@ -155,13 +146,14 @@ NdbBackup::execRestore(bool _restore_data,
ndbout << "scp res: " << res << endl;
- BaseString::snprintf(buf, 255, "%sndb_restore -c \"host=%s\" -n %d -b %d %s %s .",
+ BaseString::snprintf(buf, 255, "%sndb_restore -c \"%s:%d\" -n %d -b %d %s %s .",
#if 1
"",
#else
"valgrind --leak-check=yes -v "
#endif
- addr.c_str(),
+ ndb_mgm_get_connected_host(handle),
+ ndb_mgm_get_connected_port(handle),
_node_id,
_backup_id,
_restore_data?"-r":"",
diff --git a/ndb/test/src/NdbRestarter.cpp b/ndb/test/src/NdbRestarter.cpp
index 4d6d3ddc001..91c0963feae 100644
--- a/ndb/test/src/NdbRestarter.cpp
+++ b/ndb/test/src/NdbRestarter.cpp
@@ -18,7 +18,6 @@
#include <NdbOut.hpp>
#include <NdbSleep.h>
#include <NdbTick.h>
-#include <LocalConfig.hpp>
#include <mgmapi_debug.h>
#include <NDBT_Output.hpp>
#include <random.h>
@@ -33,42 +32,11 @@
NdbRestarter::NdbRestarter(const char* _addr):
connected(false),
- port(-1),
handle(NULL),
m_config(0)
{
if (_addr == NULL){
- LocalConfig lcfg;
- if(!lcfg.init()){
- lcfg.printError();
- lcfg.printUsage();
- g_err << "NdbRestarter - Error parsing local config file" << endl;
- return;
- }
-
- if (lcfg.ids.size() == 0){
- g_err << "NdbRestarter - No management servers configured in local config file" << endl;
- return;
- }
-
- for (int i = 0; i<lcfg.ids.size(); i++){
- MgmtSrvrId * m = &lcfg.ids[i];
-
- switch(m->type){
- case MgmId_TCP:
- char buf[255];
- snprintf(buf, 255, "%s:%d", m->name.c_str(), m->port);
- addr.assign(buf);
- host.assign(m->name.c_str());
- port = m->port;
- return;
- break;
- case MgmId_File:
- break;
- default:
- break;
- }
- }
+ addr.assign("");
} else {
addr.assign(_addr);
}
@@ -391,13 +359,22 @@ NdbRestarter::isConnected(){
int
NdbRestarter::connect(){
+ disconnect();
handle = ndb_mgm_create_handle();
if (handle == NULL){
g_err << "handle == NULL" << endl;
return -1;
}
g_info << "Connecting to mgmsrv at " << addr.c_str() << endl;
- if (ndb_mgm_connect(handle, addr.c_str()) == -1) {
+ if (ndb_mgm_set_connectstring(handle,addr.c_str()))
+ {
+ MGMERR(handle);
+ g_err << "Connection to " << addr.c_str() << " failed" << endl;
+ return -1;
+ }
+
+ if (ndb_mgm_connect(handle, 0, 0, 0) == -1)
+ {
MGMERR(handle);
g_err << "Connection to " << addr.c_str() << " failed" << endl;
return -1;
diff --git a/ndb/test/src/UtilTransactions.cpp b/ndb/test/src/UtilTransactions.cpp
index c0e6effd244..869f7fc76cb 100644
--- a/ndb/test/src/UtilTransactions.cpp
+++ b/ndb/test/src/UtilTransactions.cpp
@@ -619,7 +619,7 @@ UtilTransactions::addRowToInsert(Ndb* pNdb,
int
UtilTransactions::scanReadRecords(Ndb* pNdb,
int parallelism,
- bool exclusive,
+ NdbOperation::LockMode lm,
int records,
int noAttribs,
int *attrib_list,
@@ -669,10 +669,7 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuples(exclusive ?
- NdbScanOperation::LM_Exclusive :
- NdbScanOperation::LM_Read,
- 0, parallelism);
+ NdbResultSet * rs = pOp->readTuples(lm, 0, parallelism);
if( rs == 0 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -761,7 +758,7 @@ int
UtilTransactions::selectCount(Ndb* pNdb,
int parallelism,
int* count_rows,
- ScanLock lock,
+ NdbOperation::LockMode lm,
NdbConnection* pTrans){
int retryAttempt = 0;
@@ -785,19 +782,7 @@ UtilTransactions::selectCount(Ndb* pNdb,
return NDBT_FAILED;
}
- NdbResultSet * rs;
- switch(lock){
- case SL_ReadHold:
- rs = pOp->readTuples(NdbScanOperation::LM_Read);
- break;
- case SL_Exclusive:
- rs = pOp->readTuples(NdbScanOperation::LM_Exclusive);
- break;
- case SL_Read:
- default:
- rs = pOp->readTuples(NdbScanOperation::LM_CommittedRead);
- }
-
+ NdbResultSet * rs = pOp->readTuples(lm);
if( rs == 0) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
diff --git a/ndb/test/tools/create_index.cpp b/ndb/test/tools/create_index.cpp
index 75a657522f6..6e4c5377f4a 100644
--- a/ndb/test/tools/create_index.cpp
+++ b/ndb/test/tools/create_index.cpp
@@ -30,7 +30,7 @@ main(int argc, const char** argv){
const char* _dbname = "TEST_DB";
int _help = 0;
- int _ordered, _pk;
+ int _ordered = 0, _pk = 1;
struct getargs args[] = {
{ "database", 'd', arg_string, &_dbname, "dbname",
diff --git a/ndb/test/tools/hugoScanRead.cpp b/ndb/test/tools/hugoScanRead.cpp
index cdfdcea4654..42180207a8a 100644
--- a/ndb/test/tools/hugoScanRead.cpp
+++ b/ndb/test/tools/hugoScanRead.cpp
@@ -35,13 +35,17 @@ int main(int argc, const char** argv){
int _parallelism = 1;
const char* _tabname = NULL;
int _help = 0;
-
+ int lock = NdbOperation::LM_Read;
+ int sorted = 0;
+
struct getargs args[] = {
{ "aborts", 'a', arg_integer, &_abort, "percent of transactions that are aborted", "abort%" },
{ "loops", 'l', arg_integer, &_loops, "number of times to run this program(0=infinite loop)", "loops" },
{ "parallelism", 'p', arg_integer, &_parallelism, "parallelism(1-240)", "para" },
{ "records", 'r', arg_integer, &_records, "Number of records", "recs" },
- { "usage", '?', arg_flag, &_help, "Print help", "" }
+ { "usage", '?', arg_flag, &_help, "Print help", "" },
+ { "lock", 'm', arg_integer, &lock, "lock mode", "" },
+ { "sorted", 's', arg_flag, &sorted, "sorted", "" }
};
int num_args = sizeof(args) / sizeof(args[0]);
int optind = 0;
@@ -73,16 +77,48 @@ int main(int argc, const char** argv){
ndbout << " Table " << _tabname << " does not exist!" << endl;
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
+
+ const NdbDictionary::Index * pIdx = 0;
+ if(optind+1 < argc)
+ {
+ pIdx = MyNdb.getDictionary()->getIndex(argv[optind+1], _tabname);
+ if(!pIdx)
+ ndbout << " Index " << argv[optind+1] << " not found" << endl;
+ else
+ if(pIdx->getType() != NdbDictionary::Index::UniqueOrderedIndex &&
+ pIdx->getType() != NdbDictionary::Index::OrderedIndex)
+ {
+ ndbout << " Index " << argv[optind+1] << " is not scannable" << endl;
+ pIdx = 0;
+ }
+ }
HugoTransactions hugoTrans(*pTab);
int i = 0;
while (i<_loops || _loops==0) {
ndbout << i << ": ";
- if(hugoTrans.scanReadRecords(&MyNdb,
- 0,
- _abort,
- _parallelism) != 0){
- return NDBT_ProgramExit(NDBT_FAILED);
+ if(!pIdx)
+ {
+ if(hugoTrans.scanReadRecords(&MyNdb,
+ 0,
+ _abort,
+ _parallelism,
+ (NdbOperation::LockMode)lock) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ }
+ else
+ {
+ if(hugoTrans.scanReadRecords(&MyNdb, pIdx,
+ 0,
+ _abort,
+ _parallelism,
+ (NdbOperation::LockMode)lock,
+ sorted) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
}
i++;
}
diff --git a/ndb/tools/delete_all.cpp b/ndb/tools/delete_all.cpp
index a4fd73a5128..046ac8005d2 100644
--- a/ndb/tools/delete_all.cpp
+++ b/ndb/tools/delete_all.cpp
@@ -67,7 +67,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
int main(int argc, char** argv){
NDB_INIT(argv[0]);
- const char *load_default_groups[]= { "ndb_tools",0 };
+ const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
diff --git a/ndb/tools/desc.cpp b/ndb/tools/desc.cpp
index 8f7a2031ef0..c5e9efdfa8a 100644
--- a/ndb/tools/desc.cpp
+++ b/ndb/tools/desc.cpp
@@ -67,7 +67,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
int main(int argc, char** argv){
NDB_INIT(argv[0]);
- const char *load_default_groups[]= { "ndb_tools",0 };
+ const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
diff --git a/ndb/tools/drop_index.cpp b/ndb/tools/drop_index.cpp
index 1d4b454682f..6600811e0c4 100644
--- a/ndb/tools/drop_index.cpp
+++ b/ndb/tools/drop_index.cpp
@@ -64,7 +64,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
int main(int argc, char** argv){
NDB_INIT(argv[0]);
- const char *load_default_groups[]= { "ndb_tools",0 };
+ const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
diff --git a/ndb/tools/drop_tab.cpp b/ndb/tools/drop_tab.cpp
index 3362c7de47b..0661a8c599b 100644
--- a/ndb/tools/drop_tab.cpp
+++ b/ndb/tools/drop_tab.cpp
@@ -64,7 +64,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
int main(int argc, char** argv){
NDB_INIT(argv[0]);
- const char *load_default_groups[]= { "ndb_tools",0 };
+ const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
diff --git a/ndb/tools/listTables.cpp b/ndb/tools/listTables.cpp
index 05e864a35c4..b923207a4fe 100644
--- a/ndb/tools/listTables.cpp
+++ b/ndb/tools/listTables.cpp
@@ -220,7 +220,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
int main(int argc, char** argv){
NDB_INIT(argv[0]);
const char* _tabname;
- const char *load_default_groups[]= { "ndb_tools",0 };
+ const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
diff --git a/ndb/tools/restore/restore_main.cpp b/ndb/tools/restore/restore_main.cpp
index c43791c6723..409ebd54764 100644
--- a/ndb/tools/restore/restore_main.cpp
+++ b/ndb/tools/restore/restore_main.cpp
@@ -143,7 +143,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
bool
readArguments(int *pargc, char*** pargv)
{
- const char *load_default_groups[]= { "ndb_tools","ndb_restore",0 };
+ const char *load_default_groups[]= { "mysql_cluster","ndb_restore",0 };
load_defaults("my",load_default_groups,pargc,pargv);
if (handle_options(pargc, pargv, my_long_options, get_one_option))
{
diff --git a/ndb/tools/select_all.cpp b/ndb/tools/select_all.cpp
index 758c1e48c88..5efeed485a4 100644
--- a/ndb/tools/select_all.cpp
+++ b/ndb/tools/select_all.cpp
@@ -50,7 +50,7 @@ static struct my_option my_long_options[] =
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "parallelism", 'p', "parallelism",
(gptr*) &_parallelism, (gptr*) &_parallelism, 0,
- GET_INT, REQUIRED_ARG, 240, 0, 0, 0, 0, 0 },
+ GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "lock", 'l', "Read(0), Read-hold(1), Exclusive(2)",
(gptr*) &_lock, (gptr*) &_lock, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
@@ -105,7 +105,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
int main(int argc, char** argv){
NDB_INIT(argv[0]);
- const char *load_default_groups[]= { "ndb_tools",0 };
+ const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
const char* _tabname;
int ho_error;
@@ -133,13 +133,18 @@ int main(int argc, char** argv){
const NdbDictionary::Table* pTab = NDBT_Table::discoverTableFromDb(&MyNdb, _tabname);
const NdbDictionary::Index * pIdx = 0;
if(argc > 1){
- pIdx = MyNdb.getDictionary()->getIndex(argv[0], _tabname);
+ pIdx = MyNdb.getDictionary()->getIndex(argv[1], _tabname);
}
if(pTab == NULL){
ndbout << " Table " << _tabname << " does not exist!" << endl;
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
+
+ if(argc > 1 && pIdx == 0)
+ {
+ ndbout << " Index " << argv[1] << " does not exists" << endl;
+ }
if(_order && pIdx == NULL){
ndbout << " Order flag given without an index" << endl;
diff --git a/ndb/tools/select_count.cpp b/ndb/tools/select_count.cpp
index 6ee49ddbff0..c3491f842d8 100644
--- a/ndb/tools/select_count.cpp
+++ b/ndb/tools/select_count.cpp
@@ -83,7 +83,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
int main(int argc, char** argv){
NDB_INIT(argv[0]);
- const char *load_default_groups[]= { "ndb_tools",0 };
+ const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
diff --git a/ndb/tools/waiter.cpp b/ndb/tools/waiter.cpp
index e24164ea807..5973b046f8f 100644
--- a/ndb/tools/waiter.cpp
+++ b/ndb/tools/waiter.cpp
@@ -23,7 +23,6 @@
#include <NdbOut.hpp>
#include <NdbSleep.h>
#include <kernel/ndb_limits.h>
-#include <LocalConfig.hpp>
#include <NDBT.hpp>
@@ -75,7 +74,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
int main(int argc, char** argv){
NDB_INIT(argv[0]);
- const char *load_default_groups[]= { "ndb_tools",0 };
+ const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
const char* _hostName = NULL;
int ho_error;
@@ -85,39 +84,8 @@ int main(int argc, char** argv){
char buf[255];
_hostName = argv[0];
- if (_hostName == NULL){
- LocalConfig lcfg;
- if(!lcfg.init(opt_connect_str, 0))
- {
- lcfg.printError();
- lcfg.printUsage();
- g_err << "Error parsing local config file" << endl;
- return NDBT_ProgramExit(NDBT_FAILED);
- }
-
- for (unsigned i = 0; i<lcfg.ids.size();i++)
- {
- MgmtSrvrId * m = &lcfg.ids[i];
-
- switch(m->type){
- case MgmId_TCP:
- snprintf(buf, 255, "%s:%d", m->name.c_str(), m->port);
- _hostName = buf;
- break;
- case MgmId_File:
- break;
- default:
- break;
- }
- if (_hostName != NULL)
- break;
- }
- if (_hostName == NULL)
- {
- g_err << "No management servers configured in local config file" << endl;
- return NDBT_ProgramExit(NDBT_FAILED);
- }
- }
+ if (_hostName == 0)
+ _hostName= opt_connect_str;
if (_no_contact) {
if (waitClusterStatus(_hostName, NDB_MGM_NODE_STATUS_NO_CONTACT, _timeout) != 0)
@@ -210,13 +178,19 @@ waitClusterStatus(const char* _addr,
int _nodes[MAX_NDB_NODES];
int _num_nodes = 0;
- handle = ndb_mgm_create_handle();
+ handle = ndb_mgm_create_handle();
if (handle == NULL){
g_err << "handle == NULL" << endl;
return -1;
}
g_info << "Connecting to mgmsrv at " << _addr << endl;
- if (ndb_mgm_connect(handle, _addr) == -1) {
+ if (ndb_mgm_set_connectstring(handle, _addr))
+ {
+ MGMERR(handle);
+ g_err << "Connectstring " << _addr << " invalid" << endl;
+ return -1;
+ }
+ if (ndb_mgm_connect(handle,0,0,1)) {
MGMERR(handle);
g_err << "Connection to " << _addr << " failed" << endl;
return -1;
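
With LocalConfig parsing gone, connecting to a management server becomes a three-step mgmapi sequence: create a handle, hand it the connect string, then connect (the extra ndb_mgm_connect() arguments seen above appear to be retry count, retry delay and a verbose flag). A small sketch of that sequence with error cleanup; ndb_mgm_destroy_handle() and the exact meaning of the retry arguments are assumptions read off the hunk, not verified against this tree:

  #include <mgmapi.h>

  // Connects to a management server given a connect string such as
  // "host:1186"; returns NULL on failure.
  static NdbMgmHandle connect_mgmd(const char *connectstring,
                                   int retries, int retry_delay_sec)
  {
    NdbMgmHandle h= ndb_mgm_create_handle();
    if (h == NULL)
      return NULL;

    if (ndb_mgm_set_connectstring(h, connectstring) != 0 ||
        ndb_mgm_connect(h, retries, retry_delay_sec, 1) != 0)  // 1 = verbose
    {
      ndb_mgm_destroy_handle(&h);       // release the handle on failure
      return NULL;
    }
    return h;
  }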
diff --git a/sql/field.cc b/sql/field.cc
index e372a37d2f6..e8669dad406 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1863,9 +1863,9 @@ int Field_long::store(double nr)
res=0;
error= 1;
}
- else if (nr > (double) (ulong) ~0L)
+ else if (nr > (double) UINT_MAX32)
{
- res=(int32) (uint32) ~0L;
+ res= UINT_MAX32;
error= 1;
}
else
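
UINT_MAX32 makes the clamp portable: on LP64 platforms (ulong) ~0L is 2^64-1, so the old upper-bound comparison effectively never fired, while the intended limit is 2^32-1. A standalone sketch of the clamping shape, with the rounding and warning handling of the real Field_long::store() left out:

  #include <stdint.h>

  // Clamps a double into the unsigned 32-bit column range and reports
  // whether the value was out of range.
  static uint32_t store_unsigned32(double nr, bool *out_of_range)
  {
    const double kMax= 4294967295.0;    // UINT_MAX32
    *out_of_range= false;
    if (nr < 0.0)
    {
      *out_of_range= true;
      return 0;
    }
    if (nr > kMax)
    {
      *out_of_range= true;
      return 4294967295u;
    }
    return (uint32_t) nr;               // truncation toward zero
  }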
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index 2574892b1fe..744128faf69 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -35,6 +35,17 @@
const char **ha_myisammrg::bas_ext() const
{ static const char *ext[]= { ".MRG", NullS }; return ext; }
+const char *ha_myisammrg::index_type(uint key_number)
+{
+ return ((table->key_info[key_number].flags & HA_FULLTEXT) ?
+ "FULLTEXT" :
+ (table->key_info[key_number].flags & HA_SPATIAL) ?
+ "SPATIAL" :
+ (table->key_info[key_number].algorithm == HA_KEY_ALG_RTREE) ?
+ "RTREE" :
+ "BTREE");
+}
+
int ha_myisammrg::open(const char *name, int mode, uint test_if_locked)
{
diff --git a/sql/ha_myisammrg.h b/sql/ha_myisammrg.h
index 264c580220c..6058c32c805 100644
--- a/sql/ha_myisammrg.h
+++ b/sql/ha_myisammrg.h
@@ -32,6 +32,7 @@ class ha_myisammrg: public handler
~ha_myisammrg() {}
const char *table_type() const { return "MRG_MyISAM"; }
const char **bas_ext() const;
+ const char *index_type(uint key_number);
ulong table_flags() const
{
return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_READ_RND_SAME |
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index a9537db39c7..a77d9cf05af 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -796,7 +796,8 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase)
error= create_unique_index(unique_index_name, key_info);
break;
case UNIQUE_INDEX:
- error= create_unique_index(unique_index_name, key_info);
+ if (!(error= check_index_fields_not_null(i)))
+ error= create_unique_index(unique_index_name, key_info);
break;
case ORDERED_INDEX:
error= create_ordered_index(index_name, key_info);
@@ -848,6 +849,26 @@ NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const
ORDERED_INDEX);
}
+int ha_ndbcluster::check_index_fields_not_null(uint inx)
+{
+ KEY* key_info= table->key_info + inx;
+ KEY_PART_INFO* key_part= key_info->key_part;
+ KEY_PART_INFO* end= key_part+key_info->key_parts;
+ DBUG_ENTER("check_index_fields_not_null");
+
+ for (; key_part != end; key_part++)
+ {
+ Field* field= key_part->field;
+ if (field->maybe_null())
+ {
+ my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX),
+ MYF(0),field->field_name);
+ DBUG_RETURN(ER_NULL_COLUMN_IN_INDEX);
+ }
+ }
+
+ DBUG_RETURN(0);
+}
void ha_ndbcluster::release_metadata()
{
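
create_unique_index() is now reached only after check_index_fields_not_null() succeeds, rejecting unique indexes that include nullable columns (presumably because the NDB unique hash indexes backing them could not represent NULL key values at this point). A toy sketch of the same walk over a key's parts, using invented stand-in structs rather than the server's KEY/KEY_PART_INFO:

  #include <cstddef>
  #include <cstdio>

  // Invented stand-ins for KEY_PART_INFO / KEY, just enough for the walk.
  struct ToyKeyPart { const char *field_name; bool maybe_null; };
  struct ToyKey     { const ToyKeyPart *parts; int n_parts; };

  // Returns the name of the first nullable column in the key, or NULL if the
  // key is acceptable -- the same check the new member function performs.
  static const char *first_nullable_part(const ToyKey &key)
  {
    for (int i= 0; i < key.n_parts; i++)
      if (key.parts[i].maybe_null)
        return key.parts[i].field_name;
    return NULL;
  }

  int main()
  {
    const ToyKeyPart parts[]= { { "a", false }, { "b", true } };
    const ToyKey key= { parts, 2 };
    if (const char *bad= first_nullable_part(key))
      std::printf("Column '%s' cannot be part of a unique index here\n", bad);
    return 0;
  }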
@@ -1247,7 +1268,7 @@ inline int ha_ndbcluster::next_result(byte *buf)
m_ops_pending= 0;
m_blobs_pending= FALSE;
}
- check= cursor->nextResult(contact_ndb);
+ check= cursor->nextResult(contact_ndb, m_force_send);
if (check == 0)
{
// One more record found
@@ -1540,7 +1561,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
DBUG_ASSERT(op->getSorted() == sorted);
DBUG_ASSERT(op->getLockMode() ==
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type));
- if(op->reset_bounds())
+ if(op->reset_bounds(m_force_send))
DBUG_RETURN(ndb_err(m_active_trans));
}
@@ -2373,7 +2394,7 @@ int ha_ndbcluster::index_last(byte *buf)
int res;
if((res= ordered_index_scan(0, 0, TRUE, buf)) == 0){
NdbResultSet *cursor= m_active_cursor;
- while((res= cursor->nextResult(TRUE)) == 0);
+ while((res= cursor->nextResult(TRUE, m_force_send)) == 0);
if(res == 1){
unpack_record(buf);
table->status= 0;
@@ -2459,7 +2480,7 @@ int ha_ndbcluster::rnd_init(bool scan)
{
if (!scan)
DBUG_RETURN(1);
- int res= cursor->restart();
+ int res= cursor->restart(m_force_send);
DBUG_ASSERT(res == 0);
}
index_init(table->primary_key);
@@ -2490,7 +2511,7 @@ int ha_ndbcluster::close_scan()
m_ops_pending= 0;
}
- cursor->close();
+ cursor->close(m_force_send);
m_active_cursor= NULL;
DBUG_RETURN(0);
}
@@ -3014,6 +3035,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
m_transaction_on= FALSE;
else
m_transaction_on= thd->variables.ndb_use_transactions;
+ // m_use_local_query_cache= thd->variables.ndb_use_local_query_cache;
m_active_trans= thd->transaction.all.ndb_tid ?
(NdbConnection*)thd->transaction.all.ndb_tid:
@@ -3740,7 +3762,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_ha_not_exact_count(FALSE),
m_force_send(TRUE),
m_autoincrement_prefetch(32),
- m_transaction_on(TRUE)
+ m_transaction_on(TRUE),
+ m_use_local_query_cache(FALSE)
{
int i;
@@ -4428,7 +4451,7 @@ bool ha_ndbcluster::low_byte_first() const
}
bool ha_ndbcluster::has_transactions()
{
- return TRUE;
+ return m_transaction_on;
}
const char* ha_ndbcluster::index_type(uint key_number)
{
@@ -4445,7 +4468,10 @@ const char* ha_ndbcluster::index_type(uint key_number)
}
uint8 ha_ndbcluster::table_cache_type()
{
- return HA_CACHE_TBL_NOCACHE;
+ if (m_use_local_query_cache)
+ return HA_CACHE_TBL_TRANSACT;
+ else
+ return HA_CACHE_TBL_NOCACHE;
}
/*
@@ -4613,13 +4639,12 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
{
DBUG_ENTER("ndb_get_table_statistics");
DBUG_PRINT("enter", ("table: %s", table));
-
+ NdbConnection* pTrans= ndb->startTransaction();
do
{
- NdbConnection* pTrans= ndb->startTransaction();
if (pTrans == NULL)
break;
-
+
NdbScanOperation* pOp= pTrans->getNdbScanOperation(table);
if (pOp == NULL)
break;
@@ -4636,13 +4661,13 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows);
pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits);
- check= pTrans->execute(NoCommit);
+ check= pTrans->execute(NoCommit, AbortOnError, TRUE);
if (check == -1)
break;
Uint64 sum_rows= 0;
Uint64 sum_commits= 0;
- while((check= rs->nextResult(TRUE)) == 0)
+ while((check= rs->nextResult(TRUE, TRUE)) == 0)
{
sum_rows+= rows;
sum_commits+= commits;
@@ -4651,6 +4676,8 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
if (check == -1)
break;
+ rs->close(TRUE);
+
ndb->closeTransaction(pTrans);
if(row_count)
* row_count= sum_rows;
@@ -4660,6 +4687,7 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
DBUG_RETURN(0);
} while(0);
+ ndb->closeTransaction(pTrans);
DBUG_PRINT("exit", ("failed"));
DBUG_RETURN(-1);
}
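
Hoisting startTransaction() out of the do { ... } while(0) block matters because every break inside it is an error exit: previously those exits skipped closeTransaction() and leaked the NdbConnection. The shape of the resulting single-cleanup-point pattern, reduced to placeholders (none of these names are NDB API calls):

  #include <cstddef>
  #include <cstdio>

  // Placeholder resource type standing in for NdbConnection in the hunk above.
  struct Resource { bool ok; };

  static Resource *acquire()           { static Resource r= { true }; return &r; }
  static void      release(Resource *) { std::puts("released"); }
  static bool      step1(Resource *r)  { return r->ok; }
  static bool      step2(Resource *r)  { return r->ok; }

  // Acquire before the do/while(0) block so every early break reaches the
  // single cleanup call instead of leaking the resource as the old code did.
  static int run()
  {
    Resource *r= acquire();
    do
    {
      if (r == NULL || !step1(r) || !step2(r))
        break;                          // any failure falls through to cleanup
      release(r);                       // success path
      return 0;
    } while (0);
    if (r != NULL)
      release(r);                       // shared failure cleanup
    return -1;
  }

  int main() { return run() == 0 ? 0 : 1; }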
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 02d7b96db20..2f18a52b8e9 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -160,7 +160,8 @@ class ha_ndbcluster: public handler
void release_metadata();
NDB_INDEX_TYPE get_index_type(uint idx_no) const;
NDB_INDEX_TYPE get_index_type_from_table(uint index_no) const;
-
+ int check_index_fields_not_null(uint index_no);
+
int pk_read(const byte *key, uint key_len, byte *buf);
int complemented_pk_read(const byte *old_data, byte *new_data);
int peek_row();
@@ -238,10 +239,12 @@ class ha_ndbcluster: public handler
char *m_blobs_buffer;
uint32 m_blobs_buffer_size;
uint m_dupkey;
+ // set from thread variables at external lock
bool m_ha_not_exact_count;
bool m_force_send;
ha_rows m_autoincrement_prefetch;
bool m_transaction_on;
+ bool m_use_local_query_cache;
void set_rec_per_key();
void records_update();
diff --git a/sql/handler.cc b/sql/handler.cc
index 3c5244927d4..b474e6290f2 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -970,8 +970,10 @@ int handler::read_first_row(byte * buf, uint primary_key)
/*
     If there are very few deleted rows in the table, find the first row by
scanning the table.
+ TODO remove the test for HA_READ_ORDER
*/
- if (deleted < 10 || primary_key >= MAX_KEY)
+ if (deleted < 10 || primary_key >= MAX_KEY ||
+ !(index_flags(primary_key, 0, 0) & HA_READ_ORDER))
{
(void) ha_rnd_init(1);
while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) ;
diff --git a/sql/item.cc b/sql/item.cc
index 542d13c9476..f8dc0c6eec8 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -160,7 +160,7 @@ void Item::rename(char *new_name)
Item_ident::Item_ident(const char *db_name_par,const char *table_name_par,
const char *field_name_par)
:orig_db_name(db_name_par), orig_table_name(table_name_par),
- orig_field_name(field_name_par),
+ orig_field_name(field_name_par), alias_name_used(FALSE),
db_name(db_name_par), table_name(table_name_par),
field_name(field_name_par), cached_field_index(NO_CACHED_FIELD_INDEX),
cached_table(0), depended_from(0)
@@ -174,6 +174,7 @@ Item_ident::Item_ident(THD *thd, Item_ident *item)
orig_db_name(item->orig_db_name),
orig_table_name(item->orig_table_name),
orig_field_name(item->orig_field_name),
+ alias_name_used(item->alias_name_used),
db_name(item->db_name),
table_name(item->table_name),
field_name(item->field_name),
@@ -631,6 +632,7 @@ void Item_field::set_field(Field *field_par)
table_name=field_par->table_name;
field_name=field_par->field_name;
db_name=field_par->table->table_cache_key;
+ alias_name_used= field_par->table->alias_name_used;
unsigned_flag=test(field_par->flags & UNSIGNED_FLAG);
collation.set(field_par->charset(), DERIVATION_IMPLICIT);
fixed= 1;
@@ -680,7 +682,8 @@ void Item_ident::print(String *str)
THD *thd= current_thd;
char d_name_buff[MAX_ALIAS_NAME], t_name_buff[MAX_ALIAS_NAME];
const char *d_name= db_name, *t_name= table_name;
- if (lower_case_table_names)
+ if (lower_case_table_names== 1 ||
+ (lower_case_table_names == 2 && !alias_name_used))
{
if (table_name && table_name[0])
{
@@ -702,7 +705,7 @@ void Item_ident::print(String *str)
append_identifier(thd, str, nm, strlen(nm));
return;
}
- if (db_name && db_name[0])
+ if (db_name && db_name[0] && !alias_name_used)
{
append_identifier(thd, str, d_name, strlen(d_name));
str->append('.');
@@ -2959,6 +2962,10 @@ bool Item_ref::fix_fields(THD *thd, TABLE_LIST *tables, Item **reference)
decimals= (*ref)->decimals;
collation.set((*ref)->collation);
with_sum_func= (*ref)->with_sum_func;
+ if ((*ref)->type() == FIELD_ITEM)
+ alias_name_used= ((Item_ident *) (*ref))->alias_name_used;
+ else
+ alias_name_used= TRUE; // not a field, so it was resolved by alias
fixed= 1;
if (ref && (*ref)->check_cols(1))
@@ -3174,16 +3181,12 @@ void Item_insert_value::print(String *str)
NOTE
This function does almost the same as fix_fields() for Item_field
but is invoked during trigger definition parsing and takes TABLE
- object as its argument.
-
- RETURN VALUES
- 0 ok
- 1 field was not found.
+ object as its argument. If the proper field is not found in the table,
+ the error will be reported at fix_fields() time.
*/
-bool Item_trigger_field::setup_field(THD *thd, TABLE *table,
+void Item_trigger_field::setup_field(THD *thd, TABLE *table,
enum trg_event_type event)
{
- bool result= 1;
uint field_idx= (uint)-1;
bool save_set_query_id= thd->set_query_id;
@@ -3197,12 +3200,9 @@ bool Item_trigger_field::setup_field(THD *thd, TABLE *table,
field= (row_version == OLD_ROW && event == TRG_EVENT_UPDATE) ?
table->triggers->old_field[field_idx] :
table->field[field_idx];
- result= 0;
}
thd->set_query_id= save_set_query_id;
-
- return result;
}
@@ -3226,10 +3226,18 @@ bool Item_trigger_field::fix_fields(THD *thd,
FIXME may be we still should bother about permissions here.
*/
DBUG_ASSERT(fixed == 0);
- // QQ: May be this should be moved to setup_field?
- set_field(field);
- fixed= 1;
- return 0;
+
+ if (field)
+ {
+ // QQ: May be this should be moved to setup_field?
+ set_field(field);
+ fixed= 1;
+ return 0;
+ }
+
+ my_error(ER_BAD_FIELD_ERROR, MYF(0), field_name,
+ (row_version == NEW_ROW) ? "NEW" : "OLD");
+ return 1;
}
diff --git a/sql/item.h b/sql/item.h
index 75c5bd8fc80..8f6d6581884 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -269,6 +269,14 @@ public:
virtual bool get_time(TIME *ltime);
virtual bool get_date_result(TIME *ltime,uint fuzzydate)
{ return get_date(ltime,fuzzydate); }
+ /*
+ This function is used only in Item_func_isnull/Item_func_isnotnull
+ (implementations of IS NULL/IS NOT NULL clauses). Item_func_is{not}null
+ calls this method instead of one of val/result*() methods, which
+ normally will set null_value. This allows determining the nullness of
+ a complex expression without fully evaluating it.
+ Any new item which can be NULL must implement this call.
+ */
virtual bool is_null() { return 0; }
/*
it is "top level" item of WHERE clause and we do not need correct NULL
@@ -462,6 +470,7 @@ public:
const char *db_name;
const char *table_name;
const char *field_name;
+ bool alias_name_used; /* true if item was resolved against alias */
/*
Cached value of index for this field in table->field array, used by prep.
stmts for speeding up their re-execution. Holds NO_CACHED_FIELD_INDEX
@@ -713,6 +722,8 @@ public:
void print(String *str);
/* parameter never equal to other parameter of other item */
bool eq(const Item *item, bool binary_cmp) const { return 0; }
+ bool is_null()
+ { DBUG_ASSERT(state != NO_VALUE); return state == NULL_VALUE; }
};
class Item_int :public Item_num
@@ -1312,13 +1323,15 @@ public:
/* Does this item represent the NEW or the OLD row? */
enum row_version_type {OLD_ROW, NEW_ROW};
row_version_type row_version;
+ /* Next in list of all Item_trigger_field's in trigger */
+ Item_trigger_field *next_trg_field;
Item_trigger_field(row_version_type row_ver_par,
const char *field_name_par):
Item_field((const char *)NULL, (const char *)NULL, field_name_par),
row_version(row_ver_par)
{}
- bool setup_field(THD *thd, TABLE *table, enum trg_event_type event);
+ void setup_field(THD *thd, TABLE *table, enum trg_event_type event);
enum Type type() const { return TRIGGER_FIELD_ITEM; }
bool eq(const Item *item, bool binary_cmp) const;
bool fix_fields(THD *, struct st_table_list *, Item **);
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 4a4485ba2da..23bd1b503f7 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -2429,11 +2429,12 @@ Item_func_regex::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
return FALSE;
}
int error;
- if ((error=regcomp(&preg,res->c_ptr(),
- (cmp_collation.collation->state & MY_CS_BINSORT) ?
- REG_EXTENDED | REG_NOSUB :
- REG_EXTENDED | REG_NOSUB | REG_ICASE,
- cmp_collation.collation)))
+ if ((error= regcomp(&preg,res->c_ptr(),
+ ((cmp_collation.collation->state & MY_CS_BINSORT) ||
+ (cmp_collation.collation->state & MY_CS_CSSORT)) ?
+ REG_EXTENDED | REG_NOSUB :
+ REG_EXTENDED | REG_NOSUB | REG_ICASE,
+ cmp_collation.collation)))
{
(void) regerror(error,&preg,buff,sizeof(buff));
my_error(ER_REGEXP_ERROR, MYF(0), buff);
@@ -2481,10 +2482,11 @@ longlong Item_func_regex::val_int()
regex_compiled=0;
}
if (regcomp(&preg,res2->c_ptr(),
- (cmp_collation.collation->state & MY_CS_BINSORT) ?
- REG_EXTENDED | REG_NOSUB :
- REG_EXTENDED | REG_NOSUB | REG_ICASE,
- cmp_collation.collation))
+ ((cmp_collation.collation->state & MY_CS_BINSORT) ||
+ (cmp_collation.collation->state & MY_CS_CSSORT)) ?
+ REG_EXTENDED | REG_NOSUB :
+ REG_EXTENDED | REG_NOSUB | REG_ICASE,
+ cmp_collation.collation))
{
null_value=1;
return 0;
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 581d82f2c2b..eb53394b96b 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -378,6 +378,15 @@ typedef struct st_sql_list {
first= save->first;
elements+= save->elements;
}
+ inline void push_back(struct st_sql_list *save)
+ {
+ if (save->first)
+ {
+ *next= save->first;
+ next= save->next;
+ elements+= save->elements;
+ }
+ }
} SQL_LIST;
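
SQL_LIST stores first plus a pointer to the link field a new tail must be written into, so the new push_back() can splice a whole second list in O(1): write the other list's head into *next, then adopt the other list's tail pointer. A self-contained sketch of the same intrusive-list trick on plain structs (names here are illustrative, not the server's):

  #include <cstddef>
  #include <cstdio>

  // Element of an intrusive singly linked list: the link lives in the node.
  struct Node { int value; Node *next; };

  // The list keeps a pointer to the first node and a pointer to the *link
  // field* a new tail should be written into -- the same trick SQL_LIST uses.
  struct List
  {
    Node  *first;
    Node **tail_link;

    void init()        { first= NULL; tail_link= &first; }
    void push(Node *n) { n->next= NULL; *tail_link= n; tail_link= &n->next; }

    // Equivalent of the new SQL_LIST::push_back(): append another whole list.
    void append(List *other)
    {
      if (other->first)
      {
        *tail_link= other->first;     // hook the other chain onto our tail
        tail_link= other->tail_link;  // our tail link is now the other list's
      }
    }
  };

  int main()
  {
    Node a= {1, NULL}, b= {2, NULL}, c= {3, NULL};
    List x, y;
    x.init(); y.init();
    x.push(&a);
    y.push(&b); y.push(&c);
    x.append(&y);                     // x is now 1 -> 2 -> 3
    for (Node *n= x.first; n; n= n->next)
      std::printf("%d ", n->value);
    std::printf("\n");
    return 0;
  }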
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index cd158e48b11..3001cda8b2b 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -469,6 +469,7 @@ Query_cache query_cache;
#ifdef HAVE_SMEM
char *shared_memory_base_name= default_shared_memory_base_name;
bool opt_enable_shared_memory;
+HANDLE smem_event_connect_request= 0;
#endif
#include "sslopt-vars.h"
@@ -746,6 +747,15 @@ void kill_mysql(void)
CloseHandle(hEvent);
*/
}
+#ifdef HAVE_SMEM
+ /*
+ Signal smem_event_connect_request so the shared-memory listener aborts its wait
+ */
+ if (!SetEvent(smem_event_connect_request))
+ {
+ DBUG_PRINT("error",("Got error: %ld from SetEvent of smem_event_connect_request",GetLastError()));
+ }
+#endif
#endif
#elif defined(OS2)
pthread_cond_signal(&eventShutdown); // post semaphore
@@ -3799,7 +3809,6 @@ pthread_handler_decl(handle_connections_shared_memory,arg)
/* file-mapping object, use for create shared memory */
HANDLE handle_connect_file_map= 0;
char *handle_connect_map= 0; // pointer on shared memory
- HANDLE event_connect_request= 0; // for start connection actions
HANDLE event_connect_answer= 0;
ulong smem_buffer_length= shared_memory_buffer_length + 4;
ulong connect_number= 1;
@@ -3820,7 +3829,7 @@ pthread_handler_decl(handle_connections_shared_memory,arg)
*/
suffix_pos= strxmov(tmp,shared_memory_base_name,"_",NullS);
strmov(suffix_pos, "CONNECT_REQUEST");
- if ((event_connect_request= CreateEvent(0,FALSE,FALSE,tmp)) == 0)
+ if ((smem_event_connect_request= CreateEvent(0,FALSE,FALSE,tmp)) == 0)
{
errmsg= "Could not create request event";
goto error;
@@ -3851,7 +3860,13 @@ pthread_handler_decl(handle_connections_shared_memory,arg)
while (!abort_loop)
{
/* Wait a request from client */
- WaitForSingleObject(event_connect_request,INFINITE);
+ WaitForSingleObject(smem_event_connect_request,INFINITE);
+
+ /*
+ The wake-up may have come from the shutdown command
+ */
+ if (abort_loop)
+ goto error;
HANDLE handle_client_file_map= 0;
char *handle_client_map= 0;
@@ -3976,7 +3991,7 @@ error:
if (handle_connect_map) UnmapViewOfFile(handle_connect_map);
if (handle_connect_file_map) CloseHandle(handle_connect_file_map);
if (event_connect_answer) CloseHandle(event_connect_answer);
- if (event_connect_request) CloseHandle(event_connect_request);
+ if (smem_event_connect_request) CloseHandle(smem_event_connect_request);
decrement_handler_count();
DBUG_RETURN(0);
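
Making the connect-request event global lets kill_mysql() signal it, so the shared-memory listener wakes from WaitForSingleObject() and re-checks abort_loop instead of blocking forever. A minimal Win32 sketch of that wake-up pattern (the event, flag and message are illustrative, and the sketch builds only on Windows):

  #include <windows.h>
  #include <stdio.h>

  static HANDLE        wake_event;      // auto-reset event, analogous to
                                        // smem_event_connect_request
  static volatile LONG abort_flag= 0;   // analogous to the server's abort_loop

  static DWORD WINAPI listener(LPVOID)
  {
    while (!abort_flag)
    {
      WaitForSingleObject(wake_event, INFINITE);   // wait for work or shutdown
      if (abort_flag)
        break;                                     // woken only to shut down
      puts("handle one connection request");
    }
    return 0;
  }

  int main()
  {
    wake_event= CreateEvent(NULL, FALSE, FALSE, NULL);  // unnamed auto-reset
    HANDLE thr= CreateThread(NULL, 0, listener, NULL, 0, NULL);

    // Shutdown side: raise the flag first, then signal the event, so the
    // listener is guaranteed to observe the flag when it wakes.
    InterlockedExchange(&abort_flag, 1);
    SetEvent(wake_event);

    WaitForSingleObject(thr, INFINITE);
    CloseHandle(thr);
    CloseHandle(wake_event);
    return 0;
  }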
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 3368482f28d..6392a2fee32 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -2869,10 +2869,10 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
trp->records= best_rows? best_rows : 1;
trp->index_scan_costs= best_index_scan_costs;
trp->cpk_scan= cpk_scan;
+ DBUG_PRINT("info",
+ ("Returning non-covering ROR-intersect plan: cost %g, records %lu",
+ trp->read_cost, (ulong) trp->records));
}
- DBUG_PRINT("info",
- ("Returning non-covering ROR-intersect plan: cost %g, records %lu",
- trp->read_cost, (ulong) trp->records));
DBUG_RETURN(trp);
}
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 79d886fb84f..46865de9314 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -2781,13 +2781,18 @@ int sql_set_variables(THD *thd, List<set_var_base> *var_list)
while ((var=it++))
{
if ((error=var->check(thd)))
- DBUG_RETURN(error);
+ goto err;
}
- if (thd->net.report_error)
- DBUG_RETURN(1);
- it.rewind();
- while ((var=it++))
- error|= var->update(thd); // Returns 0, -1 or 1
+ if (!thd->net.report_error)
+ {
+ it.rewind();
+ while ((var= it++))
+ error|= var->update(thd); // Returns 0, -1 or 1
+ }
+ else
+ error= 1;
+err:
+ free_underlaid_joins(thd, &thd->lex->select_lex);
DBUG_RETURN(error);
}
diff --git a/sql/sp.cc b/sql/sp.cc
index 41ce3552292..9eff1655711 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -1154,7 +1154,6 @@ sp_change_db(THD *thd, char *name, bool no_access_check)
int length, db_length;
char *dbname=my_strdup((char*) name,MYF(MY_WME));
char path[FN_REFLEN];
- ulong db_access;
HA_CREATE_INFO create;
DBUG_ENTER("sp_change_db");
DBUG_PRINT("enter", ("db: %s, no_access_check: %d", name, no_access_check));
@@ -1175,6 +1174,8 @@ sp_change_db(THD *thd, char *name, bool no_access_check)
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (! no_access_check)
{
+ ulong db_access;
+
if (test_all_bits(thd->master_access,DB_ACLS))
db_access=DB_ACLS;
else
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 7db79128bb8..114ff0d451a 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -275,6 +275,11 @@ sp_head::init(LEX *lex)
DBUG_ENTER("sp_head::init");
lex->spcont= m_pcont= new sp_pcontext(NULL);
+ /*
+ Although the trg_table_fields list is used only in triggers, we initialize
+ it for all types of stored procedures to simplify the reset_lex()/restore_lex() code.
+ */
+ lex->trg_table_fields.empty();
my_init_dynamic_array(&m_instr, sizeof(sp_instr *), 16, 8);
m_param_begin= m_param_end= m_returns_begin= m_returns_end= m_body_begin= 0;
m_qname.str= m_db.str= m_name.str= m_params.str= m_retstr.str=
@@ -771,7 +776,7 @@ sp_head::reset_lex(THD *thd)
sublex->spcont= oldlex->spcont;
/* And trigger related stuff too */
sublex->trg_chistics= oldlex->trg_chistics;
- sublex->trg_table= oldlex->trg_table;
+ sublex->trg_table_fields.empty();
sublex->sp_lex_in_use= FALSE;
DBUG_VOID_RETURN;
}
@@ -790,6 +795,7 @@ sp_head::restore_lex(THD *thd)
// Update some state in the old one first
oldlex->ptr= sublex->ptr;
oldlex->next_state= sublex->next_state;
+ oldlex->trg_table_fields.push_back(&sublex->trg_table_fields);
// Collect some data from the sub statement lex.
sp_merge_funs(oldlex, sublex);
diff --git a/sql/sp_head.h b/sql/sp_head.h
index 4bfe1076f65..c4d2068661c 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -439,13 +439,9 @@ public:
virtual void print(String *str);
- bool setup_field(THD *thd, TABLE *table, enum trg_event_type event)
- {
- return trigger_field.setup_field(thd, table, event);
- }
-private:
-
Item_trigger_field trigger_field;
+
+private:
Item *value;
}; // class sp_instr_trigger_field : public sp_instr
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index e2a9ac5680d..8fba30e2df8 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1050,8 +1050,11 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
table->reginfo.lock_type=TL_READ; /* Assume read */
reset:
+ if (thd->lex->need_correct_ident())
+ table->alias_name_used= my_strcasecmp(table_alias_charset,
+ table->real_name, alias);
/* Fix alias if table name changes */
- if (strcmp(table->table_name,alias))
+ if (strcmp(table->table_name, alias))
{
uint length=(uint) strlen(alias)+1;
table->table_name= (char*) my_realloc(table->table_name,length,
diff --git a/sql/sql_do.cc b/sql/sql_do.cc
index 3ca3bea743a..3f34835c2c9 100644
--- a/sql/sql_do.cc
+++ b/sql/sql_do.cc
@@ -29,6 +29,7 @@ bool mysql_do(THD *thd, List<Item> &values)
DBUG_RETURN(TRUE);
while ((value = li++))
value->val_int();
+ free_underlaid_joins(thd, &thd->lex->select_lex);
thd->clear_error(); // DO always is OK
send_ok(thd);
DBUG_RETURN(FALSE);
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 7d933f9f833..63741bcb176 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -175,7 +175,6 @@ void lex_start(THD *thd, uchar *buf,uint length)
lex->duplicates= DUP_ERROR;
lex->sphead= NULL;
lex->spcont= NULL;
- lex->trg_table= NULL;
lex->proc_list.first= 0;
if (lex->spfuns.records)
@@ -1724,6 +1723,7 @@ bool st_lex::can_not_use_merged()
TRUE yes, we need only structure
FALSE no, we need data
*/
+
bool st_lex::only_view_structure()
{
switch(sql_command)
@@ -1743,6 +1743,32 @@ bool st_lex::only_view_structure()
/*
+ Should Items_ident be printed correctly
+
+ SYNOPSIS
+ need_correct_ident()
+
+ RETURN
+ TRUE yes, Item_idents must be printed correctly
+ FALSE no, they need not be
+*/
+
+
+bool st_lex::need_correct_ident()
+{
+ switch(sql_command)
+ {
+ case SQLCOM_SHOW_CREATE:
+ case SQLCOM_SHOW_TABLES:
+ case SQLCOM_CREATE_VIEW:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
+
+
+/*
initialize limit counters
SYNOPSIS
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 8c02aa48f62..169ee4e66eb 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -754,11 +754,13 @@ typedef struct st_lex
/* Characterstics of trigger being created */
st_trg_chistics trg_chistics;
/*
- Points to table being opened when we are parsing trigger definition
- while opening table. 0 if we are parsing user provided CREATE TRIGGER
- or any other statement. Used for NEW/OLD row field lookup in trigger.
+ List of all items (Item_trigger_field objects) representing fields in
+ old/new version of row in trigger. We use this list for checking whether
+ all such fields are valid at trigger creation time and for binding these
+ fields to the TABLE object at table open (although for the latter a pointer
+ to the table being opened is probably enough).
*/
- TABLE *trg_table;
+ SQL_LIST trg_table_fields;
st_lex() :result(0)
{
@@ -804,6 +806,7 @@ typedef struct st_lex
bool can_use_merged();
bool can_not_use_merged();
bool only_view_structure();
+ bool need_correct_ident();
} LEX;
extern TABLE_LIST fake_time_zone_tables_list;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 7089a79124d..af3392349ab 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -3352,16 +3352,15 @@ create_error:
}
case SQLCOM_CREATE_FUNCTION: // UDF function
{
- sp_head *sph;
if (check_access(thd,INSERT_ACL,"mysql",0,1,0))
break;
#ifdef HAVE_DLOPEN
- if ((sph= sp_find_function(thd, lex->spname)))
+ if (sp_find_function(thd, lex->spname))
{
my_error(ER_UDF_EXISTS, MYF(0), lex->spname->m_name.str);
goto error;
}
- if (!(res = mysql_create_function(thd,&lex->udf)))
+ if (!(res = mysql_create_function(thd, &lex->udf)))
send_ok(thd);
#else
res= TRUE;
@@ -3813,35 +3812,35 @@ create_error:
else
sp= sp_find_function(thd, lex->spname);
mysql_reset_errors(thd);
- if (! sp)
- result= SP_KEY_NOT_FOUND;
- else
+ if (sp)
{
if (check_sp_definer_access(thd, sp))
goto error;
if (lex->sql_command == SQLCOM_DROP_PROCEDURE)
result= sp_drop_procedure(thd, lex->spname);
else
- {
result= sp_drop_function(thd, lex->spname);
+ }
+ else
+ {
#ifdef HAVE_DLOPEN
- if (result == SP_KEY_NOT_FOUND)
- {
- udf_func *udf = find_udf(lex->spname->m_name.str,
- lex->spname->m_name.length);
- if (udf)
+ if (lex->sql_command == SQLCOM_DROP_FUNCTION)
+ {
+ udf_func *udf = find_udf(lex->spname->m_name.str,
+ lex->spname->m_name.length);
+ if (udf)
+ {
+ if (check_access(thd, DELETE_ACL, "mysql", 0, 1, 0))
+ goto error;
+ if (!(res = mysql_drop_function(thd, &lex->spname->m_name)))
{
- if (check_access(thd, DELETE_ACL, "mysql", 0, 1, 0))
- goto error;
- if (!(res = mysql_drop_function(thd,&lex->spname->m_name)))
- {
- send_ok(thd);
- break;
- }
+ send_ok(thd);
+ break;
}
}
-#endif
}
+#endif
+ result= SP_KEY_NOT_FOUND;
}
res= result;
switch (result)
@@ -3926,11 +3925,11 @@ create_error:
}
case SQLCOM_CREATE_TRIGGER:
{
- /* We don't care much about trigger body at that point */
+ res= mysql_create_or_drop_trigger(thd, all_tables, 1);
+
+ /* We don't care about trigger body after this point */
delete lex->sphead;
lex->sphead= 0;
-
- res= mysql_create_or_drop_trigger(thd, all_tables, 1);
break;
}
case SQLCOM_DROP_TRIGGER:
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index ae60eb759d0..93ed04be4b2 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -238,7 +238,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result)
/*
If a real error was reported earlier then this will be ignored
*/
- result->send_error(ER_UNKNOWN_ERROR, NullS);
+ result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
result->abort();
}
DBUG_RETURN(res);
@@ -2632,11 +2632,7 @@ add_key_field(KEY_FIELD **key_fields, uint and_level, COND *cond,
bool is_const=1;
for (uint i=0; i<num_values; i++)
- /*
- TODO: This looks like a bug. It should be
- is_const&= (value[i])->const_item();
- */
- is_const&= (*value)->const_item();
+ is_const&= value[i]->const_item();
if (is_const)
stat[0].const_keys.merge(possible_keys);
/*
@@ -12905,8 +12901,9 @@ void st_table_list::print(THD *thd, String *str)
str->append('.');
if (schema_table)
{
- append_identifier(thd, str, alias, strlen(alias));
- cmp_name= alias;
+ append_identifier(thd, str, schema_table_name,
+ strlen(schema_table_name));
+ cmp_name= schema_table_name;
}
else
{
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 7fd9bf48b0b..5e42fc0ee30 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -277,8 +277,6 @@ class JOIN :public Sql_alloc
hidden_group_fields= 0; /*safety*/
buffer_result= test(select_options & OPTION_BUFFER_RESULT) &&
!test(select_options & OPTION_FOUND_ROWS);
- all_fields= fields_arg;
- fields_list= fields_arg;
error= 0;
select= 0;
return_tab= 0;
@@ -288,6 +286,7 @@ class JOIN :public Sql_alloc
optimized= 0;
cond_equal= 0;
+ all_fields= fields_arg;
fields_list= fields_arg;
bzero((char*) &keyuse,sizeof(keyuse));
tmp_table_param.copy_field=0;
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 1642a2eaa17..55c38ff37c9 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -597,7 +597,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list)
else
{
if (table_list->schema_table)
- protocol->store(table_list->alias, system_charset_info);
+ protocol->store(table_list->schema_table_name, system_charset_info);
else
protocol->store(table->table_name, system_charset_info);
if (store_create_info(thd, table_list, &buffer))
@@ -938,7 +938,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
else
packet->append("CREATE TABLE ", 13);
if (table_list->schema_table)
- alias= table_list->alias;
+ alias= table_list->schema_table_name;
else
alias= (lower_case_table_names == 2 ? table->table_name :
table->real_name);
@@ -2193,7 +2193,7 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables,
tmp_buff= (show_table->table_charset ? show_table->
table_charset->name : "default");
table->field[17]->store(tmp_buff, strlen(tmp_buff), cs);
- if (file->table_flags() & HA_HAS_CHECKSUM)
+ if (file->table_flags() & (ulong) HA_HAS_CHECKSUM)
{
table->field[18]->store((longlong) file->checksum());
table->field[18]->set_notnull();
@@ -2282,7 +2282,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
!wild_case_compare(system_charset_info, field->field_name,wild))
{
uint tmp_length;
- char *tmp_buff;
+ const char *tmp_buff;
byte *pos;
uint flags=field->flags;
char tmp[MAX_FIELD_WIDTH];
@@ -2298,7 +2298,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
table->field[4]->store((longlong) count);
field->sql_type(type);
table->field[14]->store(type.ptr(), type.length(), cs);
- tmp_buff= strchr(type.ptr(),'(');
+ tmp_buff= strchr(type.ptr(), '(');
table->field[7]->store(type.ptr(),
(tmp_buff ? tmp_buff - type.ptr() :
type.length()), cs);
@@ -2909,12 +2909,13 @@ ST_SCHEMA_TABLE *get_schema_table(enum enum_schema_tables schema_table_idx)
0 Can't create table
*/
-TABLE *create_schema_table(THD *thd, ST_SCHEMA_TABLE *schema_table)
+TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
{
int field_count= 0;
Item *item;
TABLE *table;
List<Item> field_list;
+ ST_SCHEMA_TABLE *schema_table= table_list->schema_table;
ST_FIELD_INFO *fields_info= schema_table->fields_info;
CHARSET_INFO *cs= system_charset_info;
DBUG_ENTER("create_schema_table");
@@ -2959,8 +2960,7 @@ TABLE *create_schema_table(THD *thd, ST_SCHEMA_TABLE *schema_table)
field_list, (ORDER*) 0, 0, 0,
(select_lex->options | thd->options |
TMP_TABLE_ALL_COLUMNS),
- HA_POS_ERROR,
- (char *) schema_table->table_name)))
+ HA_POS_ERROR, table_list->real_name)))
DBUG_RETURN(0);
DBUG_RETURN(table);
}
@@ -3130,13 +3130,13 @@ int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list)
{
TABLE *table;
DBUG_ENTER("mysql_schema_table");
- if (!(table= table_list->schema_table->
- create_table(thd, table_list->schema_table)))
+ if (!(table= table_list->schema_table->create_table(thd, table_list)))
{
DBUG_RETURN(1);
}
table->tmp_table= TMP_TABLE;
table->grant.privilege= SELECT_ACL;
+ table_list->schema_table_name= table_list->real_name;
table_list->real_name= table->real_name;
table_list->table= table;
table->next= thd->derived_tables;
@@ -3291,14 +3291,14 @@ ST_FIELD_INFO columns_fields_info[]=
{"ORDINAL_POSITION", 21 , MYSQL_TYPE_LONG, 0, 0, 0},
{"COLUMN_DEFAULT", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, "Default"},
{"IS_NULLABLE", 3, MYSQL_TYPE_STRING, 0, 0, "Null"},
- {"DATA_TYPE", 40, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"DATA_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
{"CHARACTER_MAXIMUM_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 0, 0},
{"CHARACTER_OCTET_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 0, 0},
{"NUMERIC_PRECISION", 21 , MYSQL_TYPE_LONG, 0, 1, 0},
{"NUMERIC_SCALE", 21 , MYSQL_TYPE_LONG, 0, 1, 0},
{"CHARACTER_SET_NAME", 40, MYSQL_TYPE_STRING, 0, 1, 0},
{"COLLATION_NAME", 40, MYSQL_TYPE_STRING, 0, 1, "Collation"},
- {"COLUMN_TYPE", 40, MYSQL_TYPE_STRING, 0, 0, "Type"},
+ {"COLUMN_TYPE", 65535, MYSQL_TYPE_STRING, 0, 0, "Type"},
{"COLUMN_KEY", 3, MYSQL_TYPE_STRING, 0, 0, "Key"},
{"EXTRA", 20, MYSQL_TYPE_STRING, 0, 0, "Extra"},
{"PRIVILEGES", 80, MYSQL_TYPE_STRING, 0, 0, "Privileges"},
@@ -3356,7 +3356,7 @@ ST_FIELD_INFO proc_fields_info[]=
{"SECURITY_TYPE", 7, MYSQL_TYPE_STRING, 0, 0, "Security_type"},
{"CREATED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, "Created"},
{"LAST_ALTERED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, "Modified"},
- {"SQL_MODE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"SQL_MODE", 65535, MYSQL_TYPE_STRING, 0, 0, 0},
{"ROUTINE_COMMENT", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Comment"},
{"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer"},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 5020b4820a0..a7c08f356a2 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -835,7 +835,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
#endif
}
- List_iterator<key_part_spec> cols(key->columns);
+ List_iterator<key_part_spec> cols(key->columns), cols2(key->columns);
CHARSET_INFO *ft_key_charset=0; // for FULLTEXT
for (uint column_nr=0 ; (column=cols++) ; column_nr++)
{
@@ -851,6 +851,19 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), column->field_name);
DBUG_RETURN(-1);
}
+ for (uint dup_nr= 0; dup_nr < column_nr; dup_nr++)
+ {
+ key_part_spec *dup_column= cols2++;
+ if (!my_strcasecmp(system_charset_info,
+ column->field_name, dup_column->field_name))
+ {
+ my_printf_error(ER_DUP_FIELDNAME,
+ ER(ER_DUP_FIELDNAME),MYF(0),
+ column->field_name);
+ DBUG_RETURN(-1);
+ }
+ }
+ cols2.rewind();
/* for fulltext keys keyseg length is 1 for blobs (it's ignored in
ft code anyway, and 0 (set to column width later) for char's.
it has to be correct col width for char's, as char data are not
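
The nested loop added above rejects keys that name the same column twice, comparing case-insensitively and rewinding the second iterator for every outer column. A standalone sketch of the same O(n^2) duplicate check over plain strings (a portable ASCII comparison stands in for my_strcasecmp(system_charset_info, ...)):

  #include <cctype>
  #include <cstdio>

  // Case-insensitive ASCII comparison, standing in for my_strcasecmp().
  static bool same_name(const char *a, const char *b)
  {
    for (; *a && *b; a++, b++)
      if (std::tolower((unsigned char) *a) != std::tolower((unsigned char) *b))
        return false;
    return *a == *b;
  }

  // Returns the index of the first column that repeats an earlier one,
  // or -1 if all names are distinct -- the same shape as the key check above.
  static int find_duplicate_column(const char *cols[], int n)
  {
    for (int i= 1; i < n; i++)
      for (int j= 0; j < i; j++)        // "rewind" and rescan earlier columns
        if (same_name(cols[i], cols[j]))
          return i;
    return -1;
  }

  int main()
  {
    const char *key_cols[]= { "a", "b", "A" };
    int dup= find_duplicate_column(key_cols, 3);
    if (dup >= 0)
      std::printf("duplicate column name: %s\n", key_cols[dup]);
    return 0;
  }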
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 7637679430f..a88dc0b20bf 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -136,6 +136,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables)
char dir_buff[FN_REFLEN], file_buff[FN_REFLEN];
LEX_STRING dir, file;
LEX_STRING *trg_def, *name;
+ Item_trigger_field *trg_field;
List_iterator_fast<LEX_STRING> it(names_list);
/* We don't allow creation of several triggers of the same type yet */
@@ -157,6 +158,31 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables)
}
/*
+ Let us check if all references to fields in old/new versions of row in
+ this trigger are ok.
+
+ NOTE: We do it here mostly for ease of use. We still have to do some
+ checks on each execution. E.g. we can catch privilege changes only
+ during execution. Also, in the near future, when we allow access to
+ other tables from triggers, we won't be able to catch changes in those
+ tables...
+
+ To simplify the code a bit we have to create Fields for accessing old
+ row values if we have an ON UPDATE trigger.
+ */
+ if (!old_field && lex->trg_chistics.event == TRG_EVENT_UPDATE &&
+ prepare_old_row_accessors(table))
+ return 1;
+
+ for (trg_field= (Item_trigger_field *)(lex->trg_table_fields.first);
+ trg_field; trg_field= trg_field->next_trg_field)
+ {
+ trg_field->setup_field(thd, table, lex->trg_chistics.event);
+ if (trg_field->fix_fields(thd, (TABLE_LIST *)0, (Item **)0))
+ return 1;
+ }
+
+ /*
Here we are creating file with triggers and save all triggers in it.
sql_create_definition_file() files handles renaming and backup of older
versions
@@ -275,6 +301,44 @@ Table_triggers_list::~Table_triggers_list()
/*
+ Prepare array of Field objects which will represent OLD.* row values in
+ ON UPDATE trigger (by referencing to record[1] instead of record[0]).
+
+ SYNOPSIS
+ prepare_old_row_accessors()
+ table - pointer to TABLE object for which we are creating fields.
+
+ RETURN VALUE
+ False - success
+ True - error
+*/
+bool Table_triggers_list::prepare_old_row_accessors(TABLE *table)
+{
+ Field **fld, **old_fld;
+
+ if (!(old_field= (Field **)alloc_root(&table->mem_root,
+ (table->fields + 1) *
+ sizeof(Field*))))
+ return 1;
+
+ for (fld= table->field, old_fld= old_field; *fld; fld++, old_fld++)
+ {
+ /*
+ QQ: it is supposed that it is ok to use this function for field
+ cloning...
+ */
+ if (!(*old_fld= (*fld)->new_field(&table->mem_root, table)))
+ return 1;
+ (*old_fld)->move_field((my_ptrdiff_t)(table->record[1] -
+ table->record[0]));
+ }
+ *old_fld= 0;
+
+ return 0;
+}
+
+
+/*
Check whether a .TRG file exists for the table and load all triggers it contains.
SYNOPSIS
@@ -317,7 +381,6 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
if (!strncmp(triggers_file_type.str, parser->type()->str,
parser->type()->length))
{
- Field **fld, **old_fld;
Table_triggers_list *triggers=
new (&table->mem_root) Table_triggers_list();
@@ -330,31 +393,10 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
table->triggers= triggers;
- /*
- We have to prepare array of Field objects which will represent OLD.*
- row values by referencing to record[1] instead of record[0]
-
- TODO: This could be avoided if there is no ON UPDATE trigger.
- */
- if (!(triggers->old_field=
- (Field **)alloc_root(&table->mem_root, (table->fields + 1) *
- sizeof(Field*))))
+ /* TODO: This could be avoided if there is no ON UPDATE trigger. */
+ if (triggers->prepare_old_row_accessors(table))
DBUG_RETURN(1);
- for (fld= table->field, old_fld= triggers->old_field; *fld;
- fld++, old_fld++)
- {
- /*
- QQ: it is supposed that it is ok to use this function for field
- cloning...
- */
- if (!(*old_fld= (*fld)->new_field(&table->mem_root, table)))
- DBUG_RETURN(1);
- (*old_fld)->move_field((my_ptrdiff_t)(table->record[1] -
- table->record[0]));
- }
- *old_fld= 0;
-
List_iterator_fast<LEX_STRING> it(triggers->definitions_list);
LEX_STRING *trg_create_str, *trg_name_str;
char *trg_name_buff;
@@ -365,7 +407,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
while ((trg_create_str= it++))
{
lex_start(thd, (uchar*)trg_create_str->str, trg_create_str->length);
- lex.trg_table= table;
+
if (yyparse((void *)thd) || thd->is_fatal_error)
{
/*
@@ -400,6 +442,21 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
if (triggers->names_list.push_back(trg_name_str, &table->mem_root))
goto err_with_lex_cleanup;
+ /*
+ Let us bind Item_trigger_field objects representing access to fields
+ in old/new versions of row in trigger to Field objects in table being
+ opened.
+
+ We ignore errors here, because even if something is wrong we still
+ want to open the table to perform some operations (e.g. SELECT)...
+ Anyway, some things can be checked only during trigger execution.
+ */
+ for (Item_trigger_field *trg_field=
+ (Item_trigger_field *)(lex.trg_table_fields.first);
+ trg_field;
+ trg_field= trg_field->next_trg_field)
+ trg_field->setup_field(thd, table, lex.trg_chistics.event);
+
lex_end(&lex);
}
thd->lex= old_lex;
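
Table_triggers_list::prepare_old_row_accessors() clones each Field and shifts the clone by the byte distance between record[1] and record[0], so OLD.* references read from the old-row buffer while NEW.* references keep using record[0]. A self-contained sketch of that pointer-rebasing idea (the toy field and two-buffer layout are invented for illustration):

  #include <cstddef>
  #include <cstdio>
  #include <cstring>

  // A toy "field": a pointer into a row buffer plus a length.
  struct ToyField
  {
    const char *ptr;
    int         length;
    void move(ptrdiff_t diff) { ptr+= diff; }   // cf. Field::move_field()
  };

  int main()
  {
    // One allocation holding both row images, like TABLE::record[0]/record[1].
    char rows[16];
    char *record0= rows;                  // current row  (NEW.* values)
    char *record1= rows + 8;              // previous row (OLD.* values)
    std::memcpy(record0, "new_val", 8);
    std::memcpy(record1, "old_val", 8);

    ToyField fld= { record0, 7 };         // field bound to the current row

    // Clone and rebase onto record1: the record[1] - record[0] shift applied
    // in prepare_old_row_accessors().
    ToyField old_fld= fld;
    old_fld.move(record1 - record0);

    std::printf("NEW: %.*s\n", fld.length, fld.ptr);
    std::printf("OLD: %.*s\n", old_fld.length, old_fld.ptr);
    return 0;
  }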
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index d0376f056d9..82e7c1ce023 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -65,4 +65,7 @@ public:
}
friend class Item_trigger_field;
+
+private:
+ bool prepare_old_row_accessors(TABLE *table);
};
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index d2f02acb75b..dd9cd4af0f3 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1238,8 +1238,9 @@ create:
my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "TRIGGER");
YYABORT;
}
-
- sp= new sp_head();
+
+ if (!(sp= new sp_head()))
+ YYABORT;
sp->reset_thd_mem_root(YYTHD);
sp->init(lex);
@@ -6622,6 +6623,7 @@ simple_ident_q:
(!my_strcasecmp(system_charset_info, $1.str, "NEW") ||
!my_strcasecmp(system_charset_info, $1.str, "OLD")))
{
+ Item_trigger_field *trg_fld;
bool new_row= ($1.str[0]=='N' || $1.str[0]=='n');
if (lex->trg_chistics.event == TRG_EVENT_INSERT &&
@@ -6638,23 +6640,18 @@ simple_ident_q:
YYABORT;
}
- Item_trigger_field *trg_fld=
- new Item_trigger_field(new_row ? Item_trigger_field::NEW_ROW :
- Item_trigger_field::OLD_ROW,
- $3.str);
-
- if (lex->trg_table &&
- trg_fld->setup_field(thd, lex->trg_table,
- lex->trg_chistics.event))
- {
- /*
- FIXME. Far from perfect solution. See comment for
- "SET NEW.field_name:=..." for more info.
- */
- my_error(ER_BAD_FIELD_ERROR, MYF(0),
- $3.str, new_row ? "NEW": "OLD");
+ if (!(trg_fld= new Item_trigger_field(new_row ?
+ Item_trigger_field::NEW_ROW:
+ Item_trigger_field::OLD_ROW,
+ $3.str)))
YYABORT;
- }
+
+ /*
+ Let us add this item to list of all Item_trigger_field objects
+ in trigger.
+ */
+ lex->trg_table_fields.link_in_list((byte *)trg_fld,
+ (byte**)&trg_fld->next_trg_field);
$$= (Item *)trg_fld;
}
@@ -7156,28 +7153,19 @@ option_value:
/* QQ: Shouldn't this be field's default value ? */
it= new Item_null();
}
- i= new sp_instr_set_trigger_field(lex->sphead->instructions(),
- lex->spcont, $1.base_name, it);
- if (lex->trg_table && i->setup_field(YYTHD, lex->trg_table,
- lex->trg_chistics.event))
- {
- /*
- FIXME. Now we are catching this kind of errors only
- during opening tables. But this doesn't save us from most
- common user error - misspelling field name, because we
- will bark too late in this case... Moreover it is easy to
- make table unusable with such kind of error...
-
- So in future we either have to parse trigger definition
- second time during create trigger or gather all trigger
- fields in one list and perform setup_field() for them as
- separate stage.
-
- Error message also should be improved.
- */
- my_error(ER_BAD_FIELD_ERROR, MYF(0), $1.base_name, "NEW");
+
+ if (!(i= new sp_instr_set_trigger_field(
+ lex->sphead->instructions(), lex->spcont,
+ $1.base_name, it)))
YYABORT;
- }
+
+ /*
+ Let us add this item to list of all Item_trigger_field
+ objects in trigger.
+ */
+ lex->trg_table_fields.link_in_list((byte *)&i->trigger_field,
+ (byte **)&i->trigger_field.next_trg_field);
+
lex->sphead->add_instr(i);
}
else if ($1.var)
diff --git a/sql/table.h b/sql/table.h
index c942b038eae..ed9c1445cdf 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -160,6 +160,7 @@ struct st_table {
my_bool no_keyread, no_cache;
my_bool clear_query_id; /* To reset query_id for tables and cols */
my_bool auto_increment_field_not_null;
+ my_bool alias_name_used; /* true if table_name is alias */
Field *next_number_field, /* Set if next_number is activated */
*found_next_number_field, /* Set on open */
*rowid_field;
@@ -243,7 +244,7 @@ typedef struct st_schema_table
const char* table_name;
ST_FIELD_INFO *fields_info;
/* Create information_schema table */
- TABLE *(*create_table) (THD *thd, struct st_schema_table *schema_table);
+ TABLE *(*create_table) (THD *thd, struct st_table_list *table_list);
/* Fill table with data */
int (*fill_table) (THD *thd, struct st_table_list *tables, COND *cond);
/* Handle fileds for old SHOW */
@@ -286,7 +287,7 @@ typedef struct st_table_list
struct st_table_list *next_local;
/* link in a global list of all queries tables */
struct st_table_list *next_global, **prev_global;
- char *db, *alias, *real_name;
+ char *db, *alias, *real_name, *schema_table_name;
char *option; /* Used by cache index */
Item *on_expr; /* Used with outer join */
COND_EQUAL *cond_equal; /* Used with outer join */
diff --git a/strings/ctype-czech.c b/strings/ctype-czech.c
index 6f9e9f74d35..2177a18504e 100644
--- a/strings/ctype-czech.c
+++ b/strings/ctype-czech.c
@@ -589,12 +589,12 @@ static MY_COLLATION_HANDLER my_collation_latin2_czech_ci_handler =
CHARSET_INFO my_charset_latin2_czech_ci =
{
- 2,0,0, /* number */
- MY_CS_COMPILED|MY_CS_STRNXFRM, /* state */
- "latin2", /* cs name */
- "latin2_czech_cs", /* name */
- "", /* comment */
- NULL, /* tailoring */
+ 2,0,0, /* number */
+ MY_CS_COMPILED|MY_CS_STRNXFRM|MY_CS_CSSORT, /* state */
+ "latin2", /* cs name */
+ "latin2_czech_cs", /* name */
+ "", /* comment */
+ NULL, /* tailoring */
ctype_czech,
to_lower_czech,
to_upper_czech,
diff --git a/strings/ctype-uca.c b/strings/ctype-uca.c
index 8df5b3277c1..89c876ad10c 100644
--- a/strings/ctype-uca.c
+++ b/strings/ctype-uca.c
@@ -7288,6 +7288,7 @@ int my_wildcmp_uca(CHARSET_INFO *cs,
{
while (1)
{
+ my_bool escaped= 0;
if ((scan= mb_wc(cs, &w_wc, (const uchar*)wildstr,
(const uchar*)wildend)) <= 0)
return 1;
@@ -7305,6 +7306,7 @@ int my_wildcmp_uca(CHARSET_INFO *cs,
(const uchar*)wildend)) <= 0)
return 1;
wildstr+= scan;
+ escaped= 1;
}
if ((scan= mb_wc(cs, &s_wc, (const uchar*)str,
@@ -7312,7 +7314,7 @@ int my_wildcmp_uca(CHARSET_INFO *cs,
return 1;
str+= scan;
- if (w_wc == (my_wc_t)w_one)
+ if (!escaped && w_wc == (my_wc_t)w_one)
{
result= 1; /* Found an anchor char */
}
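
The new escaped flag makes an escaped wildcard compare as a literal character instead of acting as a one-character anchor. A compact ASCII-only sketch of the same control flow ('\' as the escape, '_' and '%' as wildcards); the real function walks Unicode code points through mb_wc() and handles multi-byte input:

  #include <cstdio>

  // Returns true if str matches pattern, where '_' matches exactly one
  // character, '%' matches any run of characters, and esc makes the next
  // pattern character literal -- the behaviour the escaped flag restores.
  static bool like_match(const char *pat, const char *str, char esc)
  {
    while (*pat)
    {
      bool escaped= false;
      if (*pat == esc && pat[1])
      {
        pat++;                       // look at the escaped character itself
        escaped= true;
      }
      if (!escaped && *pat == '%')
      {
        pat++;
        if (!*pat)
          return true;               // trailing '%' matches the rest
        for (; *str; str++)          // try every possible tail
          if (like_match(pat, str, esc))
            return true;
        return like_match(pat, str, esc);   // also try the empty tail
      }
      if (!*str)
        return false;                // pattern left but string exhausted
      if (!escaped && *pat == '_')
        ;                            // anchor: any single character is fine
      else if (*pat != *str)
        return false;                // literal (possibly escaped) must match
      pat++;
      str++;
    }
    return *str == '\0';
  }

  int main()
  {
    std::printf("%d\n", like_match("a\\_c", "a_c", '\\'));  // 1: '_' is literal
    std::printf("%d\n", like_match("a\\_c", "abc", '\\'));  // 0: not an anchor
    std::printf("%d\n", like_match("a_c",   "abc", '\\'));  // 1: '_' anchors
    return 0;
  }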
diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c
index b3097649158..ce9346eb475 100644
--- a/strings/ctype-utf8.c
+++ b/strings/ctype-utf8.c
@@ -1545,31 +1545,33 @@ int my_wildcmp_unicode(CHARSET_INFO *cs,
{
while (1)
{
+ my_bool escaped= 0;
if ((scan= mb_wc(cs, &w_wc, (const uchar*)wildstr,
(const uchar*)wildend)) <= 0)
return 1;
-
- if (w_wc == (my_wc_t)escape)
- {
- wildstr+= scan;
- if ((scan= mb_wc(cs,&w_wc, (const uchar*)wildstr,
- (const uchar*)wildend)) <= 0)
- return 1;
- }
-
+
if (w_wc == (my_wc_t)w_many)
{
result= 1; /* Found an anchor char */
break;
}
-
+
wildstr+= scan;
+ if (w_wc == (my_wc_t)escape)
+ {
+ if ((scan= mb_wc(cs, &w_wc, (const uchar*)wildstr,
+ (const uchar*)wildend)) <= 0)
+ return 1;
+ wildstr+= scan;
+ escaped= 1;
+ }
+
if ((scan= mb_wc(cs, &s_wc, (const uchar*)str,
- (const uchar*)str_end)) <=0)
+ (const uchar*)str_end)) <= 0)
return 1;
str+= scan;
- if (w_wc == (my_wc_t)w_one)
+ if (!escaped && w_wc == (my_wc_t)w_one)
{
result= 1; /* Found an anchor char */
}
diff --git a/strings/ctype-win1250ch.c b/strings/ctype-win1250ch.c
index b4dbda3e8ed..4ada3d47bf5 100644
--- a/strings/ctype-win1250ch.c
+++ b/strings/ctype-win1250ch.c
@@ -624,12 +624,12 @@ static MY_COLLATION_HANDLER my_collation_czech_ci_handler =
CHARSET_INFO my_charset_cp1250_czech_ci =
{
- 34,0,0, /* number */
- MY_CS_COMPILED|MY_CS_STRNXFRM, /* state */
- "cp1250", /* cs name */
- "cp1250_czech_cs", /* name */
- "", /* comment */
- NULL, /* tailoring */
+ 34,0,0, /* number */
+ MY_CS_COMPILED|MY_CS_STRNXFRM|MY_CS_CSSORT, /* state */
+ "cp1250", /* cs name */
+ "cp1250_czech_cs", /* name */
+ "", /* comment */
+ NULL, /* tailoring */
ctype_win1250ch,
to_lower_win1250ch,
to_upper_win1250ch,
diff --git a/strings/uca-dump.c b/strings/uca-dump.c
index 6836c321526..dd3b74a55e8 100644
--- a/strings/uca-dump.c
+++ b/strings/uca-dump.c
@@ -23,13 +23,14 @@ struct uca_item_st
#define MY_UCA_PSHIFT 8
#endif
+static char *pname[]= {"", "2", "3"};
+
int main(int ac, char **av)
{
char str[256];
char *weights[64];
struct uca_item_st uca[64*1024];
- size_t code, page, w;
- int pagemaxlen[MY_UCA_NPAGES];
+ size_t code, w;
int pageloaded[MY_UCA_NPAGES];
bzero(uca, sizeof(uca));
@@ -155,14 +156,20 @@ int main(int ac, char **av)
printf("#define MY_UCA_CMASK %d\n",MY_UCA_CMASK);
printf("#define MY_UCA_PSHIFT %d\n",MY_UCA_PSHIFT);
- for (w=0; w<1; w++)
+ for (w=0; w<3; w++)
{
+ size_t page;
+ int pagemaxlen[MY_UCA_NPAGES];
+
for (page=0; page < MY_UCA_NPAGES; page++)
{
size_t offs;
size_t maxnum= 0;
size_t nchars= 0;
size_t mchars;
+ size_t ndefs= 0;
+
+ pagemaxlen[page]= 0;
/*
Skip this page if no weights were loaded
@@ -183,15 +190,36 @@ int main(int ac, char **av)
code= page*MY_UCA_NCHARS+offs;
/* Calculate only non-zero weights */
- num=0;
- for (i=0; i < uca[code].num; i++)
+ for (num=0, i=0; i < uca[code].num; i++)
if (uca[code].weight[w][i])
num++;
maxnum= maxnum < num ? num : maxnum;
+
+ /* Check if default weight */
+ if (w == 1 && num == 1)
+ {
+ /* 0020 0000 ... */
+ if (uca[code].weight[w][0] == 0x0020)
+ ndefs++;
+ }
+ else if (w == 2 && num == 1)
+ {
+ /* 0002 0000 ... */
+ if (uca[code].weight[w][0] == 0x0002)
+ ndefs++;
+ }
}
maxnum++;
+ /*
+ If the page has only default weights
+ there is no need to dump it; skip it.
+ */
+ if (ndefs == MY_UCA_NCHARS)
+ {
+ continue;
+ }
switch (maxnum)
{
case 0: mchars= 8; break;
@@ -210,8 +238,8 @@ int main(int ac, char **av)
*/
- printf("uint16 page%03Xdata[]= { /* %04X (%d weights per char) */\n",
- page, page*MY_UCA_NCHARS, maxnum);
+ printf("uint16 page%03Xdata%s[]= { /* %04X (%d weights per char) */\n",
+ page, pname[w], page*MY_UCA_NCHARS, maxnum);
for (offs=0; offs < MY_UCA_NCHARS; offs++)
{
@@ -234,7 +262,17 @@ int main(int ac, char **av)
for (i=0; i < maxnum; i++)
{
- printf("0x%04X",(int)weight[i]);
+ /*
+ Invert weights for secondary level to
+ sort upper case letters before their
+ lower case counterparts.
+ */
+ int tmp= weight[i];
+ if (w == 2 && tmp)
+ tmp= (int)(0x20 - weight[i]);
+
+
+ printf("0x%04X", tmp);
if ((offs+1 != MY_UCA_NCHARS) || (i+1!=maxnum))
printf(",");
nchars++;
@@ -251,25 +289,28 @@ int main(int ac, char **av)
}
printf("};\n\n");
}
- }
- printf("uchar ucal[%d]={\n",MY_UCA_NPAGES);
- for (page=0; page < MY_UCA_NPAGES; page++)
- {
- printf("%d%s%s",pagemaxlen[page],page<MY_UCA_NPAGES-1?",":"",(page+1) % 16 ? "":"\n");
- }
- printf("};\n");
-
-
- printf("uint16 *ucaw[%d]={\n",MY_UCA_NPAGES);
- for (page=0; page < MY_UCA_NPAGES; page++)
- {
- if (!pageloaded[page])
- printf("NULL %s%s",page<MY_UCA_NPAGES-1?",":"", (page+1) % 4 ? "":"\n");
- else
- printf("page%03Xdata%s%s",page,page<MY_UCA_NPAGES-1?",":"", (page+1) % 4 ? "":"\n");
+ printf("uchar uca_length%s[%d]={\n", pname[w], MY_UCA_NPAGES);
+ for (page=0; page < MY_UCA_NPAGES; page++)
+ {
+ printf("%d%s%s",pagemaxlen[page],page<MY_UCA_NPAGES-1?",":"",(page+1) % 16 ? "":"\n");
+ }
+ printf("};\n");
+
+
+ printf("uint16 *uca_weight%s[%d]={\n", pname[w], MY_UCA_NPAGES);
+ for (page=0; page < MY_UCA_NPAGES; page++)
+ {
+ const char *comma= page < MY_UCA_NPAGES-1 ? "," : "";
+ const char *nline= (page+1) % 4 ? "" : "\n";
+ if (!pagemaxlen[page])
+ printf("NULL %s%s%s", w ? " ": "", comma , nline);
+ else
+ printf("page%03Xdata%s%s%s", page, pname[w], comma, nline);
+ }
+ printf("};\n");
}
- printf("};\n");
+
printf("int main(void){ return 0;};\n");
return 0;
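
Besides emitting a table per weight level, the dump now inverts non-zero secondary weights as 0x20 - w. Flipping the weight reverses the relative order of the affected characters while leaving zero (ignorable) weights untouched, which is how the comment in the hunk gets upper-case letters to collate before their lower-case counterparts. A tiny illustration with made-up weights (real values come from the UCA allkeys table and differ):

  #include <cstdio>

  // Invert a secondary weight the way the dump does: keep 0 (ignorable)
  // as 0, flip everything else around 0x20.
  static int invert_secondary(int w)
  {
    return w ? 0x20 - w : 0;
  }

  int main()
  {
    int upper= 0x0008, lower= 0x0002;    // made-up raw weights
    std::printf("raw:      A=0x%04X a=0x%04X -> %s sorts first\n",
                upper, lower, upper < lower ? "A" : "a");
    std::printf("inverted: A=0x%04X a=0x%04X -> %s sorts first\n",
                invert_secondary(upper), invert_secondary(lower),
                invert_secondary(upper) < invert_secondary(lower) ? "A" : "a");
    return 0;
  }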
diff --git a/tests/client_test.c b/tests/client_test.c
index eb0dd840e40..8bc945eac2c 100644
--- a/tests/client_test.c
+++ b/tests/client_test.c
@@ -694,7 +694,8 @@ static void verify_prepare_field(MYSQL_RES *result,
as utf8. Field length is calculated as number of characters * maximum
number of bytes a character can occupy.
*/
- DIE_UNLESS(field->length == length * cs->mbmaxlen);
+ if (length)
+ DIE_UNLESS(field->length == length * cs->mbmaxlen);
if (def)
DIE_UNLESS(strcmp(field->def, def) == 0);
}
@@ -7280,7 +7281,7 @@ static void test_explain_bug()
MYSQL_TYPE_STRING, 0, 0, "", 192, 0);
verify_prepare_field(result, 1, "Type", "COLUMN_TYPE",
- MYSQL_TYPE_STRING, 0, 0, "", 120, 0);
+ MYSQL_TYPE_BLOB, 0, 0, "", 0, 0);
verify_prepare_field(result, 2, "Null", "IS_NULLABLE",
MYSQL_TYPE_STRING, 0, 0, "", 9, 0);