author    | unknown <matthias@three.local.lan> | 2004-11-26 10:49:14 +0100
committer | unknown <matthias@three.local.lan> | 2004-11-26 10:49:14 +0100
commit    | ae64db03a3073f065e38214418e1d3d2fd93f36f (patch)
tree      | 4d1c6a7f4eca1a0ecb0f9d38e69ce7edcb6f9756
parent    | d15053f8e8187214a53dec279b6d85c19244d858 (diff)
parent    | bf98c9795921bf82b96470ad89c1768f03271ed9 (diff)
download  | mariadb-git-ae64db03a3073f065e38214418e1d3d2fd93f36f.tar.gz
Merge mleich@bk-internal.mysql.com:/home/bk/mysql-4.1
into three.local.lan:/home/matthias/Arbeit/mysql-4.1/src
187 files changed, 4206 insertions, 3099 deletions
diff --git a/.bzrignore b/.bzrignore index e794d7d51cc..10388d79013 100644 --- a/.bzrignore +++ b/.bzrignore @@ -924,3 +924,19 @@ Docs/Images/mydsn.txt Docs/Images/myflowchart.txt mysql-test/mysql_test_run_new ndb/tools/ndb_test_platform +help +ndbcluster-1186 +ndbcluster-1186/SCCS +ndbcluster-1186/config.ini +ndbcluster-1186/ndb_1.pid +ndbcluster-1186/ndb_1_out.log +ndbcluster-1186/ndb_1_signal.log +ndbcluster-1186/ndb_2.pid +ndbcluster-1186/ndb_2_out.log +ndbcluster-1186/ndb_2_signal.log +ndbcluster-1186/ndb_3.pid +ndbcluster-1186/ndb_3_cluster.log +ndbcluster-1186/ndb_3_out.log +ndbcluster-1186/ndbcluster.pid +ndb/tools/ndb_restore +ac_available_languages_fragment diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 1c1a3efc542..0483e6b4fa3 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -101,6 +101,7 @@ lenz@mysql.com magnus@neptunus.(none) magnus@shellback.(none) marko@hundin.mysql.fi +mats@mysql.com matt@mysql.com matthias@three.local.lan miguel@hegel.(none) @@ -163,6 +164,7 @@ pem@mysql.com peter@linux.local peter@mysql.com peterg@mysql.com +petr@mysql.com pgulutzan@linux.local ram@deer.(none) ram@gw.mysql.r18.ru diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile index f3c20c81a9f..e98c3d84937 100755 --- a/Build-tools/Do-compile +++ b/Build-tools/Do-compile @@ -10,7 +10,7 @@ use Sys::Hostname; $opt_distribution=$opt_user=$opt_config_env=$opt_config_extra_env=""; $opt_dbd_options=$opt_perl_options=$opt_config_options=$opt_make_options=$opt_suffix=""; $opt_tmp=$opt_version_suffix=""; -$opt_bundled_zlib=$opt_help=$opt_delete=$opt_debug=$opt_stage=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=$opt_with_cluster=$opt_with_debug=$opt_no_benchmark=$opt_no_mysqltest=$opt_without_embedded=$opt_readline=0; +$opt_bundled_zlib=$opt_help=$opt_delete=$opt_debug=$opt_stage=$opt_no_test=$opt_no_perl=$opt_one_error=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=$opt_with_cluster=$opt_with_debug=$opt_no_benchmark=$opt_no_mysqltest=$opt_without_embedded=$opt_readline=0; $opt_innodb=$opt_bdb=$opt_raid=$opt_libwrap=$opt_clearlogs=0; GetOptions( @@ -37,6 +37,7 @@ GetOptions( "no-test", "no-mysqltest", "no-benchmark", + "one-error", "perl-files=s", "perl-options=s", "raid", @@ -310,6 +311,7 @@ if ($opt_stage <= 2) $command=$make; $command.= " $opt_make_options" if (defined($opt_make_options) && $opt_make_options ne ""); safe_system($command); + print LOG "Do-compile: Build successful\n"; } # @@ -372,11 +374,14 @@ $ENV{"LD_LIBRARY_PATH"}= ("$test_dir/lib" . if ($opt_stage <= 5 && !$opt_no_test && !$opt_no_mysqltest) { my $flags= ""; + my $force= ""; $flags.= " --with-ndbcluster" if ($opt_with_cluster); + $flags.= " --force" if (!$opt_one_error); log_timestamp(); system("mkdir $bench_tmpdir") if (! 
-d $bench_tmpdir); safe_cd("${test_dir}/mysql-test"); check_system("./mysql-test-run $flags --tmpdir=$bench_tmpdir --master_port=$mysql_tcp_port --slave_port=$slave_port --ndbcluster_port=$ndbcluster_port --manager-port=$manager_port --no-manager --sleep=10", "tests were successful"); + # 'mysql-test-run' writes its own final message for log evaluation. } # @@ -555,7 +560,10 @@ Do not run any tests. Do not run the benchmark test (written in perl) --no-mysqltest -Do not run the the mysql-test-run test (Same as 'make test') +Do not run the mysql-test-run test (Same as 'make test') + +--one-error +Terminate the mysql-test-run test after the first difference (default: use '--force') --no-perl Do not compile or install Perl modules, use the system installed ones diff --git a/VC++Files/client/mysqladmin.dsp b/VC++Files/client/mysqladmin.dsp index a7e4404e253..7a0b3bec1a7 100644 --- a/VC++Files/client/mysqladmin.dsp +++ b/VC++Files/client/mysqladmin.dsp @@ -115,7 +115,7 @@ LINK32=xilink6.exe # Name "mysqladmin - Win32 classic" # Begin Source File -SOURCE=.\mysqladmin.c +SOURCE=.\mysqladmin.cpp # End Source File # End Target # End Project diff --git a/client/Makefile.am b/client/Makefile.am index 5034dd5bf51..fa317367f71 100644 --- a/client/Makefile.am +++ b/client/Makefile.am @@ -20,15 +20,14 @@ INCLUDES = -I$(top_srcdir)/include -I$(top_srcdir)/regex \ $(openssl_includes) LIBS = @CLIENT_LIBS@ -DEPLIB= @ndb_mgmclient_libs@ \ - ../libmysql/libmysqlclient.la +DEPLIB= ../libmysql/libmysqlclient.la LDADD = @CLIENT_EXTRA_LDFLAGS@ $(DEPLIB) bin_PROGRAMS = mysql mysqladmin mysqlcheck mysqlshow \ mysqldump mysqlimport mysqltest mysqlbinlog mysqlmanagerc mysqlmanager-pwgen noinst_HEADERS = sql_string.h completion_hash.h my_readline.h \ client_priv.h -mysqladmin_SOURCES = mysqladmin.cc mysql_SOURCES = mysql.cc readline.cc sql_string.cc completion_hash.cc +mysqladmin_SOURCES = mysqladmin.cc mysql_LDADD = @readline_link@ @TERMCAP_LIB@ $(LDADD) $(CXXLDFLAGS) mysqlbinlog_LDADD = $(LDADD) $(CXXLDFLAGS) mysql_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES) $(DEPLIB) diff --git a/client/mysql.cc b/client/mysql.cc index 358b13e652b..8e9dd84c8f0 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -1609,7 +1609,7 @@ static void print_help_item(MYSQL_ROW *cur, int num_name, int num_cat, char *las char ccat= (*cur)[num_cat][0]; if (*last_char != ccat) { - put_info(ccat == 'Y' ? "categories :" : "topics :", INFO_INFO); + put_info(ccat == 'Y' ? 
"categories:" : "topics:", INFO_INFO); *last_char= ccat; } tee_fprintf(PAGER, " %s\n", (*cur)[num_name]); @@ -1676,8 +1676,8 @@ static int com_server_help(String *buffer __attribute__((unused)), if (num_fields == 2) { - put_info("Many help items for your request exist", INFO_INFO); - put_info("To make a more specific request, please type 'help <item>',\nwhere item is one of next", INFO_INFO); + put_info("Many help items for your request exist.", INFO_INFO); + put_info("To make a more specific request, please type 'help <item>',\nwhere item is one of the following", INFO_INFO); num_name= 0; num_cat= 1; last_char= '_'; diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc index a9fc3f31d03..8491d0df7b5 100644 --- a/client/mysqladmin.cc +++ b/client/mysqladmin.cc @@ -24,7 +24,7 @@ #include <sys/stat.h> #include <mysql.h> -#ifdef HAVE_NDBCLUSTER_DB +#ifdef LATER_HAVE_NDBCLUSTER_DB #include "../ndb/src/mgmclient/ndb_mgmclient.h" #endif @@ -45,7 +45,7 @@ static uint tcp_port = 0, option_wait = 0, option_silent=0, nr_iterations, opt_count_iterations= 0; static ulong opt_connect_timeout, opt_shutdown_timeout; static my_string unix_port=0; -#ifdef HAVE_NDBCLUSTER_DB +#ifdef LATER_HAVE_NDBCLUSTER_DB static my_bool opt_ndbcluster=0; static char *opt_ndb_connectstring=0; #endif @@ -101,7 +101,7 @@ enum commands { ADMIN_PING, ADMIN_EXTENDED_STATUS, ADMIN_FLUSH_STATUS, ADMIN_FLUSH_PRIVILEGES, ADMIN_START_SLAVE, ADMIN_STOP_SLAVE, ADMIN_FLUSH_THREADS, ADMIN_OLD_PASSWORD -#ifdef HAVE_NDBCLUSTER_DB +#ifdef LATER_HAVE_NDBCLUSTER_DB ,ADMIN_NDB_MGM #endif }; @@ -114,7 +114,7 @@ static const char *command_names[]= { "ping", "extended-status", "flush-status", "flush-privileges", "start-slave", "stop-slave", "flush-threads","old-password", -#ifdef HAVE_NDBCLUSTER_DB +#ifdef LATER_HAVE_NDBCLUSTER_DB "ndb-mgm", #endif NullS @@ -197,7 +197,7 @@ static struct my_option my_long_options[] = {"shutdown_timeout", OPT_SHUTDOWN_TIMEOUT, "", (gptr*) &opt_shutdown_timeout, (gptr*) &opt_shutdown_timeout, 0, GET_ULONG, REQUIRED_ARG, SHUTDOWN_DEF_TIMEOUT, 0, 3600*12, 0, 1, 0}, -#ifdef HAVE_NDBCLUSTER_DB +#ifdef LATER_HAVE_NDBCLUSTER_DB {"ndbcluster", OPT_NDBCLUSTER, "" "", (gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -608,7 +608,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) { char *pos,buff[40]; ulong sec; - pos=strchr(status,' '); + pos= (char*) strchr(status,' '); *pos++=0; printf("%s\t\t\t",status); /* print label */ if ((status=str2int(pos,10,0,LONG_MAX,(long*) &sec))) @@ -903,7 +903,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } mysql->reconnect=1; /* Automatic reconnect is default */ break; -#ifdef HAVE_NDBCLUSTER_DB +#ifdef LATER_HAVE_NDBCLUSTER_DB case ADMIN_NDB_MGM: { if (argc < 2) diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index 8015871428e..de53831c43d 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -44,7 +44,7 @@ static const char *load_default_groups[]= { "mysqlbinlog","client",0 }; void sql_print_error(const char *format, ...); -static bool one_database=0, to_last_remote_log= 0; +static bool one_database=0, to_last_remote_log= 0, disable_log_bin= 0; static const char* database= 0; static my_bool force_opt= 0, short_form= 0, remote_opt= 0; static ulonglong offset = 0; @@ -438,6 +438,13 @@ static struct my_option my_long_options[] = {"database", 'd', "List entries for just this database (local log only).", (gptr*) &database, (gptr*) &database, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 
0, 0}, + {"disable-log-bin", 'D', "Disable binary log. This is useful, if you " + "enabled --to-last-log and are sending the output to the same MySQL server. " + "This way you could avoid an endless loop. You would also like to use it " + "when restoring after a crash to avoid duplication of the statements you " + "already have. NOTE: you will need a SUPER privilege to use this option.", + (gptr*) &disable_log_bin, (gptr*) &disable_log_bin, 0, GET_BOOL, + NO_ARG, 0, 0, 0, 0, 0, 0}, {"force-read", 'f', "Force reading unknown binlog events.", (gptr*) &force_opt, (gptr*) &force_opt, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -1068,6 +1075,11 @@ int main(int argc, char** argv) fprintf(result_file, "/*!40019 SET @@session.max_insert_delayed_threads=0*/;\n"); + + if (disable_log_bin) + fprintf(result_file, + "/*!32316 SET @OLD_SQL_LOG_BIN=@@SQL_LOG_BIN, SQL_LOG_BIN=0*/;\n"); + for (save_stop_position= stop_position, stop_position= ~(my_off_t)0 ; (--argc >= 0) && !stop_passed ; ) { @@ -1082,6 +1094,9 @@ int main(int argc, char** argv) start_position= BIN_LOG_HEADER_SIZE; } + if (disable_log_bin) + fprintf(result_file, "/*!32316 SET SQL_LOG_BIN=@OLD_SQL_LOG_BIN*/;\n"); + if (tmpdir.list) free_tmpdir(&tmpdir); if (result_file != stdout) diff --git a/extra/perror.c b/extra/perror.c index a28626fd873..1bd4b203120 100644 --- a/extra/perror.c +++ b/extra/perror.c @@ -69,7 +69,7 @@ static HA_ERRORS ha_errlist[]= { { 120,"Didn't find key on read or update" }, { 121,"Duplicate key on write or update" }, - { 123,"Someone has changed the row since it was read; Update with is recoverable" }, + { 123,"Someone has changed the row since it was read (while the table was locked to prevent it)" }, { 124,"Wrong index given to function" }, { 126,"Index file is crashed" }, { 127,"Record-file is crashed" }, diff --git a/heap/hp_rfirst.c b/heap/hp_rfirst.c index 1668376ed1c..85548fea212 100644 --- a/heap/hp_rfirst.c +++ b/heap/hp_rfirst.c @@ -52,6 +52,7 @@ int heap_rfirst(HP_INFO *info, byte *record, int inx) my_errno=HA_ERR_END_OF_FILE; DBUG_RETURN(my_errno); } + DBUG_ASSERT(0); /* TODO fix it */ info->current_record=0; info->current_hash_ptr=0; info->update=HA_STATE_PREV_FOUND; diff --git a/include/m_ctype.h b/include/m_ctype.h index ddc21070547..26e285b9683 100644 --- a/include/m_ctype.h +++ b/include/m_ctype.h @@ -63,7 +63,7 @@ typedef struct unicase_info_st #define MY_CS_UNICODE 128 /* is a charset is full unicode */ #define MY_CS_READY 256 /* if a charset is initialized */ #define MY_CS_AVAILABLE 512 /* If either compiled-in or loaded*/ - +#define MY_CS_CSSORT 1024 /* if case sensitive sort order */ #define MY_CHARSET_UNDEFINED 0 diff --git a/include/my_time.h b/include/my_time.h index dab17904b2d..94701e159c4 100644 --- a/include/my_time.h +++ b/include/my_time.h @@ -58,14 +58,15 @@ void init_time(void); my_time_t my_system_gmt_sec(const MYSQL_TIME *t, long *my_timezone, bool *in_dst_time_gap); -void set_zero_time(MYSQL_TIME *tm); +void set_zero_time(MYSQL_TIME *tm, enum enum_mysql_timestamp_type time_type); /* Required buffer length for my_time_to_str, my_date_to_str, my_datetime_to_str and TIME_to_string functions. Note, that the caller is still responsible to check that given TIME structure has values in valid ranges, otherwise size of the buffer could - be not enough. + be not enough. We also rely on the fact that even wrong values + sent using binary protocol fit in this buffer. 
*/ #define MAX_DATE_STRING_REP_LENGTH 30 diff --git a/include/mysql_time.h b/include/mysql_time.h index ec67d60dea5..5f4fc12c005 100644 --- a/include/mysql_time.h +++ b/include/mysql_time.h @@ -33,6 +33,18 @@ enum enum_mysql_timestamp_type }; +/* + Structure which is used to represent datetime values inside MySQL. + + We assume that values in this structure are normalized, i.e. year <= 9999, + month <= 12, day <= 31, hour <= 23, hour <= 59, hour <= 59. Many functions + in server such as my_system_gmt_sec() or make_time() family of functions + rely on this (actually now usage of make_*() family relies on a bit weaker + restriction). Also functions that produce MYSQL_TIME as result ensure this. + There is one exception to this rule though if this structure holds time + value (time_type == MYSQL_TIMESTAMP_TIME) days and hour member can hold + bigger values. +*/ typedef struct st_mysql_time { unsigned int year, month, day, hour, minute, second; diff --git a/innobase/buf/buf0buf.c b/innobase/buf/buf0buf.c index adb69f3c3a7..699ad5fb42e 100644 --- a/innobase/buf/buf0buf.c +++ b/innobase/buf/buf0buf.c @@ -547,8 +547,9 @@ buf_pool_init( } /*----------------------------------------*/ } else { - buf_pool->frame_mem = ut_malloc( - UNIV_PAGE_SIZE * (n_frames + 1)); + buf_pool->frame_mem = ut_malloc_low( + UNIV_PAGE_SIZE * (n_frames + 1), + TRUE, FALSE); } if (buf_pool->frame_mem == NULL) { diff --git a/innobase/buf/buf0lru.c b/innobase/buf/buf0lru.c index 796311f0157..f3fb19ae183 100644 --- a/innobase/buf/buf0lru.c +++ b/innobase/buf/buf0lru.c @@ -42,6 +42,10 @@ initial segment in buf_LRU_get_recent_limit */ #define BUF_LRU_INITIAL_RATIO 8 +/* If we switch on the InnoDB monitor because there are too few available +frames in the buffer pool, we set this to TRUE */ +ibool buf_lru_switched_on_innodb_mon = FALSE; + /********************************************************************** Takes a block out of the LRU list and page hash table and sets the block state to BUF_BLOCK_REMOVE_HASH. */ @@ -288,6 +292,32 @@ buf_LRU_try_free_flushed_blocks(void) } /********************************************************************** +Returns TRUE if less than 15 % of the buffer pool is available. This can be +used in heuristics to prevent huge transactions eating up the whole buffer +pool for their locks. */ + +ibool +buf_LRU_buf_pool_running_out(void) +/*==============================*/ + /* out: TRUE if less than 15 % of buffer pool + left */ +{ + ibool ret = FALSE; + + mutex_enter(&(buf_pool->mutex)); + + if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free) + + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->max_size / 7) { + + ret = TRUE; + } + + mutex_exit(&(buf_pool->mutex)); + + return(ret); +} + +/********************************************************************** Returns a free block from buf_pool. The block is taken off the free list. If it is empty, blocks are moved from the end of the LRU list to the free list. */ @@ -325,7 +355,8 @@ loop: } else if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free) + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->max_size / 5) { - if (!srv_print_innodb_monitor) { + + if (!buf_lru_switched_on_innodb_mon) { /* Over 80 % of the buffer pool is occupied by lock heaps or the adaptive hash index. 
This may be a memory @@ -342,16 +373,18 @@ loop: "InnoDB: lock heap and hash index sizes.\n", (ulong) (buf_pool->curr_size / (1024 * 1024 / UNIV_PAGE_SIZE))); + buf_lru_switched_on_innodb_mon = TRUE; srv_print_innodb_monitor = TRUE; os_event_set(srv_lock_timeout_thread_event); } - } else if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free) - + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->max_size / 4) { + } else if (buf_lru_switched_on_innodb_mon) { /* Switch off the InnoDB Monitor; this is a simple way to stop the monitor if the situation becomes less urgent, - but may also surprise users! */ + but may also surprise users if the user also switched on the + monitor! */ + buf_lru_switched_on_innodb_mon = FALSE; srv_print_innodb_monitor = FALSE; } diff --git a/innobase/configure.in b/innobase/configure.in index 652291f1f38..d83da9fdc5c 100644 --- a/innobase/configure.in +++ b/innobase/configure.in @@ -110,6 +110,9 @@ esac case "$target" in i[[4567]]86-*-*) CFLAGS="$CFLAGS -DUNIV_INTEL_X86";; + # The compiler on Linux/S390 does not seem to have inlining + s390-*-*) + CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";; esac AC_OUTPUT(Makefile os/Makefile ut/Makefile btr/Makefile dnl diff --git a/innobase/include/buf0lru.h b/innobase/include/buf0lru.h index 69a376f8cab..45164dd561e 100644 --- a/innobase/include/buf0lru.h +++ b/innobase/include/buf0lru.h @@ -25,6 +25,16 @@ wasted. */ void buf_LRU_try_free_flushed_blocks(void); /*==================================*/ +/********************************************************************** +Returns TRUE if less than 15 % of the buffer pool is available. This can be +used in heuristics to prevent huge transactions eating up the whole buffer +pool for their locks. */ + +ibool +buf_LRU_buf_pool_running_out(void); +/*==============================*/ + /* out: TRUE if less than 15 % of buffer pool + left */ /*####################################################################### These are low-level functions diff --git a/innobase/include/db0err.h b/innobase/include/db0err.h index be7667bfd0c..de5ac44e73f 100644 --- a/innobase/include/db0err.h +++ b/innobase/include/db0err.h @@ -53,7 +53,11 @@ Created 5/24/1996 Heikki Tuuri name already exists */ #define DB_TABLESPACE_DELETED 44 /* tablespace does not exist or is being dropped right now */ - +#define DB_LOCK_TABLE_FULL 45 /* lock structs have exhausted the + buffer pool (for big transactions, + InnoDB stores the lock structs in the + buffer pool) */ + /* The following are partial failure codes */ #define DB_FAIL 1000 #define DB_OVERFLOW 1001 diff --git a/innobase/include/row0sel.h b/innobase/include/row0sel.h index bb6fb70ca86..8d5187bfc1c 100644 --- a/innobase/include/row0sel.h +++ b/innobase/include/row0sel.h @@ -120,6 +120,7 @@ row_search_for_mysql( /* out: DB_SUCCESS, DB_RECORD_NOT_FOUND, DB_END_OF_INDEX, DB_DEADLOCK, + DB_LOCK_TABLE_FULL, or DB_TOO_BIG_RECORD */ byte* buf, /* in/out: buffer for the fetched row in the MySQL format */ diff --git a/innobase/include/ut0mem.h b/innobase/include/ut0mem.h index 73ecb25101a..74357f6bf13 100644 --- a/innobase/include/ut0mem.h +++ b/innobase/include/ut0mem.h @@ -38,8 +38,10 @@ ut_malloc_low( /*==========*/ /* out, own: allocated memory */ ulint n, /* in: number of bytes to allocate */ - ibool set_to_zero); /* in: TRUE if allocated memory should be set + ibool set_to_zero, /* in: TRUE if allocated memory should be set to zero if UNIV_SET_MEM_TO_ZERO is defined */ + ibool assert_on_error); /* in: if TRUE, we crash mysqld if the memory + cannot be allocated */ 
/************************************************************************** Allocates memory. Sets it also to zero if UNIV_SET_MEM_TO_ZERO is defined. */ diff --git a/innobase/lock/lock0lock.c b/innobase/lock/lock0lock.c index 6f2d58b72c3..78a78c9dd95 100644 --- a/innobase/lock/lock0lock.c +++ b/innobase/lock/lock0lock.c @@ -4064,6 +4064,9 @@ lock_print_info( (ulong) ut_dulint_get_low(purge_sys->purge_undo_no)); fprintf(file, + "History list length %lu\n", (ulong) trx_sys->rseg_history_len); + + fprintf(file, "Total number of lock structs in row lock hash table %lu\n", (ulong) lock_get_n_rec_locks()); diff --git a/innobase/mem/mem0pool.c b/innobase/mem/mem0pool.c index 023369e8ec5..cb891a03092 100644 --- a/innobase/mem/mem0pool.c +++ b/innobase/mem/mem0pool.c @@ -199,7 +199,7 @@ mem_pool_create( but only when allocated at a higher level in mem0mem.c. This is to avoid masking useful Purify warnings. */ - pool->buf = ut_malloc_low(size, FALSE); + pool->buf = ut_malloc_low(size, FALSE, TRUE); pool->size = size; mutex_create(&(pool->mutex)); diff --git a/innobase/row/row0mysql.c b/innobase/row/row0mysql.c index 17747ae2a8e..9613da2e286 100644 --- a/innobase/row/row0mysql.c +++ b/innobase/row/row0mysql.c @@ -308,7 +308,8 @@ handle_new_error: return(TRUE); - } else if (err == DB_DEADLOCK || err == DB_LOCK_WAIT_TIMEOUT) { + } else if (err == DB_DEADLOCK || err == DB_LOCK_WAIT_TIMEOUT + || err == DB_LOCK_TABLE_FULL) { /* Roll back the whole transaction; this resolution was added to version 3.23.43 */ diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c index 740241fa210..edaa555c555 100644 --- a/innobase/row/row0sel.c +++ b/innobase/row/row0sel.c @@ -638,23 +638,24 @@ row_sel_get_clust_rec( if (!node->read_view) { /* Try to place a lock on the index record */ - /* If innodb_locks_unsafe_for_binlog option is used, - we lock only the record, i.e. next-key locking is - not used. - */ - if ( srv_locks_unsafe_for_binlog ) - { - err = lock_clust_rec_read_check_and_lock(0, clust_rec, - index,node->row_lock_mode, LOCK_REC_NOT_GAP, thr); - } - else - { - err = lock_clust_rec_read_check_and_lock(0, clust_rec, index, - node->row_lock_mode, LOCK_ORDINARY, thr); - - } - - if (err != DB_SUCCESS) { + /* If innodb_locks_unsafe_for_binlog option is used, + we lock only the record, i.e. next-key locking is + not used. + */ + + if (srv_locks_unsafe_for_binlog) { + err = lock_clust_rec_read_check_and_lock(0, + clust_rec, + index, node->row_lock_mode, + LOCK_REC_NOT_GAP, thr); + } else { + err = lock_clust_rec_read_check_and_lock(0, + clust_rec, + index, node->row_lock_mode, + LOCK_ORDINARY, thr); + } + + if (err != DB_SUCCESS) { return(err); } @@ -729,8 +730,18 @@ sel_set_rec_lock( ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or LOC_REC_NOT_GAP */ que_thr_t* thr) /* in: query thread */ { + trx_t* trx; ulint err; + trx = thr_get_trx(thr); + + if (UT_LIST_GET_LEN(trx->trx_locks) > 10000) { + if (buf_LRU_buf_pool_running_out()) { + + return(DB_LOCK_TABLE_FULL); + } + } + if (index->type & DICT_CLUSTERED) { err = lock_clust_rec_read_check_and_lock(0, rec, index, mode, type, thr); @@ -1205,22 +1216,24 @@ rec_loop: if (!consistent_read) { - /* If innodb_locks_unsafe_for_binlog option is used, - we lock only the record, i.e. next-key locking is - not used. 
- */ - - if ( srv_locks_unsafe_for_binlog ) - { - err = sel_set_rec_lock(page_rec_get_next(rec), index, - node->row_lock_mode, LOCK_REC_NOT_GAP, thr); - } - else - { - err = sel_set_rec_lock(page_rec_get_next(rec), index, - node->row_lock_mode, LOCK_ORDINARY, thr); - } - if (err != DB_SUCCESS) { + /* If innodb_locks_unsafe_for_binlog option is used, + we lock only the record, i.e. next-key locking is + not used. + */ + + if (srv_locks_unsafe_for_binlog) { + err = sel_set_rec_lock(page_rec_get_next(rec), + index, + node->row_lock_mode, + LOCK_REC_NOT_GAP, thr); + } else { + err = sel_set_rec_lock(page_rec_get_next(rec), + index, + node->row_lock_mode, + LOCK_ORDINARY, thr); + } + + if (err != DB_SUCCESS) { /* Note that in this case we will store in pcur the PREDECESSOR of the record we are waiting the lock for */ @@ -1245,21 +1258,18 @@ rec_loop: if (!consistent_read) { /* Try to place a lock on the index record */ - /* If innodb_locks_unsafe_for_binlog option is used, - we lock only the record, i.e. next-key locking is - not used. - */ + /* If innodb_locks_unsafe_for_binlog option is used, + we lock only the record, i.e. next-key locking is + not used. + */ - if ( srv_locks_unsafe_for_binlog ) - { - err = sel_set_rec_lock(rec, index, node->row_lock_mode, + if (srv_locks_unsafe_for_binlog) { + err = sel_set_rec_lock(rec, index, node->row_lock_mode, LOCK_REC_NOT_GAP, thr); - } - else - { - err = sel_set_rec_lock(rec, index, node->row_lock_mode, + } else { + err = sel_set_rec_lock(rec, index, node->row_lock_mode, LOCK_ORDINARY, thr); - } + } if (err != DB_SUCCESS) { @@ -2765,6 +2775,7 @@ row_search_for_mysql( /* out: DB_SUCCESS, DB_RECORD_NOT_FOUND, DB_END_OF_INDEX, DB_DEADLOCK, + DB_LOCK_TABLE_FULL, or DB_TOO_BIG_RECORD */ byte* buf, /* in/out: buffer for the fetched row in the MySQL format */ @@ -3209,8 +3220,7 @@ rec_loop: we do not lock gaps. Supremum record is really a gap and therefore we do not set locks there. */ - if ( srv_locks_unsafe_for_binlog == FALSE ) - { + if (srv_locks_unsafe_for_binlog == FALSE) { err = sel_set_rec_lock(rec, index, prebuilt->select_lock_type, LOCK_ORDINARY, thr); @@ -3312,11 +3322,18 @@ rec_loop: if (prebuilt->select_lock_type != LOCK_NONE && set_also_gap_locks) { - /* Try to place a lock on the index record */ - err = sel_set_rec_lock(rec, index, + /* Try to place a gap lock on the index + record only if innodb_locks_unsafe_for_binlog + option is not set */ + + if (srv_locks_unsafe_for_binlog == FALSE) { + + err = sel_set_rec_lock(rec, index, prebuilt->select_lock_type, LOCK_GAP, thr); + } + if (err != DB_SUCCESS) { goto lock_wait_or_error; @@ -3338,11 +3355,18 @@ rec_loop: if (prebuilt->select_lock_type != LOCK_NONE && set_also_gap_locks) { - /* Try to place a lock on the index record */ - err = sel_set_rec_lock(rec, index, + /* Try to place a gap lock on the index + record only if innodb_locks_unsafe_for_binlog + option is not set */ + + if (srv_locks_unsafe_for_binlog == FALSE) { + + err = sel_set_rec_lock(rec, index, prebuilt->select_lock_type, LOCK_GAP, thr); + } + if (err != DB_SUCCESS) { goto lock_wait_or_error; @@ -3376,19 +3400,16 @@ rec_loop: prebuilt->select_lock_type, LOCK_REC_NOT_GAP, thr); } else { - /* If innodb_locks_unsafe_for_binlog option is used, - we lock only the record, i.e. next-key locking is - not used. - */ - if ( srv_locks_unsafe_for_binlog ) - { - err = sel_set_rec_lock(rec, index, + /* If innodb_locks_unsafe_for_binlog option is used, + we lock only the record, i.e. next-key locking is + not used. 
*/ + + if (srv_locks_unsafe_for_binlog) { + err = sel_set_rec_lock(rec, index, prebuilt->select_lock_type, LOCK_REC_NOT_GAP, thr); - } - else - { - err = sel_set_rec_lock(rec, index, + } else { + err = sel_set_rec_lock(rec, index, prebuilt->select_lock_type, LOCK_ORDINARY, thr); } diff --git a/innobase/srv/srv0start.c b/innobase/srv/srv0start.c index 9709f5235de..69341a1d7d1 100644 --- a/innobase/srv/srv0start.c +++ b/innobase/srv/srv0start.c @@ -1172,6 +1172,9 @@ NetWare. */ } if (ret == NULL) { + fprintf(stderr, +"InnoDB: Fatal error: cannot allocate the memory for the buffer pool\n"); + return(DB_ERROR); } diff --git a/innobase/trx/trx0purge.c b/innobase/trx/trx0purge.c index 5c62640e011..3df34111281 100644 --- a/innobase/trx/trx0purge.c +++ b/innobase/trx/trx0purge.c @@ -289,7 +289,7 @@ trx_purge_add_update_undo_to_history( flst_get_len(seg_header + TRX_UNDO_PAGE_LIST, mtr)); mlog_write_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE, - hist_size + undo->size, MLOG_4BYTES, mtr); + hist_size + undo->size, MLOG_4BYTES, mtr); } /* Add the log as the first in the history list */ @@ -646,6 +646,27 @@ trx_purge_rseg_get_next_history_log( mutex_exit(&(rseg->mutex)); mtr_commit(&mtr); + mutex_enter(&kernel_mutex); + + /* Add debug code to track history list corruption reported + on the MySQL mailing list on Nov 9, 2004. The fut0lst.c + file-based list was corrupt. The prev node pointer was + FIL_NULL, even though the list length was over 8 million nodes! + We assume that purge truncates the history list in moderate + size pieces, and if we here reach the head of the list, the + list cannot be longer than 20 000 undo logs now. */ + + if (trx_sys->rseg_history_len > 20000) { + ut_print_timestamp(stderr); + fprintf(stderr, +" InnoDB: Warning: purge reached the head of the history list,\n" +"InnoDB: but its length is still reported as %lu! Make a detailed bug\n" +"InnoDB: report, and post it to bugs.mysql.com\n", + (ulong)trx_sys->rseg_history_len); + } + + mutex_exit(&kernel_mutex); + return; } diff --git a/innobase/trx/trx0undo.c b/innobase/trx/trx0undo.c index c1edc223cbc..8d1518753dd 100644 --- a/innobase/trx/trx0undo.c +++ b/innobase/trx/trx0undo.c @@ -1241,7 +1241,7 @@ trx_undo_lists_init( if (page_no != FIL_NULL && srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN) { - + undo = trx_undo_mem_create_at_db_start(rseg, i, page_no, &mtr); size += undo->size; diff --git a/innobase/ut/ut0mem.c b/innobase/ut/ut0mem.c index a6002d7fd83..6ed61b0b5de 100644 --- a/innobase/ut/ut0mem.c +++ b/innobase/ut/ut0mem.c @@ -61,8 +61,10 @@ ut_malloc_low( /*==========*/ /* out, own: allocated memory */ ulint n, /* in: number of bytes to allocate */ - ibool set_to_zero) /* in: TRUE if allocated memory should be set + ibool set_to_zero, /* in: TRUE if allocated memory should be set to zero if UNIV_SET_MEM_TO_ZERO is defined */ + ibool assert_on_error) /* in: if TRUE, we crash mysqld if the memory + cannot be allocated */ { void* ret; @@ -86,9 +88,7 @@ ut_malloc_low( "InnoDB: Check if you should increase the swap file or\n" "InnoDB: ulimits of your operating system.\n" "InnoDB: On FreeBSD check you have compiled the OS with\n" - "InnoDB: a big enough maximum process size.\n" - "InnoDB: We now intentionally generate a seg fault so that\n" - "InnoDB: on Linux we get a stack trace.\n", + "InnoDB: a big enough maximum process size.\n", (ulong) n, (ulong) ut_total_allocated_memory, #ifdef __WIN__ (ulong) GetLastError() @@ -110,7 +110,15 @@ ut_malloc_low( /* Intentional segfault on NetWare causes an abend. 
Avoid this by graceful exit handling in ut_a(). */ #if (!defined __NETWARE__) - if (*ut_mem_null_ptr) ut_mem_null_ptr = 0; + if (assert_on_error) { + fprintf(stderr, + "InnoDB: We now intentionally generate a seg fault so that\n" + "InnoDB: on Linux we get a stack trace.\n"); + + if (*ut_mem_null_ptr) ut_mem_null_ptr = 0; + } else { + return(NULL); + } #else ut_a(0); #endif @@ -144,7 +152,7 @@ ut_malloc( /* out, own: allocated memory */ ulint n) /* in: number of bytes to allocate */ { - return(ut_malloc_low(n, TRUE)); + return(ut_malloc_low(n, TRUE, TRUE)); } /************************************************************************** diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index a57c82e6424..cb20c5181a8 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -3257,11 +3257,12 @@ static void read_binary_time(MYSQL_TIME *tm, uchar **pos) tm->hour+= tm->day*24; tm->day= 0; } + tm->time_type= MYSQL_TIMESTAMP_TIME; + *pos+= length; } else - set_zero_time(tm); - tm->time_type= MYSQL_TIMESTAMP_TIME; + set_zero_time(tm, MYSQL_TIMESTAMP_TIME); } static void read_binary_datetime(MYSQL_TIME *tm, uchar **pos) @@ -3286,12 +3287,12 @@ static void read_binary_datetime(MYSQL_TIME *tm, uchar **pos) else tm->hour= tm->minute= tm->second= 0; tm->second_part= (length > 7) ? (ulong) sint4korr(to+7) : 0; + tm->time_type= MYSQL_TIMESTAMP_DATETIME; *pos+= length; } else - set_zero_time(tm); - tm->time_type= MYSQL_TIMESTAMP_DATETIME; + set_zero_time(tm, MYSQL_TIMESTAMP_DATETIME); } static void read_binary_date(MYSQL_TIME *tm, uchar **pos) @@ -3308,12 +3309,12 @@ static void read_binary_date(MYSQL_TIME *tm, uchar **pos) tm->hour= tm->minute= tm->second= 0; tm->second_part= 0; tm->neg= 0; + tm->time_type= MYSQL_TIMESTAMP_DATE; *pos+= length; } else - set_zero_time(tm); - tm->time_type= MYSQL_TIMESTAMP_DATE; + set_zero_time(tm, MYSQL_TIMESTAMP_DATE); } diff --git a/myisam/ft_boolean_search.c b/myisam/ft_boolean_search.c index 9a51eff88e1..c432ac5a16c 100644 --- a/myisam/ft_boolean_search.c +++ b/myisam/ft_boolean_search.c @@ -348,7 +348,7 @@ static void _ftb_init_index_search(FT_INFO *ftb) FTB_EXPR *top_ftbe=ftbe->up->up; ftbw->docid[0]=HA_OFFSET_ERROR; for (ftbe=ftbw->up; ftbe != top_ftbe; ftbe=ftbe->up) - if (ftbe->flags & FTB_FLAG_YES) + if (!(ftbe->flags & FTB_FLAG_NO)) ftbe->yweaks++; ftbe=0; break; @@ -356,7 +356,7 @@ static void _ftb_init_index_search(FT_INFO *ftb) } if (!ftbe) continue; - /* 3 */ + /* 4 */ if (!is_tree_inited(& ftb->no_dupes)) init_tree(& ftb->no_dupes,0,0,sizeof(my_off_t), _ftb_no_dupes_cmp,0,0,0); diff --git a/myisam/mi_create.c b/myisam/mi_create.c index 7fc7cc4edf1..f99a2c655d2 100644 --- a/myisam/mi_create.c +++ b/myisam/mi_create.c @@ -16,7 +16,7 @@ /* Create a MyISAM table */ -#include "fulltext.h" +#include "ftdefs.h" #include "sp_defs.h" #if defined(MSDOS) || defined(__WIN__) @@ -41,7 +41,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, File dfile,file; int errpos,save_errno; myf create_flag; - uint fields,length,max_key_length,packed,pointer, + uint fields,length,max_key_length,packed,pointer,real_length_diff, key_length,info_length,key_segs,options,min_key_length_skip, base_pos,varchar_count,long_varchar_count,varchar_length, max_key_block_length,unique_key_parts,fulltext_keys,offset; @@ -238,7 +238,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, { share.state.key_root[i]= HA_OFFSET_ERROR; - min_key_length_skip=length=0; + min_key_length_skip=length=real_length_diff=0; key_length=pointer; if (keydef->flag & HA_SPATIAL) { @@ 
-297,6 +297,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, key_length+= HA_FT_MAXBYTELEN+HA_FT_WLEN; length++; /* At least one length byte */ min_key_length_skip+=HA_FT_MAXBYTELEN; + real_length_diff=HA_FT_MAXBYTELEN-FT_MAX_WORD_LEN_FOR_SORT; } else { @@ -397,7 +398,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, key_segs) share.state.rec_per_key_part[key_segs-1]=1L; length+=key_length; - keydef->block_length= MI_BLOCK_SIZE(length,pointer,MI_MAX_KEYPTR_SIZE); + keydef->block_length= MI_BLOCK_SIZE(length-real_length_diff, + pointer,MI_MAX_KEYPTR_SIZE); if (keydef->block_length > MI_MAX_KEY_BLOCK_LENGTH || length >= MI_MAX_KEY_BUFF) { diff --git a/myisam/mi_test3.c b/myisam/mi_test3.c index dca04a9a64b..27d23317b5c 100644 --- a/myisam/mi_test3.c +++ b/myisam/mi_test3.c @@ -40,7 +40,7 @@ #endif -const char *filename= "test3.MSI"; +const char *filename= "test3"; uint tests=10,forks=10,key_cacheing=0,use_log=0; static void get_options(int argc, char *argv[]); @@ -363,7 +363,7 @@ int test_write(MI_INFO *file,int id,int lock_type) } sprintf(record.id,"%7d",getpid()); - strmov(record.text,"Testing..."); + strnmov(record.text,"Testing...", sizeof(record.text)); tries=(uint) rnd(100)+10; for (i=count=0 ; i < tries ; i++) diff --git a/myisam/mi_write.c b/myisam/mi_write.c index e059bbb569f..303e924118f 100644 --- a/myisam/mi_write.c +++ b/myisam/mi_write.c @@ -165,12 +165,7 @@ err: { uint j; for (j=0 ; j < share->base.keys ; j++) - { - if (is_tree_inited(&info->bulk_insert[j])) - { - reset_tree(&info->bulk_insert[j]); - } - } + mi_flush_bulk_insert(info, j); } info->errkey= (int) i; while ( i-- > 0) @@ -329,7 +324,7 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *temp_buff,*keypos; uchar keybuff[MI_MAX_KEY_BUFF]; my_bool was_last_key; - my_off_t next_page; + my_off_t next_page, dupp_key_pos; DBUG_ENTER("w_search"); DBUG_PRINT("enter",("page: %ld",page)); @@ -349,9 +344,9 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo, /* get position to record with duplicated key */ tmp_key_length=(*keyinfo->get_key)(keyinfo,nod_flag,&keypos,keybuff); if (tmp_key_length) - info->dupp_key_pos=_mi_dpos(info,0,keybuff+tmp_key_length); + dupp_key_pos=_mi_dpos(info,0,keybuff+tmp_key_length); else - info->dupp_key_pos= HA_OFFSET_ERROR; + dupp_key_pos= HA_OFFSET_ERROR; if (keyinfo->flag & HA_FULLTEXT) { uint off; @@ -370,7 +365,7 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo, else { /* popular word. two-level tree. going down */ - my_off_t root=info->dupp_key_pos; + my_off_t root=dupp_key_pos; keyinfo=&info->s->ft2_keyinfo; get_key_full_length_rdonly(off, key); key+=off; @@ -389,6 +384,7 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo, } else /* not HA_FULLTEXT, normal HA_NOSAME key */ { + info->dupp_key_pos= dupp_key_pos; my_afree((byte*) temp_buff); my_errno=HA_ERR_FOUND_DUPP_KEY; DBUG_RETURN(-1); diff --git a/mysql-test/include/ctype_common.inc b/mysql-test/include/ctype_common.inc new file mode 100644 index 00000000000..77937bdb854 --- /dev/null +++ b/mysql-test/include/ctype_common.inc @@ -0,0 +1,56 @@ +# +# Common tests for all character sets and collations. +# Include this file from a test with @test_characrer_set +# and @test_collation set to desired values. +# +# Please don't use SHOW CREATE TABLE in this file, +# we want it to be HANDLER independent. You can +# use SHOW FULL COLUMNS instead. 
+# +# Please surround all CREATE TABLE with --disable_warnings +# and --enable_warnings to be able to set storage_engine +# without having to check if the hanlder exists. + +SET @safe_character_set_server= @@character_set_server; +SET @safe_collation_server= @@collation_server; +SET character_set_server= @test_character_set; +SET collation_server= @test_collation; +CREATE DATABASE d1; +USE d1; + +# +# Bug 1883: LIKE did not work in some cases with a key. +# +--disable_warnings +CREATE TABLE t1 (c CHAR(10), KEY(c)); +--enable_warnings +# check the column was created with the expected charset/collation +SHOW FULL COLUMNS FROM t1; +INSERT INTO t1 VALUES ('aaa'),('aaaa'),('aaaaa'); +SELECT c as want3results FROM t1 WHERE c LIKE 'aaa%'; +DROP TABLE t1; + +# +# Bug 6643 incorrect response with partial utf8 index +# +--disable_warnings +CREATE TABLE t1 (c1 varchar(15), KEY c1 (c1(2))); +--enable_warnings +# check the column was created with the expected charset/collation +SHOW FULL COLUMNS FROM t1; +INSERT INTO t1 VALUES ('location'),('loberge'),('lotre'),('boabab'); +SELECT c1 as want3results from t1 where c1 like 'l%'; +SELECT c1 as want3results from t1 where c1 like 'lo%'; +SELECT c1 as want1result from t1 where c1 like 'loc%'; +SELECT c1 as want1result from t1 where c1 like 'loca%'; +SELECT c1 as want1result from t1 where c1 like 'locat%'; +SELECT c1 as want1result from t1 where c1 like 'locati%'; +SELECT c1 as want1result from t1 where c1 like 'locatio%'; +SELECT c1 as want1result from t1 where c1 like 'location%'; +DROP TABLE t1; + +DROP DATABASE d1; +# Restore settings +USE test; +SET character_set_server= @safe_character_set_server; +SET collation_server= @safe_collation_server; diff --git a/mysql-test/mysql_test_run.c b/mysql-test/mysql_test_run.c deleted file mode 100644 index 6f388fc4a45..00000000000 --- a/mysql-test/mysql_test_run.c +++ /dev/null @@ -1,1728 +0,0 @@ -/* - Copyright (c) 2002, 2003 Novell, Inc. All Rights Reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -#include <stdlib.h> -#include <stdio.h> -#include <errno.h> -#ifndef __WIN__ -#include <dirent.h> -#endif -#include <string.h> -#ifdef __NETWARE__ -#include <screen.h> -#include <nks/vm.h> -#endif -#include <ctype.h> -#include <sys/stat.h> -#ifndef __WIN__ -#include <unistd.h> -#endif -#include <fcntl.h> -#ifdef __NETWARE__ -#include <sys/mode.h> -#endif -#ifdef __WIN__ -#include <Shlwapi.h> -#include <direct.h> -#endif - -#include "my_manage.h" - -/****************************************************************************** - - macros - -******************************************************************************/ - -#define HEADER "TEST RESULT \n" -#define DASH "-------------------------------------------------------\n" - -#define NW_TEST_SUFFIX ".nw-test" -#define NW_RESULT_SUFFIX ".nw-result" -#define TEST_SUFFIX ".test" -#define RESULT_SUFFIX ".result" -#define REJECT_SUFFIX ".reject" -#define OUT_SUFFIX ".out" -#define ERR_SUFFIX ".err" - -const char *TEST_PASS = "[ pass ]"; -const char *TEST_SKIP = "[ skip ]"; -const char *TEST_FAIL = "[ fail ]"; -const char *TEST_BAD = "[ bad ]"; -const char *TEST_IGNORE = "[ignore]"; - -/****************************************************************************** - - global variables - -******************************************************************************/ -#ifdef __NETWARE__ -static char base_dir[PATH_MAX] = "sys:/mysql"; -#else -static char base_dir[PATH_MAX] = ".."; -#endif -static char db[PATH_MAX] = "test"; -static char user[PATH_MAX] = "root"; -static char password[PATH_MAX] = ""; - -int master_port = 9306; -int slave_port = 9307; - -#if !defined(__NETWARE__) && !defined(__WIN__) -static char master_socket[PATH_MAX] = "./var/tmp/master.sock"; -static char slave_socket[PATH_MAX] = "./var/tmp/slave.sock"; -#endif - -// comma delimited list of tests to skip or empty string -#ifndef __WIN__ -static char skip_test[PATH_MAX] = " lowercase_table3 , system_mysql_db_fix "; -#else -/* - The most ignore testes contain the calls of system command -*/ -#define MAX_COUNT_TESTES 1024 -/* - lowercase_table3 is disabled by Gerg - system_mysql_db_fix is disabled by Gerg - sp contains a command system - rpl_EE_error contains a command system - rpl_loaddatalocal contains a command system - ndb_autodiscover contains a command system - rpl_rotate_logs contains a command system - repair contains a command system - rpl_trunc_binlog contains a command system - mysqldump contains a command system - rpl000001 makes non-exit loop...temporary skiped -*/ -static char skip_test[PATH_MAX] = " lowercase_table3 , system_mysql_db_fix , sp , rpl_EE_error , rpl_loaddatalocal , ndb_autodiscover , rpl_rotate_logs , repair , rpl_trunc_binlog , mysqldump , rpl000001 "; -#endif -static char ignore_test[PATH_MAX] = ""; - -static char bin_dir[PATH_MAX]; -static char mysql_test_dir[PATH_MAX]; -static char test_dir[PATH_MAX]; -static char mysql_tmp_dir[PATH_MAX]; -static char result_dir[PATH_MAX]; -static char master_dir[PATH_MAX]; -static char slave_dir[PATH_MAX]; -static char lang_dir[PATH_MAX]; -static char char_dir[PATH_MAX]; - -static char mysqladmin_file[PATH_MAX]; -static char mysqld_file[PATH_MAX]; -static char mysqltest_file[PATH_MAX]; -#ifndef __WIN__ -static char master_pid[PATH_MAX]; -static char slave_pid[PATH_MAX]; -static char sh_file[PATH_MAX] = 
"/bin/sh"; -#else -static HANDLE master_pid; -static HANDLE slave_pid; -#endif - -static char master_opt[PATH_MAX] = ""; -static char slave_opt[PATH_MAX] = ""; - -static char slave_master_info[PATH_MAX] = ""; - -static char master_init_script[PATH_MAX] = ""; -static char slave_init_script[PATH_MAX] = ""; - -// OpenSSL -static char ca_cert[PATH_MAX]; -static char server_cert[PATH_MAX]; -static char server_key[PATH_MAX]; -static char client_cert[PATH_MAX]; -static char client_key[PATH_MAX]; - -int total_skip = 0; -int total_pass = 0; -int total_fail = 0; -int total_test = 0; - -int total_ignore = 0; - -int use_openssl = FALSE; -int master_running = FALSE; -int slave_running = FALSE; -int skip_slave = TRUE; -int single_test = TRUE; - -int restarts = 0; - -FILE *log_fd = NULL; - -/****************************************************************************** - - functions - -******************************************************************************/ - -/****************************************************************************** - - prototypes - -******************************************************************************/ - -void report_stats(); -void install_db(char *); -void mysql_install_db(); -void start_master(); -void start_slave(); -void mysql_start(); -void stop_slave(); -void stop_master(); -void mysql_stop(); -void mysql_restart(); -int read_option(char *, char *); -void run_test(char *); -void setup(char *); -void vlog(const char *, va_list); -void mlog(const char *, ...); -void log_info(const char *, ...); -void log_error(const char *, ...); -void log_errno(const char *, ...); -void die(const char *); -char *str_tok(char *string, const char *delim); -#ifndef __WIN__ -void run_init_script(const char *script_name); -#endif -/****************************************************************************** - - report_stats() - - Report the gathered statistics. - -******************************************************************************/ -void report_stats() -{ - if (total_fail == 0) - { - mlog("\nAll %d test(s) were successful.\n", total_test); - } - else - { - double percent = ((double)total_pass / total_test) * 100; - - mlog("\nFailed %u/%u test(s), %.02f%% successful.\n", - total_fail, total_test, percent); - mlog("\nThe .out and .err files in %s may give you some\n", result_dir); - mlog("hint of what when wrong.\n"); - mlog("\nIf you want to report this error, please first read the documentation\n"); - mlog("at: http://www.mysql.com/doc/M/y/MySQL_test_suite.html\n"); - } -} - -/****************************************************************************** - - install_db() - - Install the a database. 
- -******************************************************************************/ -void install_db(char *datadir) -{ - arg_list_t al; - int err; - char input[PATH_MAX]; - char output[PATH_MAX]; - char error[PATH_MAX]; - - // input file -#ifdef __NETWARE__ - snprintf(input, PATH_MAX, "%s/bin/init_db.sql", base_dir); -#else - snprintf(input, PATH_MAX, "%s/mysql-test/init_db.sql", base_dir); -#endif - snprintf(output, PATH_MAX, "%s/install.out", datadir); - snprintf(error, PATH_MAX, "%s/install.err", datadir); - - // args - init_args(&al); - add_arg(&al, mysqld_file); - add_arg(&al, "--no-defaults"); - add_arg(&al, "--bootstrap"); - add_arg(&al, "--skip-grant-tables"); - add_arg(&al, "--basedir=%s", base_dir); - add_arg(&al, "--datadir=%s", datadir); - add_arg(&al, "--skip-innodb"); - add_arg(&al, "--skip-bdb"); -#ifndef __NETWARE__ - add_arg(&al, "--character-sets-dir=%s", char_dir); - add_arg(&al, "--language=%s", lang_dir); -#endif - - // spawn - if ((err = spawn(mysqld_file, &al, TRUE, input, output, error, NULL)) != 0) - { - die("Unable to create database."); - } - - // free args - free_args(&al); -} - -/****************************************************************************** - - mysql_install_db() - - Install the test databases. - -******************************************************************************/ -void mysql_install_db() -{ - char temp[PATH_MAX]; - - // var directory - snprintf(temp, PATH_MAX, "%s/var", mysql_test_dir); - - // clean up old direcotry - del_tree(temp); - - // create var directory -#ifndef __WIN__ - mkdir(temp, S_IRWXU); - // create subdirectories - mlog("Creating test-suite folders...\n"); - snprintf(temp, PATH_MAX, "%s/var/run", mysql_test_dir); - mkdir(temp, S_IRWXU); - snprintf(temp, PATH_MAX, "%s/var/tmp", mysql_test_dir); - mkdir(temp, S_IRWXU); - snprintf(temp, PATH_MAX, "%s/var/master-data", mysql_test_dir); - mkdir(temp, S_IRWXU); - snprintf(temp, PATH_MAX, "%s/var/master-data/mysql", mysql_test_dir); - mkdir(temp, S_IRWXU); - snprintf(temp, PATH_MAX, "%s/var/master-data/test", mysql_test_dir); - mkdir(temp, S_IRWXU); - snprintf(temp, PATH_MAX, "%s/var/slave-data", mysql_test_dir); - mkdir(temp, S_IRWXU); - snprintf(temp, PATH_MAX, "%s/var/slave-data/mysql", mysql_test_dir); - mkdir(temp, S_IRWXU); - snprintf(temp, PATH_MAX, "%s/var/slave-data/test", mysql_test_dir); - mkdir(temp, S_IRWXU); -#else - mkdir(temp); - // create subdirectories - mlog("Creating test-suite folders...\n"); - snprintf(temp, PATH_MAX, "%s/var/run", mysql_test_dir); - mkdir(temp); - snprintf(temp, PATH_MAX, "%s/var/tmp", mysql_test_dir); - mkdir(temp); - snprintf(temp, PATH_MAX, "%s/var/master-data", mysql_test_dir); - mkdir(temp); - snprintf(temp, PATH_MAX, "%s/var/master-data/mysql", mysql_test_dir); - mkdir(temp); - snprintf(temp, PATH_MAX, "%s/var/master-data/test", mysql_test_dir); - mkdir(temp); - snprintf(temp, PATH_MAX, "%s/var/slave-data", mysql_test_dir); - mkdir(temp); - snprintf(temp, PATH_MAX, "%s/var/slave-data/mysql", mysql_test_dir); - mkdir(temp); - snprintf(temp, PATH_MAX, "%s/var/slave-data/test", mysql_test_dir); - mkdir(temp); -#endif - - // install databases - mlog("Creating test databases for master... \n"); - install_db(master_dir); - mlog("Creating test databases for slave... \n"); - install_db(slave_dir); -} - -/****************************************************************************** - - start_master() - - Start the master server. 
- -******************************************************************************/ -void start_master() -{ - arg_list_t al; - int err; - char master_out[PATH_MAX]; - char master_err[PATH_MAX]; -// char temp[PATH_MAX]; - char temp2[PATH_MAX]; - - // remove old berkeley db log files that can confuse the server - removef("%s/log.*", master_dir); - - // remove stale binary logs - removef("%s/var/log/*-bin.*", mysql_test_dir); - - // remove stale binary logs - removef("%s/var/log/*.index", mysql_test_dir); - - // remove master.info file - removef("%s/master.info", master_dir); - - // remove relay files - removef("%s/var/log/*relay*", mysql_test_dir); - - // remove relay-log.info file - removef("%s/relay-log.info", master_dir); - - // init script - if (master_init_script[0] != 0) - { -#ifdef __NETWARE__ - // TODO: use the scripts - if (strinstr(master_init_script, "repair_part2-master.sh") != 0) - { - FILE *fp; - - // create an empty index file - snprintf(temp, PATH_MAX, "%s/test/t1.MYI", master_dir); - fp = fopen(temp, "wb+"); - - fputs("1", fp); - - fclose(fp); - } -#elif !defined(__WIN__) - run_init_script(master_init_script); -#endif - } - - // redirection files - snprintf(master_out, PATH_MAX, "%s/var/run/master%u.out", - mysql_test_dir, restarts); - snprintf(master_err, PATH_MAX, "%s/var/run/master%u.err", - mysql_test_dir, restarts); -#ifndef __WIN__ - snprintf(temp2,PATH_MAX,"%s/var",mysql_test_dir); - mkdir(temp2,S_IRWXU); - snprintf(temp2,PATH_MAX,"%s/var/log",mysql_test_dir); - mkdir(temp2,S_IRWXU); -#else - snprintf(temp2,PATH_MAX,"%s/var",mysql_test_dir); - mkdir(temp2); - snprintf(temp2,PATH_MAX,"%s/var/log",mysql_test_dir); - mkdir(temp2); -#endif - // args - init_args(&al); - add_arg(&al, "%s", mysqld_file); - add_arg(&al, "--no-defaults"); - add_arg(&al, "--log-bin=%s/var/log/master-bin",mysql_test_dir); - add_arg(&al, "--server-id=1"); - add_arg(&al, "--basedir=%s", base_dir); - add_arg(&al, "--port=%u", master_port); -#if !defined(__NETWARE__) && !defined(__WIN__) - add_arg(&al, "--socket=%s",master_socket); -#endif - add_arg(&al, "--local-infile"); - add_arg(&al, "--core"); - add_arg(&al, "--datadir=%s", master_dir); -#ifndef __WIN__ - add_arg(&al, "--pid-file=%s", master_pid); -#endif - add_arg(&al, "--character-sets-dir=%s", char_dir); - add_arg(&al, "--tmpdir=%s", mysql_tmp_dir); - add_arg(&al, "--language=%s", lang_dir); -#ifdef DEBUG //only for debug builds - add_arg(&al, "--debug"); -#endif - - if (use_openssl) - { - add_arg(&al, "--ssl-ca=%s", ca_cert); - add_arg(&al, "--ssl-cert=%s", server_cert); - add_arg(&al, "--ssl-key=%s", server_key); - } - - // $MASTER_40_ARGS - add_arg(&al, "--rpl-recovery-rank=1"); - add_arg(&al, "--init-rpl-role=master"); - - // $SMALL_SERVER - add_arg(&al, "-O"); - add_arg(&al, "key_buffer_size=1M"); - add_arg(&al, "-O"); - add_arg(&al, "sort_buffer=256K"); - add_arg(&al, "-O"); - add_arg(&al, "max_heap_table_size=1M"); - - // $EXTRA_MASTER_OPT - if (master_opt[0] != 0) - { - char *p; - - p = (char *)str_tok(master_opt, " \t"); - if (!strstr(master_opt, "timezone")) - { - while (p) - { - add_arg(&al, "%s", p); - p = (char *)str_tok(NULL, " \t"); - } - } - } - - // remove the pid file if it exists -#ifndef __WIN__ - remove(master_pid); -#endif - - // spawn -#ifdef __WIN__ - if ((err= spawn(mysqld_file, &al, FALSE, NULL, master_out, master_err, &master_pid)) == 0) -#else - if ((err= spawn(mysqld_file, &al, FALSE, NULL, master_out, master_err, master_pid)) == 0) -#endif - { - sleep_until_file_exists(master_pid); - - if ((err = 
wait_for_server_start(bin_dir, mysqladmin_file, user, password, master_port, - mysql_tmp_dir)) == 0) - { - master_running = TRUE; - } - else - { - log_error("The master server went down early."); - } - } - else - { - log_error("Unable to start master server."); - } - - // free_args - free_args(&al); -} - -/****************************************************************************** - - start_slave() - - Start the slave server. - -******************************************************************************/ -void start_slave() -{ - arg_list_t al; - int err; - char slave_out[PATH_MAX]; - char slave_err[PATH_MAX]; - - // skip? - if (skip_slave) return; - - // remove stale binary logs - removef("%s/*-bin.*", slave_dir); - - // remove stale binary logs - removef("%s/*.index", slave_dir); - - // remove master.info file - removef("%s/master.info", slave_dir); - - // remove relay files - removef("%s/var/log/*relay*", mysql_test_dir); - - // remove relay-log.info file - removef("%s/relay-log.info", slave_dir); - - // init script - if (slave_init_script[0] != 0) - { -#ifdef __NETWARE__ - // TODO: use the scripts - if (strinstr(slave_init_script, "rpl000016-slave.sh") != 0) - { - // create empty master.info file - snprintf(temp, PATH_MAX, "%s/master.info", slave_dir); - close(open(temp, O_WRONLY | O_CREAT,S_IRWXU|S_IRWXG|S_IRWXO)); - } - else if (strinstr(slave_init_script, "rpl000017-slave.sh") != 0) - { - FILE *fp; - - // create a master.info file - snprintf(temp, PATH_MAX, "%s/master.info", slave_dir); - fp = fopen(temp, "wb+"); - - fputs("master-bin.000001\n", fp); - fputs("4\n", fp); - fputs("127.0.0.1\n", fp); - fputs("replicate\n", fp); - fputs("aaaaaaaaaaaaaaab\n", fp); - fputs("9306\n", fp); - fputs("1\n", fp); - fputs("0\n", fp); - - fclose(fp); - } - else if (strinstr(slave_init_script, "rpl_rotate_logs-slave.sh") != 0) - { - // create empty master.info file - snprintf(temp, PATH_MAX, "%s/master.info", slave_dir); - close(open(temp, O_WRONLY | O_CREAT,S_IRWXU|S_IRWXG|S_IRWXO)); - } -#elif !defined(__WIN__) - run_init_script(slave_init_script); -#endif - } - - // redirection files - snprintf(slave_out, PATH_MAX, "%s/var/run/slave%u.out", - mysql_test_dir, restarts); - snprintf(slave_err, PATH_MAX, "%s/var/run/slave%u.err", - mysql_test_dir, restarts); - - // args - init_args(&al); - add_arg(&al, "%s", mysqld_file); - add_arg(&al, "--no-defaults"); - add_arg(&al, "--log-bin=slave-bin"); - add_arg(&al, "--relay_log=slave-relay-bin"); - add_arg(&al, "--basedir=%s", base_dir); - add_arg(&al, "--port=%u", slave_port); -#if !defined(__NETWARE__) && !defined(__WIN__) - add_arg(&al, "--socket=%s",slave_socket); -#endif - add_arg(&al, "--datadir=%s", slave_dir); -#ifndef __WIN__ - add_arg(&al, "--pid-file=%s", slave_pid); -#endif - add_arg(&al, "--character-sets-dir=%s", char_dir); - add_arg(&al, "--core"); - add_arg(&al, "--tmpdir=%s", mysql_tmp_dir); - add_arg(&al, "--language=%s", lang_dir); - - add_arg(&al, "--exit-info=256"); - add_arg(&al, "--log-slave-updates"); - add_arg(&al, "--init-rpl-role=slave"); - add_arg(&al, "--skip-innodb"); - add_arg(&al, "--skip-slave-start"); - add_arg(&al, "--slave-load-tmpdir=../../var/tmp"); - - add_arg(&al, "--report-user=%s", user); - add_arg(&al, "--report-host=127.0.0.1"); - add_arg(&al, "--report-port=%u", slave_port); - - add_arg(&al, "--master-retry-count=10"); - add_arg(&al, "-O"); - add_arg(&al, "slave_net_timeout=10"); -#ifdef DEBUG //only for debug builds - add_arg(&al, "--debug"); -#endif - - if (use_openssl) - { - add_arg(&al, "--ssl-ca=%s", 
ca_cert); - add_arg(&al, "--ssl-cert=%s", server_cert); - add_arg(&al, "--ssl-key=%s", server_key); - } - - // slave master info - if (slave_master_info[0] != 0) - { - char *p; - - p = (char *)str_tok(slave_master_info, " \t"); - - while(p) - { - add_arg(&al, "%s", p); - - p = (char *)str_tok(NULL, " \t"); - } - } - else - { - add_arg(&al, "--master-user=%s", user); - add_arg(&al, "--master-password=%s", password); - add_arg(&al, "--master-host=127.0.0.1"); - add_arg(&al, "--master-port=%u", master_port); - add_arg(&al, "--master-connect-retry=1"); - add_arg(&al, "--server-id=2"); - add_arg(&al, "--rpl-recovery-rank=2"); - } - - // small server - add_arg(&al, "-O"); - add_arg(&al, "key_buffer_size=1M"); - add_arg(&al, "-O"); - add_arg(&al, "sort_buffer=256K"); - add_arg(&al, "-O"); - add_arg(&al, "max_heap_table_size=1M"); - - - // opt args - if (slave_opt[0] != 0) - { - char *p; - - p = (char *)str_tok(slave_opt, " \t"); - - while(p) - { - add_arg(&al, "%s", p); - - p = (char *)str_tok(NULL, " \t"); - } - } - - // remove the pid file if it exists -#ifndef __WIN__ - remove(slave_pid); -#endif - // spawn -#ifdef __WIN__ - if ((err = spawn(mysqld_file, &al, FALSE, NULL, slave_out, slave_err, &slave_pid)) == 0) -#else - if ((err = spawn(mysqld_file, &al, FALSE, NULL, slave_out, slave_err, slave_pid)) == 0) -#endif - { - sleep_until_file_exists(slave_pid); - - if ((err = wait_for_server_start(bin_dir, mysqladmin_file, user, password, slave_port, - mysql_tmp_dir)) == 0) - { - slave_running = TRUE; - } - else - { - log_error("The slave server went down early."); - } - } - else - { - log_error("Unable to start slave server."); - } - - // free args - free_args(&al); -} - -/****************************************************************************** - - mysql_start() - - Start the mysql servers. - -******************************************************************************/ -void mysql_start() -{ -// log_info("Starting the MySQL server(s): %u", ++restarts); - start_master(); - - start_slave(); - - // activate the test screen -#ifdef __NETWARE__ - ActivateScreen(getscreenhandle()); -#endif -} - -/****************************************************************************** - - stop_slave() - - Stop the slave server. - -******************************************************************************/ -void stop_slave() -{ - int err; - - // running? - if (!slave_running) return; - - // stop - if ((err = stop_server(bin_dir, mysqladmin_file, user, password, slave_port, slave_pid, - mysql_tmp_dir)) == 0) - { - slave_running = FALSE; - } - else - { - log_error("Unable to stop slave server."); - } -} - -/****************************************************************************** - - stop_master() - - Stop the master server. - -******************************************************************************/ -void stop_master() -{ - int err; - - // running? - if (!master_running) return; - - if ((err = stop_server(bin_dir, mysqladmin_file, user, password, master_port, master_pid, - mysql_tmp_dir)) == 0) - { - master_running = FALSE; - } - else - { - log_error("Unable to stop master server."); - } -} - -/****************************************************************************** - - mysql_stop() - - Stop the mysql servers. 
- -******************************************************************************/ -void mysql_stop() -{ - - stop_master(); - - stop_slave(); - - // activate the test screen -#ifdef __NETWARE__ - ActivateScreen(getscreenhandle()); -#endif -} - -/****************************************************************************** - - mysql_restart() - - Restart the mysql servers. - -******************************************************************************/ -void mysql_restart() -{ -// log_info("Restarting the MySQL server(s): %u", ++restarts); - - mysql_stop(); - - mlog(DASH); - - mysql_start(); -} - -/****************************************************************************** - - read_option() - - Read the option file. - -******************************************************************************/ -int read_option(char *opt_file, char *opt) -{ - int fd, err; - char *p; - char buf[PATH_MAX]; - - // copy current option - strncpy(buf, opt, PATH_MAX); - - // open options file - fd = open(opt_file, O_RDONLY); - - err = read(fd, opt, PATH_MAX); - - close(fd); - - if (err > 0) - { - // terminate string - if ((p = strchr(opt, '\n')) != NULL) - { - *p = 0; - - // check for a '\r' - if ((p = strchr(opt, '\r')) != NULL) - { - *p = 0; - } - } - else - { - opt[err] = 0; - } - - // check for $MYSQL_TEST_DIR - if ((p = strstr(opt, "$MYSQL_TEST_DIR")) != NULL) - { - char temp[PATH_MAX]; - - *p = 0; - - strcpy(temp, p + strlen("$MYSQL_TEST_DIR")); - - strcat(opt, mysql_test_dir); - - strcat(opt, temp); - } - // Check for double backslash and replace it with single bakslash - if ((p = strstr(opt, "\\\\")) != NULL) - { - /* bmove is guranteed to work byte by byte */ - bmove(p, p+1, strlen(p+1)); - } - } - else - { - // clear option - *opt = 0; - } - - // compare current option with previous - return strcmp(opt, buf); -} - -/****************************************************************************** - - run_test() - - Run the given test case. - -******************************************************************************/ -void run_test(char *test) -{ - char temp[PATH_MAX]; - const char *rstr; - int skip = FALSE, ignore=FALSE; - int restart = FALSE; - int flag = FALSE; - struct stat info; - - // skip tests in the skip list - snprintf(temp, PATH_MAX, " %s ", test); - skip = (strinstr(skip_test, temp) != 0); - if (skip == FALSE) - ignore = (strinstr(ignore_test, temp) != 0); - - snprintf(master_init_script, PATH_MAX, "%s/%s-master.sh", test_dir, test); - snprintf(slave_init_script, PATH_MAX, "%s/%s-slave.sh", test_dir, test); -#ifdef __WIN__ - if (! stat(master_init_script, &info)) - skip = TRUE; - if (!stat(slave_init_script, &info)) - skip = TRUE; -#endif - if (ignore) - { - // show test - mlog("%-46s ", test); - - // ignore - rstr = TEST_IGNORE; - ++total_ignore; - } - else if (!skip) // skip test? - { - char test_file[PATH_MAX]; - char master_opt_file[PATH_MAX]; - char slave_opt_file[PATH_MAX]; - char slave_master_info_file[PATH_MAX]; - char result_file[PATH_MAX]; - char reject_file[PATH_MAX]; - char out_file[PATH_MAX]; - char err_file[PATH_MAX]; - int err; - arg_list_t al; -#ifdef __WIN__ - /* - Clean test database - */ - removef("%s/test/*.*", master_dir); - removef("%s/test/*.*", slave_dir); - removef("%s/mysqltest/*.*", master_dir); - removef("%s/mysqltest/*.*", slave_dir); - -#endif - // skip slave? 
- flag = skip_slave; - skip_slave = (strncmp(test, "rpl", 3) != 0); - if (flag != skip_slave) restart = TRUE; - - // create files - snprintf(master_opt_file, PATH_MAX, "%s/%s-master.opt", test_dir, test); - snprintf(slave_opt_file, PATH_MAX, "%s/%s-slave.opt", test_dir, test); - snprintf(slave_master_info_file, PATH_MAX, "%s/%s.slave-mi", test_dir, test); - snprintf(reject_file, PATH_MAX, "%s/%s%s", result_dir, test, REJECT_SUFFIX); - snprintf(out_file, PATH_MAX, "%s/%s%s", result_dir, test, OUT_SUFFIX); - snprintf(err_file, PATH_MAX, "%s/%s%s", result_dir, test, ERR_SUFFIX); - - // netware specific files - snprintf(test_file, PATH_MAX, "%s/%s%s", test_dir, test, NW_TEST_SUFFIX); - if (stat(test_file, &info)) - { - snprintf(test_file, PATH_MAX, "%s/%s%s", test_dir, test, TEST_SUFFIX); - if (access(test_file,0)) - { - printf("Invalid test name %s, %s file not found\n",test,test_file); - return; - } - } - - snprintf(result_file, PATH_MAX, "%s/%s%s", result_dir, test, NW_RESULT_SUFFIX); - if (stat(result_file, &info)) - { - snprintf(result_file, PATH_MAX, "%s/%s%s", result_dir, test, RESULT_SUFFIX); - } - - // init scripts - if (stat(master_init_script, &info)) - master_init_script[0] = 0; - else - restart = TRUE; - - if (stat(slave_init_script, &info)) - slave_init_script[0] = 0; - else - restart = TRUE; - - // read options - if (read_option(master_opt_file, master_opt)) restart = TRUE; - if (read_option(slave_opt_file, slave_opt)) restart = TRUE; - if (read_option(slave_master_info_file, slave_master_info)) restart = TRUE; - - // cleanup previous run - remove(reject_file); - remove(out_file); - remove(err_file); - - // start or restart? - if (!master_running) mysql_start(); - else if (restart) mysql_restart(); - - // let the system stabalize - sleep(1); - - // show test - mlog("%-46s ", test); - - - // args - init_args(&al); - add_arg(&al, "%s", mysqltest_file); - add_arg(&al, "--no-defaults"); - add_arg(&al, "--port=%u", master_port); -#if !defined(__NETWARE__) && !defined(__WIN__) - add_arg(&al, "--socket=%s", master_socket); - add_arg(&al, "--tmpdir=%s", mysql_tmp_dir); -#endif - add_arg(&al, "--database=%s", db); - add_arg(&al, "--user=%s", user); - add_arg(&al, "--password=%s", password); - add_arg(&al, "--silent"); - add_arg(&al, "--basedir=%s/", mysql_test_dir); - add_arg(&al, "--host=127.0.0.1"); - add_arg(&al, "-v"); - add_arg(&al, "-R"); - add_arg(&al, "%s", result_file); - - if (use_openssl) - { - add_arg(&al, "--ssl-ca=%s", ca_cert); - add_arg(&al, "--ssl-cert=%s", client_cert); - add_arg(&al, "--ssl-key=%s", client_key); - } - - // spawn - err = spawn(mysqltest_file, &al, TRUE, test_file, out_file, err_file, NULL); - - // free args - free_args(&al); - - remove_empty_file(out_file); - remove_empty_file(err_file); - - if (err == 0) - { - // pass - rstr = TEST_PASS; - ++total_pass; - - // increment total - ++total_test; - } - else if (err == 2) - { - // skip - rstr = TEST_SKIP; - ++total_skip; - } - else if (err == 1) - { - // fail - rstr = TEST_FAIL; - ++total_fail; - - // increment total - ++total_test; - } - else - { - rstr = TEST_BAD; - } - } - else // early skips - { - // show test - mlog("%-46s ", test); - - // skip - rstr = TEST_SKIP; - ++total_skip; - } - - // result - mlog("%-14s\n", rstr); -} - -/****************************************************************************** - - vlog() - - Log the message. 
- -******************************************************************************/ -void vlog(const char *format, va_list ap) -{ - vfprintf(stdout, format, ap); - fflush(stdout); - - if (log_fd) - { - vfprintf(log_fd, format, ap); - fflush(log_fd); - } -} - -/****************************************************************************** - - log() - - Log the message. - -******************************************************************************/ -void mlog(const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - - vlog(format, ap); - - va_end(ap); -} - -/****************************************************************************** - - log_info() - - Log the given information. - -******************************************************************************/ -void log_info(const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - - mlog("-- INFO : "); - vlog(format, ap); - mlog("\n"); - - va_end(ap); -} - -/****************************************************************************** - - log_error() - - Log the given error. - -******************************************************************************/ -void log_error(const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - - mlog("-- ERROR: "); - vlog(format, ap); - mlog("\n"); - - va_end(ap); -} - -/****************************************************************************** - - log_errno() - - Log the given error and errno. - -******************************************************************************/ -void log_errno(const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - - mlog("-- ERROR: (%003u) ", errno); - vlog(format, ap); - mlog("\n"); - - va_end(ap); -} - -/****************************************************************************** - - die() - - Exit the application. - -******************************************************************************/ -void die(const char *msg) -{ - log_error(msg); -#ifdef __NETWARE__ - pressanykey(); -#endif - exit(-1); -} - -/****************************************************************************** - - setup() - - Setup the mysql test enviornment. 
- -******************************************************************************/ -void setup(char *file) -{ - char temp[PATH_MAX]; - char file_path[PATH_MAX*2]; - char *p; - int position; - - // set the timezone for the timestamp test -#ifdef __WIN__ - _putenv( "TZ=GMT-3" ); -#else - setenv("TZ", "GMT-3", TRUE); -#endif - // find base dir -#ifdef __NETWARE__ - strcpy(temp, strlwr(file)); - while((p = strchr(temp, '\\')) != NULL) *p = '/'; -#else - getcwd(temp, PATH_MAX); - position = strlen(temp); - temp[position] = '/'; - temp[position+1] = 0; -#ifdef __WIN__ - while((p = strchr(temp, '\\')) != NULL) *p = '/'; -#endif -#endif - - if ((position = strinstr(temp, "/mysql-test/")) != 0) - { - p = temp + position - 1; - *p = 0; - strcpy(base_dir, temp); - } - - log_info("Currect directory: %s",base_dir); - -#ifdef __NETWARE__ - // setup paths - snprintf(bin_dir, PATH_MAX, "%s/bin", base_dir); - snprintf(mysql_test_dir, PATH_MAX, "%s/mysql-test", base_dir); - snprintf(test_dir, PATH_MAX, "%s/t", mysql_test_dir); - snprintf(mysql_tmp_dir, PATH_MAX, "%s/var/tmp", mysql_test_dir); - snprintf(result_dir, PATH_MAX, "%s/r", mysql_test_dir); - snprintf(master_dir, PATH_MAX, "%s/var/master-data", mysql_test_dir); - snprintf(slave_dir, PATH_MAX, "%s/var/slave-data", mysql_test_dir); - snprintf(lang_dir, PATH_MAX, "%s/share/english", base_dir); - snprintf(char_dir, PATH_MAX, "%s/share/charsets", base_dir); - -#ifdef HAVE_OPENSSL - use_openssl = TRUE; -#endif // HAVE_OPENSSL - - // OpenSSL paths - snprintf(ca_cert, PATH_MAX, "%s/SSL/cacert.pem", base_dir); - snprintf(server_cert, PATH_MAX, "%s/SSL/server-cert.pem", base_dir); - snprintf(server_key, PATH_MAX, "%s/SSL/server-key.pem", base_dir); - snprintf(client_cert, PATH_MAX, "%s/SSL/client-cert.pem", base_dir); - snprintf(client_key, PATH_MAX, "%s/SSL/client-key.pem", base_dir); - - // setup files - snprintf(mysqld_file, PATH_MAX, "%s/mysqld", bin_dir); - snprintf(mysqltest_file, PATH_MAX, "%s/mysqltest", bin_dir); - snprintf(mysqladmin_file, PATH_MAX, "%s/mysqladmin", bin_dir); - snprintf(master_pid, PATH_MAX, "%s/var/run/master.pid", mysql_test_dir); - snprintf(slave_pid, PATH_MAX, "%s/var/run/slave.pid", mysql_test_dir); -#elif __WIN__ - // setup paths -#ifdef _DEBUG - snprintf(bin_dir, PATH_MAX, "%s/client_debug", base_dir); -#else - snprintf(bin_dir, PATH_MAX, "%s/client_release", base_dir); -#endif - snprintf(mysql_test_dir, PATH_MAX, "%s/mysql-test", base_dir); - snprintf(test_dir, PATH_MAX, "%s/t", mysql_test_dir); - snprintf(mysql_tmp_dir, PATH_MAX, "%s/var/tmp", mysql_test_dir); - snprintf(result_dir, PATH_MAX, "%s/r", mysql_test_dir); - snprintf(master_dir, PATH_MAX, "%s/var/master-data", mysql_test_dir); - snprintf(slave_dir, PATH_MAX, "%s/var/slave-data", mysql_test_dir); - snprintf(lang_dir, PATH_MAX, "%s/share/english", base_dir); - snprintf(char_dir, PATH_MAX, "%s/share/charsets", base_dir); - -#ifdef HAVE_OPENSSL - use_openssl = TRUE; -#endif // HAVE_OPENSSL - - // OpenSSL paths - snprintf(ca_cert, PATH_MAX, "%s/SSL/cacert.pem", base_dir); - snprintf(server_cert, PATH_MAX, "%s/SSL/server-cert.pem", base_dir); - snprintf(server_key, PATH_MAX, "%s/SSL/server-key.pem", base_dir); - snprintf(client_cert, PATH_MAX, "%s/SSL/client-cert.pem", base_dir); - snprintf(client_key, PATH_MAX, "%s/SSL/client-key.pem", base_dir); - - // setup files - snprintf(mysqld_file, PATH_MAX, "%s/mysqld.exe", bin_dir); - snprintf(mysqltest_file, PATH_MAX, "%s/mysqltest.exe", bin_dir); - snprintf(mysqladmin_file, PATH_MAX, "%s/mysqladmin.exe", bin_dir); -#else - 
// setup paths - snprintf(bin_dir, PATH_MAX, "%s/client", base_dir); - snprintf(mysql_test_dir, PATH_MAX, "%s/mysql-test", base_dir); - snprintf(test_dir, PATH_MAX, "%s/t", mysql_test_dir); - snprintf(mysql_tmp_dir, PATH_MAX, "%s/var/tmp", mysql_test_dir); - snprintf(result_dir, PATH_MAX, "%s/r", mysql_test_dir); - snprintf(master_dir, PATH_MAX, "%s/var/master-data", mysql_test_dir); - snprintf(slave_dir, PATH_MAX, "%s/var/slave-data", mysql_test_dir); - snprintf(lang_dir, PATH_MAX, "%s/sql/share/english", base_dir); - snprintf(char_dir, PATH_MAX, "%s/sql/share/charsets", base_dir); - -#ifdef HAVE_OPENSSL - use_openssl = TRUE; -#endif // HAVE_OPENSSL - - // OpenSSL paths - snprintf(ca_cert, PATH_MAX, "%s/SSL/cacert.pem", base_dir); - snprintf(server_cert, PATH_MAX, "%s/SSL/server-cert.pem", base_dir); - snprintf(server_key, PATH_MAX, "%s/SSL/server-key.pem", base_dir); - snprintf(client_cert, PATH_MAX, "%s/SSL/client-cert.pem", base_dir); - snprintf(client_key, PATH_MAX, "%s/SSL/client-key.pem", base_dir); - - // setup files - snprintf(mysqld_file, PATH_MAX, "%s/sql/mysqld", base_dir); - snprintf(mysqltest_file, PATH_MAX, "%s/mysqltest", bin_dir); - snprintf(mysqladmin_file, PATH_MAX, "%s/mysqladmin", bin_dir); - snprintf(master_pid, PATH_MAX, "%s/var/run/master.pid", mysql_test_dir); - snprintf(slave_pid, PATH_MAX, "%s/var/run/slave.pid", mysql_test_dir); - - snprintf(master_socket,PATH_MAX, "%s/var/tmp/master.sock", mysql_test_dir); - snprintf(slave_socket,PATH_MAX, "%s/var/tmp/slave.sock", mysql_test_dir); - -#endif - // create log file - snprintf(temp, PATH_MAX, "%s/mysql-test-run.log", mysql_test_dir); - if ((log_fd = fopen(temp, "w+")) == NULL) - { - log_errno("Unable to create log file."); - } - - // prepare skip test list - while((p = strchr(skip_test, ',')) != NULL) *p = ' '; - strcpy(temp, strlwr(skip_test)); - snprintf(skip_test, PATH_MAX, " %s ", temp); - - // environment -#ifdef __NETWARE__ - setenv("MYSQL_TEST_DIR", mysql_test_dir, 1); - snprintf(file_path, PATH_MAX*2, "%s/client/mysqldump --no-defaults -u root --port=%u", bin_dir, master_port); - setenv("MYSQL_DUMP", file_path, 1); - snprintf(file_path, PATH_MAX*2, "%s/client/mysqlbinlog --no-defaults --local-load=%s", bin_dir, mysql_tmp_dir); - setenv("MYSQL_BINLOG", file_path, 1); -#elif __WIN__ - snprintf(file_path,MAX_PATH,"MYSQL_TEST_DIR=%s",mysql_test_dir); - _putenv(file_path); - snprintf(file_path, PATH_MAX*2, "MYSQL_DUMP=%s/mysqldump.exe --no-defaults -u root --port=%u", bin_dir, master_port); - _putenv(file_path); - snprintf(file_path, PATH_MAX*2, "MYSQL_BINLOG=%s/mysqlbinlog.exe --no-defaults --local-load=%s", bin_dir, mysql_tmp_dir); - _putenv(file_path); -#else - setenv("MYSQL_TEST_DIR", mysql_test_dir, 1); - snprintf(file_path, PATH_MAX*2, "%s/mysqldump --no-defaults -u root --port=%u --socket=%s", bin_dir, master_port, master_socket); - setenv("MYSQL_DUMP", file_path, 1); - snprintf(file_path, PATH_MAX*2, "%s/mysqlbinlog --no-defaults --local-load=%s", bin_dir, mysql_tmp_dir); - setenv("MYSQL_BINLOG", file_path, 1); -#endif - -#ifndef __WIN__ - setenv("MASTER_MYPORT", "9306", 1); - setenv("SLAVE_MYPORT", "9307", 1); - setenv("MYSQL_TCP_PORT", "3306", 1); -#else - _putenv("MASTER_MYPORT=9306"); - _putenv("SLAVE_MYPORT=9307"); - _putenv("MYSQL_TCP_PORT=3306"); -#endif - -} - -/****************************************************************************** - - main() - -******************************************************************************/ -int main(int argc, char **argv) -{ - int is_ignore_list = 0; - // 
setup - setup(argv[0]); - - /* The --ignore option is comma saperated list of test cases to skip and - should be very first command line option to the test suite. - - The usage is now: - mysql_test_run --ignore=test1,test2 test3 test4 - where test1 and test2 are test cases to ignore - and test3 and test4 are test cases to run. - */ - if (argc >= 2 && !strnicmp(argv[1], "--ignore=", sizeof("--ignore=")-1)) - { - char *temp, *token; - temp= strdup(strchr(argv[1],'=') + 1); - for (token=str_tok(temp, ","); token != NULL; token=str_tok(NULL, ",")) - { - if (strlen(ignore_test) + strlen(token) + 2 <= PATH_MAX-1) - sprintf(ignore_test+strlen(ignore_test), " %s ", token); - else - { - free(temp); - die("ignore list too long."); - } - } - free(temp); - is_ignore_list = 1; - } - // header -#ifndef __WIN__ - mlog("MySQL Server %s, for %s (%s)\n\n", VERSION, SYSTEM_TYPE, MACHINE_TYPE); -#else - mlog("MySQL Server ---, for %s (%s)\n\n", SYSTEM_TYPE, MACHINE_TYPE); -#endif - - mlog("Initializing Tests...\n"); - - // install test databases - mysql_install_db(); - - mlog("Starting Tests...\n"); - - mlog("\n"); - mlog(HEADER); - mlog(DASH); - - if ( argc > 1 + is_ignore_list ) - { - int i; - - // single test - single_test = TRUE; - - for (i = 1 + is_ignore_list; i < argc; i++) - { - // run given test - run_test(argv[i]); - } - } - else - { - // run all tests -#ifndef __WIN__ - struct dirent **namelist; - int i,n; - char test[NAME_MAX]; - char *p; - int position; - - n = scandir(test_dir, &namelist, 0, alphasort); - if (n < 0) - die("Unable to open tests directory."); - else - { - for (i = 0; i < n; i++) - { - strcpy(test, strlwr(namelist[i]->d_name)); - // find the test suffix - if ((position = strinstr(test, TEST_SUFFIX)) != 0) - { - p = test + position - 1; - // null terminate at the suffix - *p = 0; - // run test - run_test(test); - } - free(namelist[n]); - } - free(namelist); - } -#else - struct _finddata_t dir; - intptr_t handle; - char test[NAME_MAX]; - char mask[PATH_MAX]; - char *p; - int position; - char **names = 0; - char **testes = 0; - int name_index; - int index; - - // single test - single_test = FALSE; - - snprintf(mask,MAX_PATH,"%s/*.test",test_dir); - - if ((handle=_findfirst(mask,&dir)) == -1L) - { - die("Unable to open tests directory."); - } - - names = malloc(MAX_COUNT_TESTES*4); - testes = names; - name_index = 0; - - do - { - if (!(dir.attrib & _A_SUBDIR)) - { - strcpy(test, strlwr(dir.name)); - - // find the test suffix - if ((position = strinstr(test, TEST_SUFFIX)) != 0) - { - p = test + position - 1; - // null terminate at the suffix - *p = 0; - - // insert test - *names = malloc(PATH_MAX); - strcpy(*names,test); - names++; - name_index++; - } - } - }while (_findnext(handle,&dir) == 0); - - _findclose(handle); - - qsort( (void *)testes, name_index, sizeof( char * ), compare ); - - for (index = 0; index <= name_index; index++) - { - run_test(testes[index]); - free(testes[index]); - } - - free(testes); -#endif - } - - // stop server - mysql_stop(); - - mlog(DASH); - mlog("\n"); - - mlog("Ending Tests...\n"); - - // report stats - report_stats(); - - // close log - if (log_fd) fclose(log_fd); - - // keep results up -#ifdef __NETWARE__ - pressanykey(); -#endif - return 0; -} - - -/* - Synopsis: - This function breaks the string into a sequence of tokens. The difference - between this function and strtok is that it respects the quoted string i.e. - it skips any delimiter character within the quoted part of the string. - It return tokens by eliminating quote character. 
It modifies the input string - passed. It will work with whitespace delimeter but may not work properly with - other delimeter. If the delimeter will contain any quote character, then - function will not tokenize and will return null string. - e.g. if input string is - --init-slave="set global max_connections=500" --skip-external-locking - then the output will two string i.e. - --init-slave=set global max_connections=500 - --skip-external-locking - -Arguments: - string: input string - delim: set of delimiter character -Output: - return the null terminated token of NULL. -*/ - - -char *str_tok(char *string, const char *delim) -{ - char *token; /* current token received from strtok */ - char *qt_token; /* token delimeted by the matching pair of quote */ - /* - if there are any quote chars found in the token then this variable - will hold the concatenated string to return to the caller - */ - char *ptr_token=NULL; - /* pointer to the quote character in the token from strtok */ - char *ptr_quote=NULL; - - /* See if the delimeter contains any quote character */ - if (strchr(delim,'\'') || strchr(delim,'\"')) - return NULL; - - /* repeate till we are getting some token from strtok */ - while ((token = (char*)strtok(string, delim) ) != NULL) - { - /* - make the input string NULL so that next time onward strtok can - be called with NULL input string. - */ - string = NULL; - /* - We don't need to remove any quote character for Windows version - */ -#ifndef __WIN__ - /* check if the current token contain double quote character*/ - if ((ptr_quote = (char*)strchr(token,'\"')) != NULL) - { - /* - get the matching the matching double quote in the remaining - input string - */ - qt_token = (char*)strtok(NULL,"\""); - } - /* check if the current token contain single quote character*/ - else if ((ptr_quote = (char*)strchr(token,'\'')) != NULL) - { - /* - get the matching the matching single quote in the remaining - input string - */ - qt_token = (char*)strtok(NULL,"\'"); - } -#endif - /* - if the current token does not contains any quote character then - return to the caller. - */ - if (ptr_quote == NULL) - { - /* - if there is any earlier token i.e. ptr_token then append the - current token in it and return it else return the current - token directly - */ - return ptr_token ? strcat(ptr_token,token) : token; - } - - /* - remove the quote character i.e. 
make NULL so that the token will - be devided in two part and later both part can be concatenated - and hence quote will be removed - */ - *ptr_quote= 0; - - /* check if ptr_token has been initialized or not */ - if (ptr_token == NULL) - { - /* initialize the ptr_token with current token */ - ptr_token= token; - /* copy entire string between matching pair of quote*/ - sprintf(ptr_token+strlen(ptr_token),"%s %s", ptr_quote+1, qt_token); - } - else - { - /* - copy the current token and entire string between matching pair - of quote - */ - if (qt_token == NULL) - { - sprintf(ptr_token+strlen(ptr_token),"%s%s", token, ptr_quote+1); - } - else - { - sprintf(ptr_token+strlen(ptr_token),"%s%s %s", token, ptr_quote+1, - qt_token ); - } - } - } - - /* return the concatenated token */ - return ptr_token; -} - -#ifndef __WIN__ - -/* - Synopsis: - This function run scripts files on Linux and Netware - -Arguments: - script_name: name of script file - -Output: - nothing -*/ -void run_init_script(const char *script_name) -{ - arg_list_t al; - int err; - - // args - init_args(&al); - add_arg(&al, sh_file); - add_arg(&al, script_name); - - // spawn - if ((err = spawn(sh_file, &al, TRUE, NULL, NULL, NULL, NULL)) != 0) - { - die("Unable to run script."); - } - - // free args - free_args(&al); -} -#endif diff --git a/mysql-test/r/ctype_big5.result b/mysql-test/r/ctype_big5.result index 44fad0cd96a..789b6e586ad 100644 --- a/mysql-test/r/ctype_big5.result +++ b/mysql-test/r/ctype_big5.result @@ -1,10 +1,58 @@ drop table if exists t1; -SET NAMES big5; -CREATE TABLE t1 (c CHAR(10) CHARACTER SET big5, KEY(c)); +SET @test_character_set= 'big5'; +SET @test_collation= 'big5_chinese_ci'; +SET @safe_character_set_server= @@character_set_server; +SET @safe_collation_server= @@collation_server; +SET character_set_server= @test_character_set; +SET collation_server= @test_collation; +CREATE DATABASE d1; +USE d1; +CREATE TABLE t1 (c CHAR(10), KEY(c)); +SHOW FULL COLUMNS FROM t1; +Field Type Collation Null Key Default Extra Privileges Comment +c char(10) big5_chinese_ci YES MUL NULL select,insert,update,references INSERT INTO t1 VALUES ('aaa'),('aaaa'),('aaaaa'); -SELECT * FROM t1 WHERE c LIKE 'aaa%'; -c +SELECT c as want3results FROM t1 WHERE c LIKE 'aaa%'; +want3results aaa aaaa aaaaa DROP TABLE t1; +CREATE TABLE t1 (c1 varchar(15), KEY c1 (c1(2))); +SHOW FULL COLUMNS FROM t1; +Field Type Collation Null Key Default Extra Privileges Comment +c1 varchar(15) big5_chinese_ci YES MUL NULL select,insert,update,references +INSERT INTO t1 VALUES ('location'),('loberge'),('lotre'),('boabab'); +SELECT c1 as want3results from t1 where c1 like 'l%'; +want3results +location +loberge +lotre +SELECT c1 as want3results from t1 where c1 like 'lo%'; +want3results +location +loberge +lotre +SELECT c1 as want1result from t1 where c1 like 'loc%'; +want1result +location +SELECT c1 as want1result from t1 where c1 like 'loca%'; +want1result +location +SELECT c1 as want1result from t1 where c1 like 'locat%'; +want1result +location +SELECT c1 as want1result from t1 where c1 like 'locati%'; +want1result +location +SELECT c1 as want1result from t1 where c1 like 'locatio%'; +want1result +location +SELECT c1 as want1result from t1 where c1 like 'location%'; +want1result +location +DROP TABLE t1; +DROP DATABASE d1; +USE test; +SET character_set_server= @safe_character_set_server; +SET collation_server= @safe_collation_server; diff --git a/mysql-test/r/ctype_latin1.result b/mysql-test/r/ctype_latin1.result index a8182438ac4..355f53b63a5 100644 --- 
a/mysql-test/r/ctype_latin1.result +++ b/mysql-test/r/ctype_latin1.result @@ -296,3 +296,12 @@ FD C3BD FD 1 FE C3BE FE 1 FF C3BF FF 1 DROP TABLE t1; +select 'a' regexp 'A' collate latin1_general_ci; +'a' regexp 'A' collate latin1_general_ci +1 +select 'a' regexp 'A' collate latin1_general_cs; +'a' regexp 'A' collate latin1_general_cs +0 +select 'a' regexp 'A' collate latin1_bin; +'a' regexp 'A' collate latin1_bin +0 diff --git a/mysql-test/r/ctype_uca.result b/mysql-test/r/ctype_uca.result index 90681795513..cb060ad7ee4 100644 --- a/mysql-test/r/ctype_uca.result +++ b/mysql-test/r/ctype_uca.result @@ -19,6 +19,9 @@ select 'a ' = 'a\t', 'a ' < 'a\t', 'a ' > 'a\t'; select 'a a' > 'a', 'a \t' < 'a'; 'a a' > 'a' 'a \t' < 'a' 1 1 +select 'c' like '\_' as want0; +want0 +0 CREATE TABLE t ( c char(20) NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; @@ -2315,3 +2318,60 @@ HEX(CONVERT(col1 USING ucs2)) 064A06A9062F064A06AF0631 064A06A9064A DROP TABLE t1; +SET @test_character_set= 'utf8'; +SET @test_collation= 'utf8_swedish_ci'; +SET @safe_character_set_server= @@character_set_server; +SET @safe_collation_server= @@collation_server; +SET character_set_server= @test_character_set; +SET collation_server= @test_collation; +CREATE DATABASE d1; +USE d1; +CREATE TABLE t1 (c CHAR(10), KEY(c)); +SHOW FULL COLUMNS FROM t1; +Field Type Collation Null Key Default Extra Privileges Comment +c char(10) utf8_swedish_ci YES MUL NULL select,insert,update,references +INSERT INTO t1 VALUES ('aaa'),('aaaa'),('aaaaa'); +SELECT c as want3results FROM t1 WHERE c LIKE 'aaa%'; +want3results +aaa +aaaa +aaaaa +DROP TABLE t1; +CREATE TABLE t1 (c1 varchar(15), KEY c1 (c1(2))); +SHOW FULL COLUMNS FROM t1; +Field Type Collation Null Key Default Extra Privileges Comment +c1 varchar(15) utf8_swedish_ci YES MUL NULL select,insert,update,references +INSERT INTO t1 VALUES ('location'),('loberge'),('lotre'),('boabab'); +SELECT c1 as want3results from t1 where c1 like 'l%'; +want3results +location +loberge +lotre +SELECT c1 as want3results from t1 where c1 like 'lo%'; +want3results +location +loberge +lotre +SELECT c1 as want1result from t1 where c1 like 'loc%'; +want1result +location +SELECT c1 as want1result from t1 where c1 like 'loca%'; +want1result +location +SELECT c1 as want1result from t1 where c1 like 'locat%'; +want1result +location +SELECT c1 as want1result from t1 where c1 like 'locati%'; +want1result +location +SELECT c1 as want1result from t1 where c1 like 'locatio%'; +want1result +location +SELECT c1 as want1result from t1 where c1 like 'location%'; +want1result +location +DROP TABLE t1; +DROP DATABASE d1; +USE test; +SET character_set_server= @safe_character_set_server; +SET collation_server= @safe_collation_server; diff --git a/mysql-test/r/ctype_utf8.result b/mysql-test/r/ctype_utf8.result index 945ec8eae99..599d49208e7 100644 --- a/mysql-test/r/ctype_utf8.result +++ b/mysql-test/r/ctype_utf8.result @@ -814,3 +814,6 @@ t2 CREATE TABLE `t2` ( ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t2; drop table t1; +select 'c' like '\_' as want0; +want0 +0 diff --git a/mysql-test/r/delete.result b/mysql-test/r/delete.result index 5575ee1bf98..f1fba87c70b 100644 --- a/mysql-test/r/delete.result +++ b/mysql-test/r/delete.result @@ -16,12 +16,34 @@ SET AUTOCOMMIT=0; DELETE from t1; SET AUTOCOMMIT=1; drop table t1; -create table t1 (a bigint not null, primary key (a,a,a,a,a,a,a,a,a,a)); -insert into t1 values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23); +create table t1 ( +a bigint 
not null, +b bigint not null default 0, +c bigint not null default 0, +d bigint not null default 0, +e bigint not null default 0, +f bigint not null default 0, +g bigint not null default 0, +h bigint not null default 0, +i bigint not null default 0, +j bigint not null default 0, +primary key (a,b,c,d,e,f,g,h,i,j)); +insert into t1 (a) values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23); delete from t1 where a=26; drop table t1; -create table t1 (a bigint not null, primary key (a,a,a,a,a,a,a,a,a,a)); -insert into t1 values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23),(27); +create table t1 ( +a bigint not null, +b bigint not null default 0, +c bigint not null default 0, +d bigint not null default 0, +e bigint not null default 0, +f bigint not null default 0, +g bigint not null default 0, +h bigint not null default 0, +i bigint not null default 0, +j bigint not null default 0, +primary key (a,b,c,d,e,f,g,h,i,j)); +insert into t1 (a) values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23),(27); delete from t1 where a=27; drop table t1; CREATE TABLE `t1` ( diff --git a/mysql-test/r/fulltext.result b/mysql-test/r/fulltext.result index aea406515c5..7acc8a2d23f 100644 --- a/mysql-test/r/fulltext.result +++ b/mysql-test/r/fulltext.result @@ -190,6 +190,14 @@ a select * from t1 where match a against ("+aaa10 +(bbb*)" in boolean mode); a aaa10 bbb20 +select * from t1 where match a against ("+(+aaa* +bbb1*)" in boolean mode); +a +aaa20 bbb15 +aaa30 bbb10 +select * from t1 where match a against ("(+aaa* +bbb1*)" in boolean mode); +a +aaa20 bbb15 +aaa30 bbb10 drop table t1; CREATE TABLE t1 ( id int(11), @@ -391,3 +399,9 @@ select count(*) from t1; count(*) 1 drop table t1; +create table t1 (a int primary key, b text, fulltext(b)); +create table t2 (a int, b text); +insert t1 values (1, "aaaa"), (2, "bbbb"); +insert t2 values (10, "aaaa"), (2, "cccc"); +replace t1 select * from t2; +drop table t1, t2; diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index c25f89d4df3..4bb79a1cb41 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -637,8 +637,22 @@ create table t1 (a char(10)); insert into t1 values ('a'),('b'),('c'); select coercibility(max(a)) from t1; coercibility(max(a)) -3 +2 drop table t1; +create table t1 (a char character set latin2); +insert into t1 values ('a'),('b'); +select charset(max(a)), coercibility(max(a)), +charset(min(a)), coercibility(min(a)) from t1; +charset(max(a)) coercibility(max(a)) charset(min(a)) coercibility(min(a)) +latin2 2 latin2 2 +create table t2 select max(a),min(a) from t1; +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `max(a)` char(1) character set latin2 default NULL, + `min(a)` char(1) character set latin2 default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t2,t1; create table t1 (a int); insert into t1 values (1); select max(a) as b from t1 having b=1; diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index 2dc6bffd071..c78b16aed8a 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -474,6 +474,12 @@ unix_timestamp(@a) select unix_timestamp('1969-12-01 19:00:01'); unix_timestamp('1969-12-01 19:00:01') 0 +select from_unixtime(0); +from_unixtime(0) +NULL +select from_unixtime(2145916800); +from_unixtime(2145916800) +NULL CREATE TABLE t1 (datetime datetime, timestamp timestamp, date date, time time); INSERT INTO t1 values ("2001-01-02 03:04:05", "2002-01-02 
03:04:05", "2003-01-02", "06:07:08"); SELECT * from t1; diff --git a/mysql-test/r/heap.result b/mysql-test/r/heap.result index 4950799137a..1f994b100e2 100644 --- a/mysql-test/r/heap.result +++ b/mysql-test/r/heap.result @@ -233,3 +233,10 @@ SELECT * FROM t1 WHERE B is not null; a B 1 1 DROP TABLE t1; +CREATE TABLE t1 (pseudo char(35) PRIMARY KEY, date int(10) unsigned NOT NULL) ENGINE=HEAP; +INSERT INTO t1 VALUES ('massecot',1101106491),('altec',1101106492),('stitch+',1101106304),('Seb Corgan',1101106305),('beerfilou',1101106263),('flaker',1101106529),('joce8',5),('M4vrick',1101106418),('gabay008',1101106525),('Vamp irX',1101106291),('ZoomZip',1101106546),('rip666',1101106502),('CBP ',1101106397),('guezpard',1101106496); +DELETE FROM t1 WHERE date<1101106546; +SELECT * FROM t1; +pseudo date +ZoomZip 1101106546 +DROP TABLE t1; diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result index 9d830367745..009432ec3ab 100644 --- a/mysql-test/r/innodb.result +++ b/mysql-test/r/innodb.result @@ -1630,3 +1630,21 @@ show status like "binlog_cache_disk_use"; Variable_name Value Binlog_cache_disk_use 1 drop table t1; +create table t1 (c char(10), index (c,c)) engine=innodb; +ERROR 42S21: Duplicate column name 'c' +create table t1 (c1 char(10), c2 char(10), index (c1,c2,c1)) engine=innodb; +ERROR 42S21: Duplicate column name 'c1' +create table t1 (c1 char(10), c2 char(10), index (c1,c1,c2)) engine=innodb; +ERROR 42S21: Duplicate column name 'c1' +create table t1 (c1 char(10), c2 char(10), index (c2,c1,c1)) engine=innodb; +ERROR 42S21: Duplicate column name 'c1' +create table t1 (c1 char(10), c2 char(10)) engine=innodb; +alter table t1 add key (c1,c1); +ERROR 42S21: Duplicate column name 'c1' +alter table t1 add key (c2,c1,c1); +ERROR 42S21: Duplicate column name 'c1' +alter table t1 add key (c1,c2,c1); +ERROR 42S21: Duplicate column name 'c1' +alter table t1 add key (c1,c1,c2); +ERROR 42S21: Duplicate column name 'c1' +drop table t1; diff --git a/mysql-test/r/key.result b/mysql-test/r/key.result index e74bda23da9..cceaf393a60 100644 --- a/mysql-test/r/key.result +++ b/mysql-test/r/key.result @@ -307,3 +307,21 @@ test.t1 check status OK drop table t1; create table t1 (c char(10), index (c(0))); ERROR HY000: Key part 'c' length cannot be 0 +create table t1 (c char(10), index (c,c)); +ERROR 42S21: Duplicate column name 'c' +create table t1 (c1 char(10), c2 char(10), index (c1,c2,c1)); +ERROR 42S21: Duplicate column name 'c1' +create table t1 (c1 char(10), c2 char(10), index (c1,c1,c2)); +ERROR 42S21: Duplicate column name 'c1' +create table t1 (c1 char(10), c2 char(10), index (c2,c1,c1)); +ERROR 42S21: Duplicate column name 'c1' +create table t1 (c1 char(10), c2 char(10)); +alter table t1 add key (c1,c1); +ERROR 42S21: Duplicate column name 'c1' +alter table t1 add key (c2,c1,c1); +ERROR 42S21: Duplicate column name 'c1' +alter table t1 add key (c1,c2,c1); +ERROR 42S21: Duplicate column name 'c1' +alter table t1 add key (c1,c1,c2); +ERROR 42S21: Duplicate column name 'c1' +drop table t1; diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result index 31b14f9b822..d155a14bb60 100644 --- a/mysql-test/r/myisam.result +++ b/mysql-test/r/myisam.result @@ -529,6 +529,7 @@ show keys from t1; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment t1 1 a 1 a A NULL NULL NULL YES BTREE disabled create table t2 (a int); +set @@rand_seed1=31415926,@@rand_seed2=2718281828; insert t1 select * from t2; show keys from t1; Table Non_unique 
Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment diff --git a/mysql-test/r/ndb_grant.result b/mysql-test/r/ndb_grant.result new file mode 100644 index 00000000000..6583065a0c4 --- /dev/null +++ b/mysql-test/r/ndb_grant.result @@ -0,0 +1,416 @@ +drop table if exists t1; +SET NAMES binary; +use mysql; +alter table columns_priv engine=ndb; +alter table db engine=ndb; +alter table func engine=ndb; +alter table help_category engine=ndb; +alter table help_keyword engine=ndb; +alter table help_relation engine=ndb; +alter table help_topic engine=ndb; +alter table host engine=ndb; +alter table tables_priv engine=ndb; +alter table time_zone engine=ndb; +alter table time_zone_leap_second engine=ndb; +alter table time_zone_name engine=ndb; +alter table time_zone_transition engine=ndb; +alter table time_zone_transition_type engine=ndb; +alter table user engine=ndb; +use test; +delete from mysql.user where user='mysqltest_1'; +delete from mysql.db where user='mysqltest_1'; +flush privileges; +begin; +grant select on mysqltest.* to mysqltest_1@localhost require cipher "EDH-RSA-DES-CBC3-SHA"; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' REQUIRE CIPHER 'EDH-RSA-DES-CBC3-SHA' +GRANT SELECT ON `mysqltest`.* TO 'mysqltest_1'@'localhost' +begin; +grant delete on mysqltest.* to mysqltest_1@localhost; +commit; +select * from mysql.user where user="mysqltest_1"; +Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections +localhost mysqltest_1 N N N N N N N N N N N N N N N N N N N N N SPECIFIED EDH-RSA-DES-CBC3-SHA 0 0 0 +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' REQUIRE CIPHER 'EDH-RSA-DES-CBC3-SHA' +GRANT SELECT, DELETE ON `mysqltest`.* TO 'mysqltest_1'@'localhost' +begin; +revoke delete on mysqltest.* from mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' REQUIRE CIPHER 'EDH-RSA-DES-CBC3-SHA' +GRANT SELECT ON `mysqltest`.* TO 'mysqltest_1'@'localhost' +begin; +grant select on mysqltest.* to mysqltest_1@localhost require NONE; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' +GRANT SELECT ON `mysqltest`.* TO 'mysqltest_1'@'localhost' +begin; +grant USAGE on mysqltest.* to mysqltest_1@localhost require cipher "EDH-RSA-DES-CBC3-SHA" AND SUBJECT "testsubject" ISSUER "MySQL AB"; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' REQUIRE ISSUER 'MySQL AB' SUBJECT 'testsubject' CIPHER 'EDH-RSA-DES-CBC3-SHA' +GRANT SELECT ON `mysqltest`.* TO 'mysqltest_1'@'localhost' +begin; +revoke all privileges on mysqltest.* from mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' REQUIRE ISSUER 'MySQL AB' SUBJECT 'testsubject' CIPHER 'EDH-RSA-DES-CBC3-SHA' +delete from mysql.user where user='mysqltest_1'; +flush privileges; +begin; 
+grant CREATE TEMPORARY TABLES, LOCK TABLES on mysqltest.* to mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' +GRANT CREATE TEMPORARY TABLES, LOCK TABLES ON `mysqltest`.* TO 'mysqltest_1'@'localhost' +flush privileges; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' +GRANT CREATE TEMPORARY TABLES, LOCK TABLES ON `mysqltest`.* TO 'mysqltest_1'@'localhost' +begin; +revoke CREATE TEMPORARY TABLES on mysqltest.* from mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' +GRANT LOCK TABLES ON `mysqltest`.* TO 'mysqltest_1'@'localhost' +begin; +grant ALL PRIVILEGES on mysqltest.* to mysqltest_1@localhost with GRANT OPTION; +commit; +flush privileges; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' +GRANT ALL PRIVILEGES ON `mysqltest`.* TO 'mysqltest_1'@'localhost' WITH GRANT OPTION +begin; +revoke LOCK TABLES, ALTER on mysqltest.* from mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, REFERENCES, INDEX, CREATE TEMPORARY TABLES ON `mysqltest`.* TO 'mysqltest_1'@'localhost' WITH GRANT OPTION +begin; +revoke all privileges on mysqltest.* from mysqltest_1@localhost; +commit; +delete from mysql.user where user='mysqltest_1'; +flush privileges; +begin; +grant usage on test.* to mysqltest_1@localhost with grant option; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' +GRANT USAGE ON `mysqltest`.* TO 'mysqltest_1'@'localhost' WITH GRANT OPTION +GRANT USAGE ON `test`.* TO 'mysqltest_1'@'localhost' WITH GRANT OPTION +delete from mysql.user where user='mysqltest_1'; +delete from mysql.db where user='mysqltest_1'; +delete from mysql.tables_priv where user='mysqltest_1'; +delete from mysql.columns_priv where user='mysqltest_1'; +flush privileges; +show grants for mysqltest_1@localhost; +ERROR 42000: There is no such grant defined for user 'mysqltest_1' on host 'localhost' +create table t1 (a int); +begin; +GRANT select,update,insert on t1 to mysqltest_1@localhost; +GRANT select (a), update (a),insert(a), references(a) on t1 to mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' +GRANT SELECT, SELECT (a), INSERT, INSERT (a), UPDATE, UPDATE (a), REFERENCES (a) ON `test`.`t1` TO 'mysqltest_1'@'localhost' +select table_priv,column_priv from mysql.tables_priv where user="mysqltest_1"; +table_priv column_priv +Select,Insert,Update Select,Insert,Update,References +begin; +REVOKE select (a), update on t1 from mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' +GRANT SELECT, INSERT, INSERT (a), REFERENCES (a) ON `test`.`t1` TO 'mysqltest_1'@'localhost' +begin; +REVOKE select,update,insert,insert (a) on t1 from mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' +GRANT REFERENCES (a) ON `test`.`t1` TO 'mysqltest_1'@'localhost' 
+begin; +GRANT select,references on t1 to mysqltest_1@localhost; +commit; +select table_priv,column_priv from mysql.tables_priv where user="mysqltest_1"; +table_priv column_priv +Select,References References +begin; +grant all on test.* to mysqltest_3@localhost with grant option; +revoke all on test.* from mysqltest_3@localhost; +commit; +show grants for mysqltest_3@localhost; +Grants for mysqltest_3@localhost +GRANT USAGE ON *.* TO 'mysqltest_3'@'localhost' +GRANT USAGE ON `test`.* TO 'mysqltest_3'@'localhost' WITH GRANT OPTION +begin; +revoke grant option on test.* from mysqltest_3@localhost; +commit; +show grants for mysqltest_3@localhost; +Grants for mysqltest_3@localhost +GRANT USAGE ON *.* TO 'mysqltest_3'@'localhost' +begin; +grant all on test.t1 to mysqltest_2@localhost with grant option; +revoke all on test.t1 from mysqltest_2@localhost; +commit; +show grants for mysqltest_2@localhost; +Grants for mysqltest_2@localhost +GRANT USAGE ON *.* TO 'mysqltest_2'@'localhost' +GRANT USAGE ON `test`.`t1` TO 'mysqltest_2'@'localhost' WITH GRANT OPTION +begin; +revoke grant option on test.t1 from mysqltest_2@localhost; +commit; +show grants for mysqltest_2@localhost; +Grants for mysqltest_2@localhost +GRANT USAGE ON *.* TO 'mysqltest_2'@'localhost' +delete from mysql.user where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3"; +delete from mysql.db where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3"; +delete from mysql.tables_priv where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3"; +delete from mysql.columns_priv where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3"; +flush privileges; +drop table t1; +begin; +GRANT FILE on mysqltest.* to mysqltest_1@localhost; +ERROR HY000: Incorrect usage of DB GRANT and GLOBAL PRIVILEGES +commit; +select 1; +1 +1 +create database mysqltest1; +begin; +grant usage on mysqltest1.* to test6123 identified by 'magic123'; +commit; +select host,db,user,select_priv,insert_priv from mysql.db where db="mysqltest1"; +host db user select_priv insert_priv +delete from mysql.user where user='test6123'; +drop database mysqltest1; +create table t1 (a int); +begin; +grant ALL PRIVILEGES on *.* to drop_user2@localhost with GRANT OPTION; +commit; +show grants for drop_user2@localhost; +Grants for drop_user2@localhost +GRANT ALL PRIVILEGES ON *.* TO 'drop_user2'@'localhost' WITH GRANT OPTION +begin; +revoke all privileges, grant option from drop_user2@localhost; +commit; +drop user drop_user2@localhost; +begin; +grant ALL PRIVILEGES on *.* to drop_user@localhost with GRANT OPTION; +grant ALL PRIVILEGES on test.* to drop_user@localhost with GRANT OPTION; +grant select(a) on test.t1 to drop_user@localhost; +commit; +show grants for drop_user@localhost; +Grants for drop_user@localhost +GRANT ALL PRIVILEGES ON *.* TO 'drop_user'@'localhost' WITH GRANT OPTION +GRANT ALL PRIVILEGES ON `test`.* TO 'drop_user'@'localhost' WITH GRANT OPTION +GRANT SELECT (a) ON `test`.`t1` TO 'drop_user'@'localhost' +set sql_mode=ansi_quotes; +show grants for drop_user@localhost; +Grants for drop_user@localhost +GRANT ALL PRIVILEGES ON *.* TO 'drop_user'@'localhost' WITH GRANT OPTION +GRANT ALL PRIVILEGES ON "test".* TO 'drop_user'@'localhost' WITH GRANT OPTION +GRANT SELECT (a) ON "test"."t1" TO 'drop_user'@'localhost' +set sql_mode=default; +set sql_quote_show_create=0; +show grants for drop_user@localhost; +Grants for drop_user@localhost +GRANT ALL PRIVILEGES ON *.* TO 'drop_user'@'localhost' WITH GRANT OPTION +GRANT ALL PRIVILEGES ON 
test.* TO 'drop_user'@'localhost' WITH GRANT OPTION +GRANT SELECT (a) ON test.t1 TO 'drop_user'@'localhost' +set sql_mode="ansi_quotes"; +show grants for drop_user@localhost; +Grants for drop_user@localhost +GRANT ALL PRIVILEGES ON *.* TO 'drop_user'@'localhost' WITH GRANT OPTION +GRANT ALL PRIVILEGES ON test.* TO 'drop_user'@'localhost' WITH GRANT OPTION +GRANT SELECT (a) ON test.t1 TO 'drop_user'@'localhost' +set sql_quote_show_create=1; +show grants for drop_user@localhost; +Grants for drop_user@localhost +GRANT ALL PRIVILEGES ON *.* TO 'drop_user'@'localhost' WITH GRANT OPTION +GRANT ALL PRIVILEGES ON "test".* TO 'drop_user'@'localhost' WITH GRANT OPTION +GRANT SELECT (a) ON "test"."t1" TO 'drop_user'@'localhost' +set sql_mode=""; +show grants for drop_user@localhost; +Grants for drop_user@localhost +GRANT ALL PRIVILEGES ON *.* TO 'drop_user'@'localhost' WITH GRANT OPTION +GRANT ALL PRIVILEGES ON `test`.* TO 'drop_user'@'localhost' WITH GRANT OPTION +GRANT SELECT (a) ON `test`.`t1` TO 'drop_user'@'localhost' +revoke all privileges, grant option from drop_user@localhost; +show grants for drop_user@localhost; +Grants for drop_user@localhost +GRANT USAGE ON *.* TO 'drop_user'@'localhost' +drop user drop_user@localhost; +begin; +revoke all privileges, grant option from drop_user@localhost; +ERROR HY000: Can't revoke all privileges, grant for one or more of the requested users +commit; +begin; +grant select(a) on test.t1 to drop_user1@localhost; +commit; +flush privileges; +begin; +grant select on test.t1 to drop_user2@localhost; +grant select on test.* to drop_user3@localhost; +grant select on *.* to drop_user4@localhost; +commit; +drop user drop_user1@localhost, drop_user2@localhost, drop_user3@localhost, +drop_user4@localhost; +ERROR HY000: Can't drop one or more of the requested users +begin; +revoke all privileges, grant option from drop_user1@localhost, drop_user2@localhost, +drop_user3@localhost, drop_user4@localhost; +commit; +drop user drop_user1@localhost, drop_user2@localhost, drop_user3@localhost, +drop_user4@localhost; +drop table t1; +begin; +grant usage on *.* to mysqltest_1@localhost identified by "password"; +grant select, update, insert on test.* to mysqltest@localhost; +commit; +show grants for mysqltest_1@localhost; +Grants for mysqltest_1@localhost +GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' IDENTIFIED BY PASSWORD '*2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19' +drop user mysqltest_1@localhost; +SET NAMES koi8r; +CREATE DATABASE ÂÄ; +USE ÂÄ; +CREATE TABLE ÔÁÂ (ËÏÌ int); +begin; +GRANT SELECT ON ÂÄ.* TO ÀÚÅÒ@localhost; +commit; +SHOW GRANTS FOR ÀÚÅÒ@localhost; +Grants for ÀÚÅÒ@localhost +GRANT USAGE ON *.* TO 'ÀÚÅÒ'@'localhost' +GRANT SELECT ON `ÂÄ`.* TO 'ÀÚÅÒ'@'localhost' +begin; +REVOKE SELECT ON ÂÄ.* FROM ÀÚÅÒ@localhost; +commit; +begin; +GRANT SELECT ON ÂÄ.ÔÁÂ TO ÀÚÅÒ@localhost; +commit; +SHOW GRANTS FOR ÀÚÅÒ@localhost; +Grants for ÀÚÅÒ@localhost +GRANT USAGE ON *.* TO 'ÀÚÅÒ'@'localhost' +GRANT SELECT ON `ÂÄ`.`ÔÁÂ` TO 'ÀÚÅÒ'@'localhost' +begin; +REVOKE SELECT ON ÂÄ.ÔÁÂ FROM ÀÚÅÒ@localhost; +commit; +begin; +GRANT SELECT (ËÏÌ) ON ÂÄ.ÔÁÂ TO ÀÚÅÒ@localhost; +commit; +SHOW GRANTS FOR ÀÚÅÒ@localhost; +Grants for ÀÚÅÒ@localhost +GRANT USAGE ON *.* TO 'ÀÚÅÒ'@'localhost' +GRANT SELECT (ËÏÌ) ON `ÂÄ`.`ÔÁÂ` TO 'ÀÚÅÒ'@'localhost' +begin; +REVOKE SELECT (ËÏÌ) ON ÂÄ.ÔÁÂ FROM ÀÚÅÒ@localhost; +commit; +DROP DATABASE ÂÄ; +SET NAMES latin1; +USE test; +CREATE TABLE t1 (a int ); +CREATE TABLE t2 LIKE t1; +CREATE TABLE t3 LIKE t1; +CREATE TABLE t4 LIKE t1; +CREATE TABLE t5 LIKE t1; 
+CREATE TABLE t6 LIKE t1; +CREATE TABLE t7 LIKE t1; +CREATE TABLE t8 LIKE t1; +CREATE TABLE t9 LIKE t1; +CREATE TABLE t10 LIKE t1; +CREATE DATABASE testdb1; +CREATE DATABASE testdb2; +CREATE DATABASE testdb3; +CREATE DATABASE testdb4; +CREATE DATABASE testdb5; +CREATE DATABASE testdb6; +CREATE DATABASE testdb7; +CREATE DATABASE testdb8; +CREATE DATABASE testdb9; +CREATE DATABASE testdb10; +begin; +GRANT ALL ON testdb1.* TO testuser@localhost; +GRANT ALL ON testdb2.* TO testuser@localhost; +GRANT ALL ON testdb3.* TO testuser@localhost; +GRANT ALL ON testdb4.* TO testuser@localhost; +GRANT ALL ON testdb5.* TO testuser@localhost; +GRANT ALL ON testdb6.* TO testuser@localhost; +GRANT ALL ON testdb7.* TO testuser@localhost; +GRANT ALL ON testdb8.* TO testuser@localhost; +GRANT ALL ON testdb9.* TO testuser@localhost; +GRANT ALL ON testdb10.* TO testuser@localhost; +GRANT SELECT ON test.t1 TO testuser@localhost; +GRANT SELECT ON test.t2 TO testuser@localhost; +GRANT SELECT ON test.t3 TO testuser@localhost; +GRANT SELECT ON test.t4 TO testuser@localhost; +GRANT SELECT ON test.t5 TO testuser@localhost; +GRANT SELECT ON test.t6 TO testuser@localhost; +GRANT SELECT ON test.t7 TO testuser@localhost; +GRANT SELECT ON test.t8 TO testuser@localhost; +GRANT SELECT ON test.t9 TO testuser@localhost; +GRANT SELECT ON test.t10 TO testuser@localhost; +GRANT SELECT (a) ON test.t1 TO testuser@localhost; +GRANT SELECT (a) ON test.t2 TO testuser@localhost; +GRANT SELECT (a) ON test.t3 TO testuser@localhost; +GRANT SELECT (a) ON test.t4 TO testuser@localhost; +GRANT SELECT (a) ON test.t5 TO testuser@localhost; +GRANT SELECT (a) ON test.t6 TO testuser@localhost; +GRANT SELECT (a) ON test.t7 TO testuser@localhost; +GRANT SELECT (a) ON test.t8 TO testuser@localhost; +GRANT SELECT (a) ON test.t9 TO testuser@localhost; +GRANT SELECT (a) ON test.t10 TO testuser@localhost; +commit; +begin; +REVOKE ALL PRIVILEGES, GRANT OPTION FROM testuser@localhost; +commit; +SHOW GRANTS FOR testuser@localhost; +Grants for testuser@localhost +GRANT USAGE ON *.* TO 'testuser'@'localhost' +DROP USER testuser@localhost; +DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +DROP DATABASE testdb1; +DROP DATABASE testdb2; +DROP DATABASE testdb3; +DROP DATABASE testdb4; +DROP DATABASE testdb5; +DROP DATABASE testdb6; +DROP DATABASE testdb7; +DROP DATABASE testdb8; +DROP DATABASE testdb9; +DROP DATABASE testdb10; +use mysql; +alter table columns_priv engine=myisam; +alter table db engine=myisam; +alter table func engine=myisam; +alter table help_category engine=myisam; +alter table help_keyword engine=myisam; +alter table help_relation engine=myisam; +alter table help_topic engine=myisam; +alter table host engine=myisam; +alter table tables_priv engine=myisam; +alter table time_zone engine=myisam; +alter table time_zone_leap_second engine=myisam; +alter table time_zone_name engine=myisam; +alter table time_zone_transition engine=myisam; +alter table time_zone_transition_type engine=myisam; +alter table user engine=myisam; +use test; +flush privileges; diff --git a/mysql-test/r/ndb_index_unique.result b/mysql-test/r/ndb_index_unique.result index af9b84022ed..31b258c0a6f 100644 --- a/mysql-test/r/ndb_index_unique.result +++ b/mysql-test/r/ndb_index_unique.result @@ -44,6 +44,51 @@ a b c 7 8 3 8 2 3 drop table t1; +CREATE TABLE t1 ( +a int unsigned NOT NULL PRIMARY KEY, +b int unsigned, +c int unsigned, +UNIQUE bc(b,c) +) engine = ndb; +insert into t1 values(1,1,1),(2,NULL,2),(3,NULL,NULL),(4,4,NULL); +select * from t1 use index (bc) where b IS NULL order 
by a; +a b c +2 NULL 2 +3 NULL NULL +select * from t1 use index (bc)order by a; +a b c +1 1 1 +2 NULL 2 +3 NULL NULL +4 4 NULL +select * from t1 use index (bc) order by a; +a b c +1 1 1 +2 NULL 2 +3 NULL NULL +4 4 NULL +select * from t1 use index (PRIMARY) where b IS NULL order by a; +a b c +2 NULL 2 +3 NULL NULL +select * from t1 use index (bc) where b IS NULL order by a; +a b c +2 NULL 2 +3 NULL NULL +select * from t1 use index (bc) where b IS NULL and c IS NULL order by a; +a b c +select * from t1 use index (bc) where b IS NULL and c = 2 order by a; +a b c +select * from t1 use index (bc) where b < 4 order by a; +a b c +1 1 1 +select * from t1 use index (bc) where b IS NOT NULL order by a; +a b c +1 1 1 +4 4 NULL +insert into t1 values(5,1,1); +ERROR 23000: Duplicate entry '5' for key 1 +drop table t1; CREATE TABLE t2 ( a int unsigned NOT NULL PRIMARY KEY, b int unsigned not null, @@ -87,6 +132,13 @@ a b c 7 8 3 8 2 3 drop table t2; +CREATE TABLE t2 ( +a int unsigned NOT NULL PRIMARY KEY, +b int unsigned not null, +c int unsigned, +UNIQUE USING HASH (b, c) +) engine=ndbcluster; +ERROR 42000: Column 'c' is used with UNIQUE or INDEX but is not defined as NOT NULL CREATE TABLE t3 ( a int unsigned NOT NULL, b int unsigned not null, diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result index 6d9cfabb5a7..4a4c8fe22e4 100644 --- a/mysql-test/r/ps.result +++ b/mysql-test/r/ps.result @@ -450,3 +450,24 @@ PREPARE stmt FROM 'UPDATE t1 AS P1 INNER JOIN (SELECT N FROM t1 GROUP BY N HAVIN EXECUTE stmt; DEALLOCATE PREPARE stmt; DROP TABLE t1; +prepare stmt from "select ? is null, ? is not null, ?"; +select @no_such_var is null, @no_such_var is not null, @no_such_var; +@no_such_var is null @no_such_var is not null @no_such_var +1 0 NULL +execute stmt using @no_such_var, @no_such_var, @no_such_var; +? is null ? is not null ? +1 0 NULL +set @var='abc'; +select @var is null, @var is not null, @var; +@var is null @var is not null @var +0 1 abc +execute stmt using @var, @var, @var; +? is null ? is not null ? +0 1 abc +set @var=null; +select @var is null, @var is not null, @var; +@var is null @var is not null @var +1 0 NULL +execute stmt using @var, @var, @var; +? is null ? is not null ? 
+1 0 NULL diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index 17ed9513653..fc2b4a78469 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -221,7 +221,7 @@ update t1 set y=x; explain select * from t1, t1 t2 where t1.y = 2 and t2.x between 7 and t1.y+0; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ref y y 5 const 1 Using where -1 SIMPLE t2 range x x 5 NULL 4 Using where +1 SIMPLE t2 range x x 5 NULL 4 Range checked for each record (index map: 0x1) explain select * from t1, t1 t2 where t1.y = 2 and t2.x >= 7 and t2.x <= t1.y+0; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ref y y 5 const 1 Using where @@ -237,7 +237,7 @@ id select_type table type possible_keys key key_len ref rows Extra explain select * from t1, t1 t2 where t1.y = 2 and t2.x between 0 and t1.y; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ref y y 5 const 1 Using where -1 SIMPLE t2 ALL x NULL NULL NULL 9 Using where +1 SIMPLE t2 ALL x NULL NULL NULL 9 Range checked for each record (index map: 0x1) explain select * from t1, t1 t2 where t1.y = 2 and t2.x >= 0 and t2.x <= t1.y; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ref y y 5 const 1 Using where diff --git a/mysql-test/r/rpl_start_stop_slave.result b/mysql-test/r/rpl_start_stop_slave.result new file mode 100644 index 00000000000..1b4d87124d1 --- /dev/null +++ b/mysql-test/r/rpl_start_stop_slave.result @@ -0,0 +1,12 @@ +slave stop; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +reset master; +reset slave; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +slave start; +stop slave; +create table t1(n int); +start slave; +stop slave io_thread; +start slave io_thread; +drop table t1; diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index 32d482f5a32..53b92fe50f1 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -1990,3 +1990,18 @@ ac 700 NULL drop tables t1,t2; +create table t1 (a int not null, b int not null, c int, primary key (a,b)); +insert into t1 values (1,1,1), (2,2,2), (3,3,3); +set @b:= 0; +explain select sum(a) from t1 where b > @b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 8 NULL 3 Using where; Using index +set @a:= (select sum(a) from t1 where b > @b); +explain select a from t1 where c=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where +do @a:= (select sum(a) from t1 where b > @b); +explain select a from t1 where c=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where +drop table t1; diff --git a/mysql-test/r/type_blob.result b/mysql-test/r/type_blob.result index 95bba1d4ec7..8a0c74b3ae5 100644 --- a/mysql-test/r/type_blob.result +++ b/mysql-test/r/type_blob.result @@ -682,8 +682,8 @@ id txt 3 NULL 1 Chevy drop table t1; -CREATE TABLE t1 ( i int(11) NOT NULL default '0', c text NOT NULL, PRIMARY KEY (i), KEY (c(1),c(1))); -INSERT t1 VALUES (1,''),(2,''),(3,'asdfh'),(4,''); +CREATE TABLE t1 ( i int(11) NOT NULL default '0', c text NOT NULL, d varchar(1) NOT NULL DEFAULT ' ', PRIMARY KEY (i), KEY (c(1),d)); +INSERT t1 (i, c) VALUES (1,''),(2,''),(3,'asdfh'),(4,''); select max(i) from t1 where c = ''; max(i) 4 diff --git a/mysql-test/r/type_datetime.result b/mysql-test/r/type_datetime.result index 524bc9c50d4..127a54e087b 100644 --- 
a/mysql-test/r/type_datetime.result +++ b/mysql-test/r/type_datetime.result @@ -97,13 +97,15 @@ select * from t1 where a is null or b is null; a b drop table t1; create table t1 (t datetime); -insert into t1 values (20030102030460),(20030102036301),(20030102240401),(20030132030401),(20031302030460); +insert into t1 values (20030102030460),(20030102036301),(20030102240401), +(20030132030401),(20031302030401),(100001202030401); Warnings: Warning 1265 Data truncated for column 't' at row 1 Warning 1265 Data truncated for column 't' at row 2 Warning 1265 Data truncated for column 't' at row 3 Warning 1265 Data truncated for column 't' at row 4 Warning 1265 Data truncated for column 't' at row 5 +Warning 1265 Data truncated for column 't' at row 6 select * from t1; t 0000-00-00 00:00:00 @@ -111,14 +113,18 @@ t 0000-00-00 00:00:00 0000-00-00 00:00:00 0000-00-00 00:00:00 +0000-00-00 00:00:00 delete from t1; -insert into t1 values ("20030102030460"),("20030102036301"),("20030102240401"),("20030132030401"),("20031302030460"); +insert into t1 values +("2003-01-02 03:04:60"),("2003-01-02 03:63:01"),("2003-01-02 24:04:01"), +("2003-01-32 03:04:01"),("2003-13-02 03:04:01"), ("10000-12-02 03:04:00"); Warnings: Warning 1264 Data truncated; out of range for column 't' at row 1 Warning 1264 Data truncated; out of range for column 't' at row 2 Warning 1264 Data truncated; out of range for column 't' at row 3 Warning 1264 Data truncated; out of range for column 't' at row 4 Warning 1264 Data truncated; out of range for column 't' at row 5 +Warning 1264 Data truncated; out of range for column 't' at row 6 select * from t1; t 0000-00-00 00:00:00 @@ -126,6 +132,7 @@ t 0000-00-00 00:00:00 0000-00-00 00:00:00 0000-00-00 00:00:00 +0000-00-00 00:00:00 delete from t1; insert into t1 values ("0000-00-00 00:00:00 some trailer"),("2003-01-01 00:00:00 some trailer"); Warnings: diff --git a/mysql-test/t/ctype_big5.test b/mysql-test/t/ctype_big5.test index 9bf1808636e..b1d71a6af15 100644 --- a/mysql-test/t/ctype_big5.test +++ b/mysql-test/t/ctype_big5.test @@ -7,12 +7,6 @@ drop table if exists t1; --enable_warnings -SET NAMES big5; - -# -# Bug 1883: LIKE did not work in some cases with a key. 
-# -CREATE TABLE t1 (c CHAR(10) CHARACTER SET big5, KEY(c)); -INSERT INTO t1 VALUES ('aaa'),('aaaa'),('aaaaa'); -SELECT * FROM t1 WHERE c LIKE 'aaa%'; -DROP TABLE t1; +SET @test_character_set= 'big5'; +SET @test_collation= 'big5_chinese_ci'; +-- source include/ctype_common.inc diff --git a/mysql-test/t/ctype_latin1.test b/mysql-test/t/ctype_latin1.test index 14062437428..677acd9faa9 100644 --- a/mysql-test/t/ctype_latin1.test +++ b/mysql-test/t/ctype_latin1.test @@ -53,3 +53,10 @@ SELECT hex(@l:=convert(@u using latin1)), a=@l FROM t1; DROP TABLE t1; + +# +# Bug #6737: REGEXP gives wrong result with case sensitive collation +# +select 'a' regexp 'A' collate latin1_general_ci; +select 'a' regexp 'A' collate latin1_general_cs; +select 'a' regexp 'A' collate latin1_bin; diff --git a/mysql-test/t/ctype_uca.test b/mysql-test/t/ctype_uca.test index 708a31d637e..11833ba9bc7 100644 --- a/mysql-test/t/ctype_uca.test +++ b/mysql-test/t/ctype_uca.test @@ -25,6 +25,11 @@ select 'a ' = 'a\t', 'a ' < 'a\t', 'a ' > 'a\t'; select 'a a' > 'a', 'a \t' < 'a'; # +# Bug #6787 LIKE not working properly with _ and utf8 data +# +select 'c' like '\_' as want0; + +# # Bug #5679 utf8_unicode_ci LIKE--trailing % doesn't equal zero characters # CREATE TABLE t ( @@ -435,3 +440,7 @@ INSERT INTO t1 VALUES (CONVERT(_ucs2 0x06280648062F0646062F USING utf8)); INSERT INTO t1 VALUES (CONVERT(_ucs2 0x06450647064506270646 USING utf8)); SELECT HEX(CONVERT(col1 USING ucs2)) FROM t1 ORDER BY col1 COLLATE utf8_persian_ci, col1 COLLATE utf8_bin; DROP TABLE t1; + +SET @test_character_set= 'utf8'; +SET @test_collation= 'utf8_swedish_ci'; +-- source include/ctype_common.inc diff --git a/mysql-test/t/ctype_utf8.test b/mysql-test/t/ctype_utf8.test index c75b1dee63c..42031be8f3c 100644 --- a/mysql-test/t/ctype_utf8.test +++ b/mysql-test/t/ctype_utf8.test @@ -660,3 +660,9 @@ create table t2 select concat(a,_utf8'') as a, concat(b,_utf8'')as b from t1; show create table t2; drop table t2; drop table t1; + +# +# Bug #6787 LIKE not working properly with _ and utf8 data +# +select 'c' like '\_' as want0; + diff --git a/mysql-test/t/delete.test b/mysql-test/t/delete.test index 5f60445d765..0bf7187865d 100644 --- a/mysql-test/t/delete.test +++ b/mysql-test/t/delete.test @@ -29,12 +29,34 @@ drop table t1; # (This assumes a block size of 1024) # -create table t1 (a bigint not null, primary key (a,a,a,a,a,a,a,a,a,a)); -insert into t1 values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23); +create table t1 ( + a bigint not null, + b bigint not null default 0, + c bigint not null default 0, + d bigint not null default 0, + e bigint not null default 0, + f bigint not null default 0, + g bigint not null default 0, + h bigint not null default 0, + i bigint not null default 0, + j bigint not null default 0, + primary key (a,b,c,d,e,f,g,h,i,j)); +insert into t1 (a) values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23); delete from t1 where a=26; drop table t1; -create table t1 (a bigint not null, primary key (a,a,a,a,a,a,a,a,a,a)); -insert into t1 values (2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23),(27); +create table t1 ( + a bigint not null, + b bigint not null default 0, + c bigint not null default 0, + d bigint not null default 0, + e bigint not null default 0, + f bigint not null default 0, + g bigint not null default 0, + h bigint not null default 0, + i bigint not null default 0, + j bigint not null default 0, + primary key (a,b,c,d,e,f,g,h,i,j)); +insert into t1 (a) values 
(2),(4),(6),(8),(10),(12),(14),(16),(18),(20),(22),(24),(26),(23),(27); delete from t1 where a=27; drop table t1; diff --git a/mysql-test/t/fulltext.test b/mysql-test/t/fulltext.test index afbe8f8117c..008e965297f 100644 --- a/mysql-test/t/fulltext.test +++ b/mysql-test/t/fulltext.test @@ -98,6 +98,8 @@ select * from t1 where match a against ("+aaa* +bbb*" in boolean mode); select * from t1 where match a against ("+aaa* +bbb1*" in boolean mode); select * from t1 where match a against ("+aaa* +ccc*" in boolean mode); select * from t1 where match a against ("+aaa10 +(bbb*)" in boolean mode); +select * from t1 where match a against ("+(+aaa* +bbb1*)" in boolean mode); +select * from t1 where match a against ("(+aaa* +bbb1*)" in boolean mode); drop table t1; # @@ -306,3 +308,16 @@ REPAIR TABLE t1; select count(*) from t1; drop table t1; +# +# bug#6784 +# mi_flush_bulk_insert (on dup key error in mi_write) +# was mangling info->dupp_key_pos +# + +create table t1 (a int primary key, b text, fulltext(b)); +create table t2 (a int, b text); +insert t1 values (1, "aaaa"), (2, "bbbb"); +insert t2 values (10, "aaaa"), (2, "cccc"); +replace t1 select * from t2; +drop table t1, t2; + diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 8d8779e9d1b..79d6112e6de 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -384,6 +384,17 @@ select coercibility(max(a)) from t1; drop table t1; # +# Bug #6658 MAX(column) returns incorrect coercibility +# +create table t1 (a char character set latin2); +insert into t1 values ('a'),('b'); +select charset(max(a)), coercibility(max(a)), + charset(min(a)), coercibility(min(a)) from t1; +create table t2 select max(a),min(a) from t1; +show create table t2; +drop table t2,t1; + +# # aggregate functions on static tables # create table t1 (a int); diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 67192c55ef9..b9e592fc5d9 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -230,6 +230,13 @@ select unix_timestamp(@a); select unix_timestamp('1969-12-01 19:00:01'); # +# Test for bug #6439 "unix_timestamp() function returns wrong datetime +# values for too big argument". It should return error instead. +# +select from_unixtime(0); +select from_unixtime(2145916800); + +# # Test types from + INTERVAL # diff --git a/mysql-test/t/heap.test b/mysql-test/t/heap.test index e1776245d9e..2eff36f3317 100644 --- a/mysql-test/t/heap.test +++ b/mysql-test/t/heap.test @@ -174,3 +174,14 @@ CREATE TABLE t1 (a INT NOT NULL, B INT, KEY(B)) ENGINE=HEAP; INSERT INTO t1 VALUES(1,1), (1,NULL); SELECT * FROM t1 WHERE B is not null; DROP TABLE t1; + +# +# Bug #6748 +# heap_rfirst() doesn't work (and never did!) 
+# +CREATE TABLE t1 (pseudo char(35) PRIMARY KEY, date int(10) unsigned NOT NULL) ENGINE=HEAP; +INSERT INTO t1 VALUES ('massecot',1101106491),('altec',1101106492),('stitch+',1101106304),('Seb Corgan',1101106305),('beerfilou',1101106263),('flaker',1101106529),('joce8',5),('M4vrick',1101106418),('gabay008',1101106525),('Vamp irX',1101106291),('ZoomZip',1101106546),('rip666',1101106502),('CBP ',1101106397),('guezpard',1101106496); +DELETE FROM t1 WHERE date<1101106546; +SELECT * FROM t1; +DROP TABLE t1; + diff --git a/mysql-test/t/innodb.test b/mysql-test/t/innodb.test index a452f79f949..cf7be4f882c 100644 --- a/mysql-test/t/innodb.test +++ b/mysql-test/t/innodb.test @@ -1158,3 +1158,25 @@ show status like "binlog_cache_use"; show status like "binlog_cache_disk_use"; drop table t1; + +# +# Bug #6126: Duplicate columns in keys gives misleading error message +# +--error 1060 +create table t1 (c char(10), index (c,c)) engine=innodb; +--error 1060 +create table t1 (c1 char(10), c2 char(10), index (c1,c2,c1)) engine=innodb; +--error 1060 +create table t1 (c1 char(10), c2 char(10), index (c1,c1,c2)) engine=innodb; +--error 1060 +create table t1 (c1 char(10), c2 char(10), index (c2,c1,c1)) engine=innodb; +create table t1 (c1 char(10), c2 char(10)) engine=innodb; +--error 1060 +alter table t1 add key (c1,c1); +--error 1060 +alter table t1 add key (c2,c1,c1); +--error 1060 +alter table t1 add key (c1,c2,c1); +--error 1060 +alter table t1 add key (c1,c1,c2); +drop table t1; diff --git a/mysql-test/t/key.test b/mysql-test/t/key.test index 5ee2f68ab83..a0a291fdf3c 100644 --- a/mysql-test/t/key.test +++ b/mysql-test/t/key.test @@ -297,3 +297,26 @@ drop table t1; --error 1105 create table t1 (c char(10), index (c(0))); + +# +# Bug #6126: Duplicate columns in keys should fail +# Bug #6252: (dup) +# +--error 1060 +create table t1 (c char(10), index (c,c)); +--error 1060 +create table t1 (c1 char(10), c2 char(10), index (c1,c2,c1)); +--error 1060 +create table t1 (c1 char(10), c2 char(10), index (c1,c1,c2)); +--error 1060 +create table t1 (c1 char(10), c2 char(10), index (c2,c1,c1)); +create table t1 (c1 char(10), c2 char(10)); +--error 1060 +alter table t1 add key (c1,c1); +--error 1060 +alter table t1 add key (c2,c1,c1); +--error 1060 +alter table t1 add key (c1,c2,c1); +--error 1060 +alter table t1 add key (c1,c1,c2); +drop table t1; diff --git a/mysql-test/t/myisam.test b/mysql-test/t/myisam.test index f9081e8769b..c8ed7910b76 100644 --- a/mysql-test/t/myisam.test +++ b/mysql-test/t/myisam.test @@ -498,11 +498,12 @@ alter table t1 disable keys; show keys from t1; create table t2 (a int); let $i=1000; +set @@rand_seed1=31415926,@@rand_seed2=2718281828; --disable_query_log while ($i) { dec $i; - eval insert t2 values (rand()*100000); + insert t2 values (rand()*100000); } --enable_query_log insert t1 select * from t2; diff --git a/mysql-test/t/ndb_autodiscover.test b/mysql-test/t/ndb_autodiscover.test index fd7fe0e60d8..6551732adba 100644 --- a/mysql-test/t/ndb_autodiscover.test +++ b/mysql-test/t/ndb_autodiscover.test @@ -199,7 +199,7 @@ insert into t4 values (1, "Automatic"); select * from t4; # Remove the table from NDB -system exec $NDB_TOOLS_DIR/ndb_drop_table -d test t4 > /dev/null ; +system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t4 > /dev/null ; # # Test that correct error is returned @@ -230,7 +230,7 @@ select * from t4; flush tables; # Remove the table from NDB -system exec $NDB_TOOLS_DIR/ndb_drop_table -d test t4 > /dev/null ; +system exec $NDB_TOOLS_DIR/ndb_drop_table 
--no-defaults -d test t4 > /dev/null ; SHOW TABLES; @@ -264,8 +264,8 @@ insert into t8 values (8, "myisam table 8"); insert into t9 values (9); # Remove t3, t5 from NDB -system exec $NDB_TOOLS_DIR/ndb_drop_table -d test t3 > /dev/null ; -system exec $NDB_TOOLS_DIR/ndb_drop_table -d test t5 > /dev/null ; +system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t3 > /dev/null ; +system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t5 > /dev/null ; # Remove t6, t7 from disk system rm var/master-data/test/t6.frm > /dev/null ; system rm var/master-data/test/t7.frm > /dev/null ; @@ -306,8 +306,8 @@ insert into t8 values (8, "myisam table 8"); insert into t9 values (9); # Remove t3, t5 from NDB -system exec $NDB_TOOLS_DIR/ndb_drop_table -d test t3 > /dev/null ; -system exec $NDB_TOOLS_DIR/ndb_drop_table -d test t5 > /dev/null ; +system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t3 > /dev/null ; +system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t5 > /dev/null ; # Remove t6, t7 from disk system rm var/master-data/test/t6.frm > /dev/null ; system rm var/master-data/test/t7.frm > /dev/null ; @@ -479,4 +479,4 @@ create table t10 ( insert into t10 values (1, 'kalle'); ---exec $NDB_TOOLS_DIR/ndb_drop_table -d test `$NDB_TOOLS_DIR/ndb_show_tables | grep BLOB` > /dev/null 2>&1 || true +--exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test `$NDB_TOOLS_DIR/ndb_show_tables --no-defaults | grep BLOB` > /dev/null 2>&1 || true diff --git a/mysql-test/t/ndb_grant.test b/mysql-test/t/ndb_grant.test new file mode 100644 index 00000000000..d3899d9972f --- /dev/null +++ b/mysql-test/t/ndb_grant.test @@ -0,0 +1,374 @@ +-- source include/have_ndb.inc +# Test of GRANT commands + +# Cleanup +--disable_warnings +drop table if exists t1; +--enable_warnings + +SET NAMES binary; + +# +# Alter mysql system tables to ndb +# make sure you alter all back in the end +# +use mysql; +alter table columns_priv engine=ndb; +alter table db engine=ndb; +alter table func engine=ndb; +alter table help_category engine=ndb; +alter table help_keyword engine=ndb; +alter table help_relation engine=ndb; +alter table help_topic engine=ndb; +alter table host engine=ndb; +alter table tables_priv engine=ndb; +alter table time_zone engine=ndb; +alter table time_zone_leap_second engine=ndb; +alter table time_zone_name engine=ndb; +alter table time_zone_transition engine=ndb; +alter table time_zone_transition_type engine=ndb; +alter table user engine=ndb; +use test; + +# +# Test that SSL options works properly +# +delete from mysql.user where user='mysqltest_1'; +delete from mysql.db where user='mysqltest_1'; +flush privileges; +begin; +grant select on mysqltest.* to mysqltest_1@localhost require cipher "EDH-RSA-DES-CBC3-SHA"; +commit; +show grants for mysqltest_1@localhost; +begin; +grant delete on mysqltest.* to mysqltest_1@localhost; +commit; +select * from mysql.user where user="mysqltest_1"; +show grants for mysqltest_1@localhost; +begin; +revoke delete on mysqltest.* from mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +begin; +grant select on mysqltest.* to mysqltest_1@localhost require NONE; +commit; +show grants for mysqltest_1@localhost; +begin; +grant USAGE on mysqltest.* to mysqltest_1@localhost require cipher "EDH-RSA-DES-CBC3-SHA" AND SUBJECT "testsubject" ISSUER "MySQL AB"; +commit; +show grants for mysqltest_1@localhost; +begin; +revoke all privileges on mysqltest.* from mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +delete from 
mysql.user where user='mysqltest_1'; +flush privileges; + +# +# Test that the new db privileges are stored/retrieved correctly +# + +begin; +grant CREATE TEMPORARY TABLES, LOCK TABLES on mysqltest.* to mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +flush privileges; +show grants for mysqltest_1@localhost; +begin; +revoke CREATE TEMPORARY TABLES on mysqltest.* from mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +begin; +grant ALL PRIVILEGES on mysqltest.* to mysqltest_1@localhost with GRANT OPTION; +commit; +flush privileges; +show grants for mysqltest_1@localhost; +begin; +revoke LOCK TABLES, ALTER on mysqltest.* from mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +begin; +revoke all privileges on mysqltest.* from mysqltest_1@localhost; +commit; +delete from mysql.user where user='mysqltest_1'; +flush privileges; +begin; +grant usage on test.* to mysqltest_1@localhost with grant option; +commit; +show grants for mysqltest_1@localhost; +delete from mysql.user where user='mysqltest_1'; +delete from mysql.db where user='mysqltest_1'; +delete from mysql.tables_priv where user='mysqltest_1'; +delete from mysql.columns_priv where user='mysqltest_1'; +flush privileges; +--error 1141 +show grants for mysqltest_1@localhost; + +# +# Test what happens when you have same table and colum level grants +# + +create table t1 (a int); +begin; +GRANT select,update,insert on t1 to mysqltest_1@localhost; +GRANT select (a), update (a),insert(a), references(a) on t1 to mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +select table_priv,column_priv from mysql.tables_priv where user="mysqltest_1"; +begin; +REVOKE select (a), update on t1 from mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +begin; +REVOKE select,update,insert,insert (a) on t1 from mysqltest_1@localhost; +commit; +show grants for mysqltest_1@localhost; +begin; +GRANT select,references on t1 to mysqltest_1@localhost; +commit; +select table_priv,column_priv from mysql.tables_priv where user="mysqltest_1"; +begin; +grant all on test.* to mysqltest_3@localhost with grant option; +revoke all on test.* from mysqltest_3@localhost; +commit; +show grants for mysqltest_3@localhost; +begin; +revoke grant option on test.* from mysqltest_3@localhost; +commit; +show grants for mysqltest_3@localhost; +begin; +grant all on test.t1 to mysqltest_2@localhost with grant option; +revoke all on test.t1 from mysqltest_2@localhost; +commit; +show grants for mysqltest_2@localhost; +begin; +revoke grant option on test.t1 from mysqltest_2@localhost; +commit; +show grants for mysqltest_2@localhost; +delete from mysql.user where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3"; +delete from mysql.db where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3"; +delete from mysql.tables_priv where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3"; +delete from mysql.columns_priv where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3"; +flush privileges; +drop table t1; + +# +# Test some error conditions +# +begin; +--error 1221 +GRANT FILE on mysqltest.* to mysqltest_1@localhost; +commit; +select 1; -- To test that the previous command didn't cause problems + +# +# Bug#6123: GRANT USAGE inserts useless Db row +# +create database mysqltest1; +begin; +grant usage on mysqltest1.* to test6123 identified by 'magic123'; +commit; +select host,db,user,select_priv,insert_priv from mysql.db where db="mysqltest1"; 
+delete from mysql.user where user='test6123'; +drop database mysqltest1; + +# +# Test for 'drop user', 'revoke privileges, grant' +# + +create table t1 (a int); +begin; +grant ALL PRIVILEGES on *.* to drop_user2@localhost with GRANT OPTION; +commit; +show grants for drop_user2@localhost; +begin; +revoke all privileges, grant option from drop_user2@localhost; +commit; +drop user drop_user2@localhost; + +begin; +grant ALL PRIVILEGES on *.* to drop_user@localhost with GRANT OPTION; +grant ALL PRIVILEGES on test.* to drop_user@localhost with GRANT OPTION; +grant select(a) on test.t1 to drop_user@localhost; +commit; +show grants for drop_user@localhost; + +# +# Bug3086 +# +set sql_mode=ansi_quotes; +show grants for drop_user@localhost; +set sql_mode=default; + +set sql_quote_show_create=0; +show grants for drop_user@localhost; +set sql_mode="ansi_quotes"; +show grants for drop_user@localhost; +set sql_quote_show_create=1; +show grants for drop_user@localhost; +set sql_mode=""; +show grants for drop_user@localhost; + +revoke all privileges, grant option from drop_user@localhost; +show grants for drop_user@localhost; +drop user drop_user@localhost; +begin; +--error 1269 +revoke all privileges, grant option from drop_user@localhost; +commit; + +begin; +grant select(a) on test.t1 to drop_user1@localhost; +commit; +flush privileges; +begin; +grant select on test.t1 to drop_user2@localhost; +grant select on test.* to drop_user3@localhost; +grant select on *.* to drop_user4@localhost; +commit; +--error 1268 +drop user drop_user1@localhost, drop_user2@localhost, drop_user3@localhost, +drop_user4@localhost; +begin; +revoke all privileges, grant option from drop_user1@localhost, drop_user2@localhost, +drop_user3@localhost, drop_user4@localhost; +commit; +drop user drop_user1@localhost, drop_user2@localhost, drop_user3@localhost, +drop_user4@localhost; +drop table t1; +begin; +grant usage on *.* to mysqltest_1@localhost identified by "password"; +grant select, update, insert on test.* to mysqltest@localhost; +commit; +show grants for mysqltest_1@localhost; +drop user mysqltest_1@localhost; + +# +# Bug #3403 Wrong encodin in SHOW GRANTS output +# +SET NAMES koi8r; +CREATE DATABASE ÂÄ; +USE ÂÄ; +CREATE TABLE ÔÁÂ (ËÏÌ int); + +begin; +GRANT SELECT ON ÂÄ.* TO ÀÚÅÒ@localhost; +commit; +SHOW GRANTS FOR ÀÚÅÒ@localhost; +begin; +REVOKE SELECT ON ÂÄ.* FROM ÀÚÅÒ@localhost; +commit; + +begin; +GRANT SELECT ON ÂÄ.ÔÁÂ TO ÀÚÅÒ@localhost; +commit; +SHOW GRANTS FOR ÀÚÅÒ@localhost; +begin; +REVOKE SELECT ON ÂÄ.ÔÁÂ FROM ÀÚÅÒ@localhost; +commit; + +begin; +GRANT SELECT (ËÏÌ) ON ÂÄ.ÔÁÂ TO ÀÚÅÒ@localhost; +commit; +SHOW GRANTS FOR ÀÚÅÒ@localhost; +begin; +REVOKE SELECT (ËÏÌ) ON ÂÄ.ÔÁÂ FROM ÀÚÅÒ@localhost; +commit; + +DROP DATABASE ÂÄ; +SET NAMES latin1; + +# +# Bug #5831: REVOKE ALL PRIVILEGES, GRANT OPTION does not revoke everything +# +USE test; +CREATE TABLE t1 (a int ); +CREATE TABLE t2 LIKE t1; +CREATE TABLE t3 LIKE t1; +CREATE TABLE t4 LIKE t1; +CREATE TABLE t5 LIKE t1; +CREATE TABLE t6 LIKE t1; +CREATE TABLE t7 LIKE t1; +CREATE TABLE t8 LIKE t1; +CREATE TABLE t9 LIKE t1; +CREATE TABLE t10 LIKE t1; +CREATE DATABASE testdb1; +CREATE DATABASE testdb2; +CREATE DATABASE testdb3; +CREATE DATABASE testdb4; +CREATE DATABASE testdb5; +CREATE DATABASE testdb6; +CREATE DATABASE testdb7; +CREATE DATABASE testdb8; +CREATE DATABASE testdb9; +CREATE DATABASE testdb10; +begin; +GRANT ALL ON testdb1.* TO testuser@localhost; +GRANT ALL ON testdb2.* TO testuser@localhost; +GRANT ALL ON testdb3.* TO testuser@localhost; +GRANT ALL ON 
testdb4.* TO testuser@localhost; +GRANT ALL ON testdb5.* TO testuser@localhost; +GRANT ALL ON testdb6.* TO testuser@localhost; +GRANT ALL ON testdb7.* TO testuser@localhost; +GRANT ALL ON testdb8.* TO testuser@localhost; +GRANT ALL ON testdb9.* TO testuser@localhost; +GRANT ALL ON testdb10.* TO testuser@localhost; +GRANT SELECT ON test.t1 TO testuser@localhost; +GRANT SELECT ON test.t2 TO testuser@localhost; +GRANT SELECT ON test.t3 TO testuser@localhost; +GRANT SELECT ON test.t4 TO testuser@localhost; +GRANT SELECT ON test.t5 TO testuser@localhost; +GRANT SELECT ON test.t6 TO testuser@localhost; +GRANT SELECT ON test.t7 TO testuser@localhost; +GRANT SELECT ON test.t8 TO testuser@localhost; +GRANT SELECT ON test.t9 TO testuser@localhost; +GRANT SELECT ON test.t10 TO testuser@localhost; +GRANT SELECT (a) ON test.t1 TO testuser@localhost; +GRANT SELECT (a) ON test.t2 TO testuser@localhost; +GRANT SELECT (a) ON test.t3 TO testuser@localhost; +GRANT SELECT (a) ON test.t4 TO testuser@localhost; +GRANT SELECT (a) ON test.t5 TO testuser@localhost; +GRANT SELECT (a) ON test.t6 TO testuser@localhost; +GRANT SELECT (a) ON test.t7 TO testuser@localhost; +GRANT SELECT (a) ON test.t8 TO testuser@localhost; +GRANT SELECT (a) ON test.t9 TO testuser@localhost; +GRANT SELECT (a) ON test.t10 TO testuser@localhost; +commit; +begin; +REVOKE ALL PRIVILEGES, GRANT OPTION FROM testuser@localhost; +commit; +SHOW GRANTS FOR testuser@localhost; +DROP USER testuser@localhost; +DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +DROP DATABASE testdb1; +DROP DATABASE testdb2; +DROP DATABASE testdb3; +DROP DATABASE testdb4; +DROP DATABASE testdb5; +DROP DATABASE testdb6; +DROP DATABASE testdb7; +DROP DATABASE testdb8; +DROP DATABASE testdb9; +DROP DATABASE testdb10; + +# +# Alter mysql system tables back to myisam +# +use mysql; +alter table columns_priv engine=myisam; +alter table db engine=myisam; +alter table func engine=myisam; +alter table help_category engine=myisam; +alter table help_keyword engine=myisam; +alter table help_relation engine=myisam; +alter table help_topic engine=myisam; +alter table host engine=myisam; +alter table tables_priv engine=myisam; +alter table time_zone engine=myisam; +alter table time_zone_leap_second engine=myisam; +alter table time_zone_name engine=myisam; +alter table time_zone_transition engine=myisam; +alter table time_zone_transition_type engine=myisam; +alter table user engine=myisam; +use test; +flush privileges; diff --git a/mysql-test/t/ndb_index_unique.test b/mysql-test/t/ndb_index_unique.test index bdb23949763..397a2c45a9f 100644 --- a/mysql-test/t/ndb_index_unique.test +++ b/mysql-test/t/ndb_index_unique.test @@ -30,6 +30,32 @@ select * from t1 order by a; drop table t1; +# +# Indexing NULL values +# + +CREATE TABLE t1 ( + a int unsigned NOT NULL PRIMARY KEY, + b int unsigned, + c int unsigned, + UNIQUE bc(b,c) +) engine = ndb; + +insert into t1 values(1,1,1),(2,NULL,2),(3,NULL,NULL),(4,4,NULL); +select * from t1 use index (bc) where b IS NULL order by a; + +select * from t1 use index (bc)order by a; +select * from t1 use index (bc) order by a; +select * from t1 use index (PRIMARY) where b IS NULL order by a; +select * from t1 use index (bc) where b IS NULL order by a; +select * from t1 use index (bc) where b IS NULL and c IS NULL order by a; +select * from t1 use index (bc) where b IS NULL and c = 2 order by a; +select * from t1 use index (bc) where b < 4 order by a; +select * from t1 use index (bc) where b IS NOT NULL order by a; +-- error 1062 +insert into t1 values(5,1,1); 
+drop table t1; + # # Show use of UNIQUE USING HASH indexes @@ -58,6 +84,14 @@ select * from t2 order by a; drop table t2; +-- error 1121 +CREATE TABLE t2 ( + a int unsigned NOT NULL PRIMARY KEY, + b int unsigned not null, + c int unsigned, + UNIQUE USING HASH (b, c) +) engine=ndbcluster; + # # Show use of PRIMARY KEY USING HASH indexes # diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test index 2b3e961fc28..7fe88ad0ddc 100644 --- a/mysql-test/t/ps.test +++ b/mysql-test/t/ps.test @@ -458,3 +458,17 @@ EXECUTE stmt; DEALLOCATE PREPARE stmt; DROP TABLE t1; +# +# Bug#6297 "prepared statement, wrong handling of <parameter> IS NULL" +# Test that placeholders work with IS NULL/IS NOT NULL clauses. +# +prepare stmt from "select ? is null, ? is not null, ?"; +select @no_such_var is null, @no_such_var is not null, @no_such_var; +execute stmt using @no_such_var, @no_such_var, @no_such_var; +set @var='abc'; +select @var is null, @var is not null, @var; +execute stmt using @var, @var, @var; +set @var=null; +select @var is null, @var is not null, @var; +execute stmt using @var, @var, @var; + diff --git a/mysql-test/t/rpl000001.test b/mysql-test/t/rpl000001.test index 2e0ba2fff25..835af92186f 100644 --- a/mysql-test/t/rpl000001.test +++ b/mysql-test/t/rpl000001.test @@ -98,7 +98,7 @@ wait_for_slave_to_stop; # The following test can't be done because the result of Pos will differ # on different computers -# --replace_result 9306 9999 3334 9999 3335 9999 +# --replace_result $MASTER_MYPORT MASTER_PORT # show slave status; set global sql_slave_skip_counter=1; diff --git a/mysql-test/t/rpl_start_stop_slave.test b/mysql-test/t/rpl_start_stop_slave.test new file mode 100644 index 00000000000..903ff204194 --- /dev/null +++ b/mysql-test/t/rpl_start_stop_slave.test @@ -0,0 +1,34 @@ +source include/master-slave.inc; + +# +# Bug#6148 () +# +connection slave; +stop slave; + +# Let the master do lots of insertions +connection master; +create table t1(n int); +let $1=5000; +disable_query_log; +while ($1) +{ + eval insert into t1 values($1); + dec $1; +} +enable_query_log; +save_master_pos; + +connection slave; +start slave; +sleep 1; +stop slave io_thread; +start slave io_thread; +sync_with_master; + +connection master; +drop table t1; +save_master_pos; + +connection slave; +sync_with_master; diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index e0f6fcbf515..19bfaa6194a 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -1282,3 +1282,22 @@ INSERT INTO `t2` VALUES (6,5,12,7,'a'),(12,0,0,7,'a'),(12,1,0,7,'a'),(12,5,5,7,' SELECT b.sc FROM (SELECT (SELECT a.access FROM t1 a WHERE a.map = op.map AND a.slave = op.pid AND a.master = 1) ac FROM t2 op WHERE op.id = 12 AND op.map = 0) b; SELECT b.ac FROM (SELECT (SELECT a.access FROM t1 a WHERE a.map = op.map AND a.slave = op.pid AND a.master = 1) ac FROM t2 op WHERE op.id = 12 AND op.map = 0) b; drop tables t1,t2; + +# +# Test for bug #6462. "Same request on same data returns different +# results." a.k.a. "Proper cleanup of subqueries is missing for +# SET and DO statements". 
+# +create table t1 (a int not null, b int not null, c int, primary key (a,b)); +insert into t1 values (1,1,1), (2,2,2), (3,3,3); +set @b:= 0; +# Let us check that subquery will use covering index +explain select sum(a) from t1 where b > @b; +# This should not crash -debug server due to failing assertion +set @a:= (select sum(a) from t1 where b > @b); +# And this should not falsely report index usage +explain select a from t1 where c=2; +# Same for DO statement +do @a:= (select sum(a) from t1 where b > @b); +explain select a from t1 where c=2; +drop table t1; diff --git a/mysql-test/t/type_blob.test b/mysql-test/t/type_blob.test index b67fa7a552d..f70193ddbe0 100644 --- a/mysql-test/t/type_blob.test +++ b/mysql-test/t/type_blob.test @@ -369,8 +369,8 @@ explain select * from t1 where txt='Chevy' or txt is NULL order by txt; select * from t1 where txt='Chevy' or txt is NULL order by txt; drop table t1; -CREATE TABLE t1 ( i int(11) NOT NULL default '0', c text NOT NULL, PRIMARY KEY (i), KEY (c(1),c(1))); -INSERT t1 VALUES (1,''),(2,''),(3,'asdfh'),(4,''); +CREATE TABLE t1 ( i int(11) NOT NULL default '0', c text NOT NULL, d varchar(1) NOT NULL DEFAULT ' ', PRIMARY KEY (i), KEY (c(1),d)); +INSERT t1 (i, c) VALUES (1,''),(2,''),(3,'asdfh'),(4,''); select max(i) from t1 where c = ''; drop table t1; diff --git a/mysql-test/t/type_datetime.test b/mysql-test/t/type_datetime.test index 47866058524..04e4a73554a 100644 --- a/mysql-test/t/type_datetime.test +++ b/mysql-test/t/type_datetime.test @@ -77,10 +77,13 @@ drop table t1; # warnings (for both strings and numbers) # create table t1 (t datetime); -insert into t1 values (20030102030460),(20030102036301),(20030102240401),(20030132030401),(20031302030460); +insert into t1 values (20030102030460),(20030102036301),(20030102240401), + (20030132030401),(20031302030401),(100001202030401); select * from t1; delete from t1; -insert into t1 values ("20030102030460"),("20030102036301"),("20030102240401"),("20030132030401"),("20031302030460"); +insert into t1 values + ("2003-01-02 03:04:60"),("2003-01-02 03:63:01"),("2003-01-02 24:04:01"), + ("2003-01-32 03:04:01"),("2003-13-02 03:04:01"), ("10000-12-02 03:04:00"); select * from t1; delete from t1; insert into t1 values ("0000-00-00 00:00:00 some trailer"),("2003-01-01 00:00:00 some trailer"); diff --git a/mysys/charset.c b/mysys/charset.c index 1388fc40c6d..cb2379f8723 100644 --- a/mysys/charset.c +++ b/mysys/charset.c @@ -228,6 +228,7 @@ static int add_collation(CHARSET_INFO *cs) } else { + uchar *sort_order= all_charsets[cs->number]->sort_order; simple_cs_init_functions(all_charsets[cs->number]); new->mbminlen= 1; new->mbmaxlen= 1; @@ -236,6 +237,16 @@ static int add_collation(CHARSET_INFO *cs) all_charsets[cs->number]->state |= MY_CS_LOADED; } all_charsets[cs->number]->state|= MY_CS_AVAILABLE; + + /* + Check if case sensitive sort order: A < a < B. 
+ We need MY_CS_FLAG for regex library, and for + case sensitivity flag for 5.0 client protocol, + to support isCaseSensitive() method in JDBC driver + */ + if (sort_order && sort_order['A'] < sort_order['a'] && + sort_order['a'] < sort_order['B']) + all_charsets[cs->number]->state|= MY_CS_CSSORT; } } else diff --git a/ndb/docs/wl2077.txt b/ndb/docs/wl2077.txt new file mode 100644 index 00000000000..5a77c18aa2a --- /dev/null +++ b/ndb/docs/wl2077.txt @@ -0,0 +1,35 @@ + +100' * (select 1 from T1 (1M rows) where key = rand()); +1 host, 1 ndbd, api co-hosted +results in 1000 rows / sec + + wo/reset bounds w/ rb +4.1-read committed a) 4.9 b) 7.4 +4.1-read hold lock c) 4.7 d) 6.7 + +wl2077-read committed 6.4 (+30%) 10.8 (+45%) +wl2077-read hold lock 4.6 (-1%) 6.7 (+ 0%) + +-- Comparision e) +serial pk: 10.9' +batched (1000): 59' +serial uniq index: 8.4' +batched (1000): 33' +index range (1000): 186' + +---- + +load) testScanPerf -c 1 -d 1 T1 +a) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 0 -r 2 -q 0 T1 +b) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 0 -r 2 -q 1 T1 +c) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 1 -r 2 -q 0 T1 +d) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 1 -r 2 -q 1 T1 +e) testReadPerf -i 25 -c 0 -d 0 T1 + +--- music join 1db-co 2db-co + +4.1 13s 14s +4.1 wo/ blobs 1.7s 3.2s + +wl2077 12s 14s +wl2077 wo/ blobs 1.2s (-30%) 2.5s (-22%) diff --git a/ndb/include/Makefile.am b/ndb/include/Makefile.am index ca2e8152352..61f55cf9d61 100644 --- a/ndb/include/Makefile.am +++ b/ndb/include/Makefile.am @@ -28,15 +28,14 @@ ndbapi/NdbIndexScanOperation.hpp \ ndbapi/ndberror.h mgmapiinclude_HEADERS = \ -mgmapi/LocalConfig.hpp \ mgmapi/mgmapi.h \ -mgmapi/mgmapi_debug.h +mgmapi/mgmapi_debug.h \ +mgmapi/mgmapi_config_parameters.h \ +mgmapi/mgmapi_config_parameters_debug.h noinst_HEADERS = \ ndb_global.h \ -ndb_net.h \ -mgmapi/mgmapi_config_parameters.h \ -mgmapi/mgmapi_config_parameters_debug.h +ndb_net.h EXTRA_DIST = debugger editline kernel logger mgmcommon \ portlib transporter util diff --git a/ndb/include/kernel/signaldata/ArbitSignalData.hpp b/ndb/include/kernel/signaldata/ArbitSignalData.hpp index f255b8dcbbe..34b73644a13 100644 --- a/ndb/include/kernel/signaldata/ArbitSignalData.hpp +++ b/ndb/include/kernel/signaldata/ArbitSignalData.hpp @@ -94,13 +94,14 @@ public: // arbitration result LoseNodes = 41, // lose on ndb node count - WinGroups = 42, // we win, no need for arbitration - LoseGroups = 43, // we lose, missing node group - Partitioning = 44, // possible network partitioning - WinChoose = 45, // positive reply - LoseChoose = 46, // negative reply - LoseNorun = 47, // arbitrator required but not running - LoseNocfg = 48, // arbitrator required but none configured + WinNodes = 42, // win on ndb node count + WinGroups = 43, // we win, no need for arbitration + LoseGroups = 44, // we lose, missing node group + Partitioning = 45, // possible network partitioning + WinChoose = 46, // positive reply + LoseChoose = 47, // negative reply + LoseNorun = 48, // arbitrator required but not running + LoseNocfg = 49, // arbitrator required but none configured // general error codes ErrTicket = 91, // invalid arbitrator-ticket diff --git a/ndb/include/mgmapi/mgmapi.h b/ndb/include/mgmapi/mgmapi.h index f1ef357421b..a23417f153a 100644 --- a/ndb/include/mgmapi/mgmapi.h +++ b/ndb/include/mgmapi/mgmapi.h @@ -356,11 +356,26 @@ extern "C" { /** * Create a handle to a management server * - * @return A management handle<br> - * or NULL if no management handle could be created. 
+ * @return A management handle<br> + * or NULL if no management handle could be created. */ NdbMgmHandle ndb_mgm_create_handle(); + /** + * Set connecst string to management server + * + * @param handle Management handle + * @param connect_string Connect string to the management server, + * + * @return -1 on error. + */ + int ndb_mgm_set_connectstring(NdbMgmHandle handle, + const char *connect_string); + + int ndb_mgm_get_configuration_nodeid(NdbMgmHandle handle); + int ndb_mgm_get_connected_port(NdbMgmHandle handle); + const char *ndb_mgm_get_connected_host(NdbMgmHandle handle); + /** * Destroy a management server handle * @@ -378,11 +393,10 @@ extern "C" { * Connect to a management server * * @param handle Management handle. - * @param mgmsrv Hostname and port of the management server, - * "hostname:port". * @return -1 on error. */ - int ndb_mgm_connect(NdbMgmHandle handle, const char * mgmsrv); + int ndb_mgm_connect(NdbMgmHandle handle, int no_retries, + int retry_delay_in_seconds, int verbose); /** * Disconnect from a management server @@ -709,9 +723,7 @@ extern "C" { void ndb_mgm_destroy_configuration(struct ndb_mgm_configuration *); int ndb_mgm_alloc_nodeid(NdbMgmHandle handle, - unsigned version, - unsigned *pnodeid, - int nodetype); + unsigned version, int nodetype); /** * Config iterator */ diff --git a/ndb/include/mgmcommon/ConfigRetriever.hpp b/ndb/include/mgmcommon/ConfigRetriever.hpp index 6c32255e921..80449628867 100644 --- a/ndb/include/mgmcommon/ConfigRetriever.hpp +++ b/ndb/include/mgmcommon/ConfigRetriever.hpp @@ -20,7 +20,6 @@ #include <ndb_types.h> #include <mgmapi.h> #include <BaseString.hpp> -#include <LocalConfig.hpp> /** * @class ConfigRetriever @@ -28,10 +27,11 @@ */ class ConfigRetriever { public: - ConfigRetriever(LocalConfig &local_config, Uint32 version, Uint32 nodeType); + ConfigRetriever(const char * _connect_string, + Uint32 version, Uint32 nodeType); ~ConfigRetriever(); - int do_connect(int exit_on_connect_failure= false); + int do_connect(int no_retries, int retry_delay_in_seconds, int verbose); /** * Get configuration for current node. @@ -46,12 +46,14 @@ public: */ struct ndb_mgm_configuration * getConfig(); + void resetError(); + int hasError(); const char * getErrorString(); /** * @return Node id of this node (as stated in local config or connectString) */ - Uint32 allocNodeId(); + Uint32 allocNodeId(int no_retries, int retry_delay_in_seconds); /** * Get config using socket @@ -68,22 +70,26 @@ public: */ bool verifyConfig(const struct ndb_mgm_configuration *, Uint32 nodeid); - Uint32 get_mgmd_port() const {return m_mgmd_port;}; - const char *get_mgmd_host() const {return m_mgmd_host;}; + Uint32 get_mgmd_port() const; + const char *get_mgmd_host() const; + + Uint32 get_configuration_nodeid() const; private: BaseString errorString; enum ErrorType { - CR_ERROR = 0, - CR_RETRY = 1 + CR_NO_ERROR = 0, + CR_ERROR = 1, + CR_RETRY = 2 }; ErrorType latestErrorType; void setError(ErrorType, const char * errorMsg); - struct LocalConfig& _localConfig; - Uint32 _ownNodeId; + Uint32 _ownNodeId; + /* Uint32 m_mgmd_port; const char *m_mgmd_host; + */ Uint32 m_version; Uint32 m_node_type; diff --git a/ndb/include/ndbapi/NdbConnection.hpp b/ndb/include/ndbapi/NdbConnection.hpp index 7af5d27b922..256199dced7 100644 --- a/ndb/include/ndbapi/NdbConnection.hpp +++ b/ndb/include/ndbapi/NdbConnection.hpp @@ -607,8 +607,8 @@ private: NdbOperation* theLastExecOpInList; // Last executing operation in list. 
- NdbOperation* theCompletedFirstOp; // First operation in completed - // operation list. + NdbOperation* theCompletedFirstOp; // First & last operation in completed + NdbOperation* theCompletedLastOp; // operation list. Uint32 theNoOfOpSent; // How many operations have been sent Uint32 theNoOfOpCompleted; // How many operations have completed diff --git a/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/ndb/include/ndbapi/NdbIndexScanOperation.hpp index 66b3fc9d43b..a3388f62f58 100644 --- a/ndb/include/ndbapi/NdbIndexScanOperation.hpp +++ b/ndb/include/ndbapi/NdbIndexScanOperation.hpp @@ -113,7 +113,7 @@ public: * Reset bounds and put operation in list that will be * sent on next execute */ - int reset_bounds(); + int reset_bounds(bool forceSend = false); bool getSorted() const { return m_ordered; } private: @@ -127,8 +127,8 @@ private: virtual NdbRecAttr* getValue_impl(const NdbColumnImpl*, char*); void fix_get_values(); - int next_result_ordered(bool fetchAllowed); - int send_next_scan_ordered(Uint32 idx); + int next_result_ordered(bool fetchAllowed, bool forceSend = false); + int send_next_scan_ordered(Uint32 idx, bool forceSend = false); int compare(Uint32 key, Uint32 cols, const NdbReceiver*, const NdbReceiver*); Uint32 m_sort_columns; diff --git a/ndb/include/ndbapi/NdbResultSet.hpp b/ndb/include/ndbapi/NdbResultSet.hpp index 478daf8aad2..dc0288a380c 100644 --- a/ndb/include/ndbapi/NdbResultSet.hpp +++ b/ndb/include/ndbapi/NdbResultSet.hpp @@ -89,17 +89,17 @@ public: * - 1: if there are no more tuples to scan. * - 2: if there are no more cached records in NdbApi */ - int nextResult(bool fetchAllowed = true); + int nextResult(bool fetchAllowed = true, bool forceSend = false); /** * Close result set (scan) */ - void close(); + void close(bool forceSend = false); /** * Restart */ - int restart(); + int restart(bool forceSend = false); /** * Transfer scan operation to an updating transaction. 
Use this function diff --git a/ndb/include/ndbapi/NdbScanOperation.hpp b/ndb/include/ndbapi/NdbScanOperation.hpp index 2e4d173ac75..3c95c79e776 100644 --- a/ndb/include/ndbapi/NdbScanOperation.hpp +++ b/ndb/include/ndbapi/NdbScanOperation.hpp @@ -90,11 +90,11 @@ protected: NdbScanOperation(Ndb* aNdb); virtual ~NdbScanOperation(); - int nextResult(bool fetchAllowed = true); + int nextResult(bool fetchAllowed = true, bool forceSend = false); virtual void release(); - void closeScan(); - int close_impl(class TransporterFacade*); + void closeScan(bool forceSend = false); + int close_impl(class TransporterFacade*, bool forceSend = false); // Overloaded methods from NdbCursorOperation int executeCursor(int ProcessorId); @@ -103,6 +103,7 @@ protected: int init(const NdbTableImpl* tab, NdbConnection* myConnection); int prepareSend(Uint32 TC_ConnectPtr, Uint64 TransactionId); int doSend(int ProcessorId); + void checkForceSend(bool forceSend); virtual void setErrorCode(int aErrorCode); virtual void setErrorCodeAbort(int aErrorCode); @@ -138,7 +139,7 @@ protected: Uint32 m_sent_receivers_count; // NOTE needs mutex to access NdbReceiver** m_sent_receivers; // receive thread puts them here - int send_next_scan(Uint32 cnt, bool close); + int send_next_scan(Uint32 cnt, bool close, bool forceSend = false); void receiver_delivered(NdbReceiver*); void receiver_completed(NdbReceiver*); void execCLOSE_SCAN_REP(); @@ -148,7 +149,7 @@ protected: Uint32 m_ordered; - int restart(); + int restart(bool forceSend = false); }; inline diff --git a/ndb/include/ndbapi/ndb_cluster_connection.hpp b/ndb/include/ndbapi/ndb_cluster_connection.hpp index f8e6f25ce73..59d5a038844 100644 --- a/ndb/include/ndbapi/ndb_cluster_connection.hpp +++ b/ndb/include/ndbapi/ndb_cluster_connection.hpp @@ -19,7 +19,6 @@ #define CLUSTER_CONNECTION_HPP class TransporterFacade; -class LocalConfig; class ConfigRetriever; class NdbThread; @@ -38,7 +37,6 @@ private: void connect_thread(); char *m_connect_string; TransporterFacade *m_facade; - LocalConfig *m_local_config; ConfigRetriever *m_config_retriever; NdbThread *m_connect_thread; int (*m_connect_callback)(void); diff --git a/ndb/include/util/ndb_opts.h b/ndb/include/util/ndb_opts.h index 6cba9c04449..f7ae3b5489e 100644 --- a/ndb/include/util/ndb_opts.h +++ b/ndb/include/util/ndb_opts.h @@ -32,10 +32,13 @@ 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, \ { "version", 'V', "Output version information and exit.", 0, 0, 0, \ GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, \ - { "connect-string", 'c', \ + { "ndb-connectstring", 'c', \ "Set connect string for connecting to ndb_mgmd. " \ - "<constr>=\"host=<hostname:port>[;nodeid=<id>]\". " \ - "Overides specifying entries in NDB_CONNECTSTRING and config file", \ + "Syntax: \"[nodeid=<id>;][host=]<hostname>[:<port>]\". " \ + "Overides specifying entries in NDB_CONNECTSTRING and Ndb.cfg", \ + (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0, \ + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ + { "connect-string", 'c', "same as --ndb-connectstring",\ (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0, \ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 } #else @@ -46,11 +49,14 @@ 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, \ { "version", 'V', "Output version information and exit.", 0, 0, 0, \ GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, \ - { "connect-string", 'c', \ + { "ndb-connectstring", 'c', \ "Set connect string for connecting to ndb_mgmd. " \ - "<constr>=\"host=<hostname:port>[;nodeid=<id>]\". 
" \ - "Overides specifying entries in NDB_CONNECTSTRING and config file", \ + "Syntax: \"[nodeid=<id>;][host=]<hostname>[:<port>]\". " \ + "Overides specifying entries in NDB_CONNECTSTRING and Ndb.cfg", \ (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0, \ + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ + { "connect-string", 'c', "same as --ndb-connectstring",\ + (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0,\ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 } #endif diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp index 8a09be9a0a7..59be0affcb4 100644 --- a/ndb/src/common/debugger/EventLogger.cpp +++ b/ndb/src/common/debugger/EventLogger.cpp @@ -421,6 +421,11 @@ EventLogger::getText(char * m_text, size_t m_text_len, "%sArbitration check lost - less than 1/2 nodes left", theNodeId); break; + case ArbitCode::WinNodes: + BaseString::snprintf(m_text, m_text_len, + "%sArbitration check won - all node groups and more than 1/2 nodes left", + theNodeId); + break; case ArbitCode::WinGroups: BaseString::snprintf(m_text, m_text_len, "%sArbitration check won - node group majority", diff --git a/ndb/src/common/debugger/signaldata/ScanTab.cpp b/ndb/src/common/debugger/signaldata/ScanTab.cpp index 72a4d9f94b9..0755ee0a856 100644 --- a/ndb/src/common/debugger/signaldata/ScanTab.cpp +++ b/ndb/src/common/debugger/signaldata/ScanTab.cpp @@ -30,13 +30,14 @@ printSCANTABREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiv fprintf(output, " apiConnectPtr: H\'%.8x", sig->apiConnectPtr); fprintf(output, " requestInfo: H\'%.8x:\n", requestInfo); - fprintf(output, " Parallellism: %u, Batch: %u LockMode: %u, Keyinfo: %u Holdlock: %u, RangeScan: %u\n", + fprintf(output, " Parallellism: %u, Batch: %u LockMode: %u, Keyinfo: %u Holdlock: %u, RangeScan: %u ReadCommitted: %u\n", sig->getParallelism(requestInfo), sig->getScanBatch(requestInfo), sig->getLockMode(requestInfo), + sig->getKeyinfoFlag(requestInfo), sig->getHoldLockFlag(requestInfo), sig->getRangeScanFlag(requestInfo), - sig->getKeyinfoFlag(requestInfo)); + sig->getReadCommittedFlag(requestInfo)); Uint32 keyLen = (sig->attrLenKeyLen >> 16); Uint32 attrLen = (sig->attrLenKeyLen & 0xFFFF); diff --git a/ndb/src/common/logger/LogHandler.cpp b/ndb/src/common/logger/LogHandler.cpp index 4fab957fc50..a76cb622878 100644 --- a/ndb/src/common/logger/LogHandler.cpp +++ b/ndb/src/common/logger/LogHandler.cpp @@ -117,10 +117,9 @@ LogHandler::parseParams(const BaseString &_params) { _params.split(v_args, ","); for(size_t i=0; i < v_args.size(); i++) { Vector<BaseString> v_param_value; - - v_args[i].split(v_param_value, "=", 2); - if(v_param_value.size() == 2 && - !setParam(v_param_value[0], v_param_value[1])) + if(v_args[i].split(v_param_value, "=", 2) != 2) + ret = false; + else if (!setParam(v_param_value[0], v_param_value[1])) ret = false; } diff --git a/ndb/src/common/logger/Logger.cpp b/ndb/src/common/logger/Logger.cpp index 00a2fae67bc..1dc3bd43716 100644 --- a/ndb/src/common/logger/Logger.cpp +++ b/ndb/src/common/logger/Logger.cpp @@ -169,10 +169,13 @@ Logger::addHandler(const BaseString &logstring) { size_t i; Vector<BaseString> logdest; Vector<LogHandler *>loghandlers; + DBUG_ENTER("Logger::addHandler"); logstring.split(logdest, ";"); for(i = 0; i < logdest.size(); i++) { + DBUG_PRINT("info",("adding: %s",logdest[i])); + Vector<BaseString> v_type_args; logdest[i].split(v_type_args, ":", 2); @@ -191,16 +194,16 @@ Logger::addHandler(const BaseString &logstring) { handler = new ConsoleLogHandler(); 
if(handler == NULL) - return false; + DBUG_RETURN(false); if(!handler->parseParams(params)) - return false; + DBUG_RETURN(false); loghandlers.push_back(handler); } for(i = 0; i < loghandlers.size(); i++) addHandler(loghandlers[i]); - return true; /* @todo handle errors */ + DBUG_RETURN(true); /* @todo handle errors */ } bool diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp index a1b979f62d8..0af5eb2f83c 100644 --- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp +++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp @@ -20,7 +20,6 @@ #include <ConfigRetriever.hpp> #include <SocketServer.hpp> -#include "LocalConfig.hpp" #include <NdbSleep.h> #include <NdbOut.hpp> @@ -45,90 +44,62 @@ //**************************************************************************** //**************************************************************************** -ConfigRetriever::ConfigRetriever(LocalConfig &local_config, +ConfigRetriever::ConfigRetriever(const char * _connect_string, Uint32 version, Uint32 node_type) - : _localConfig(local_config) { - m_handle= 0; m_version = version; m_node_type = node_type; - _ownNodeId = _localConfig._ownNodeId; -} + _ownNodeId= 0; -ConfigRetriever::~ConfigRetriever(){ + m_handle= ndb_mgm_create_handle(); + if (m_handle == 0) { + setError(CR_ERROR, "Unable to allocate mgm handle"); + return; + } + + if (ndb_mgm_set_connectstring(m_handle, _connect_string)) + { + setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle)); + return; + } + resetError(); +} + +ConfigRetriever::~ConfigRetriever() +{ if (m_handle) { ndb_mgm_disconnect(m_handle); ndb_mgm_destroy_handle(&m_handle); } } +Uint32 +ConfigRetriever::get_configuration_nodeid() const +{ + return ndb_mgm_get_configuration_nodeid(m_handle); +} + +Uint32 ConfigRetriever::get_mgmd_port() const +{ + return ndb_mgm_get_connected_port(m_handle); +} + +const char *ConfigRetriever::get_mgmd_host() const +{ + return ndb_mgm_get_connected_host(m_handle); +} //**************************************************************************** //**************************************************************************** int -ConfigRetriever::do_connect(int exit_on_connect_failure){ - - m_mgmd_port= 0; - m_mgmd_host= 0; - - if(!m_handle) - m_handle= ndb_mgm_create_handle(); - - if (m_handle == 0) { - setError(CR_ERROR, "Unable to allocate mgm handle"); - return -1; - } - - int retry = 1; - int retry_max = 12; // Max number of retry attempts - int retry_interval= 5; // Seconds between each retry - while(retry < retry_max){ - Uint32 type = CR_ERROR; - BaseString tmp; - for (unsigned int i = 0; i<_localConfig.ids.size(); i++){ - MgmtSrvrId * m = &_localConfig.ids[i]; - DBUG_PRINT("info",("trying %s:%d", - m->name.c_str(), - m->port)); - switch(m->type){ - case MgmId_TCP: - tmp.assfmt("%s:%d", m->name.c_str(), m->port); - if (ndb_mgm_connect(m_handle, tmp.c_str()) == 0) { - m_mgmd_port= m->port; - m_mgmd_host= m->name.c_str(); - DBUG_PRINT("info",("connected to ndb_mgmd at %s:%d", - m_mgmd_host, - m_mgmd_port)); - return 0; - } - setError(CR_RETRY, ndb_mgm_get_latest_error_desc(m_handle)); - case MgmId_File: - break; - } - } - if(latestErrorType == CR_RETRY){ - DBUG_PRINT("info",("CR_RETRY")); - if (exit_on_connect_failure) - return 1; - REPORT_WARNING("Failed to retrieve cluster configuration"); - ndbout << "(Cause of failure: " << getErrorString() << ")" << endl; - ndbout << "Attempt " << retry << " of " << retry_max << ". " - << "Trying again in "<< retry_interval <<" seconds..." 
- << endl << endl; - NdbSleep_SecSleep(retry_interval); - } else { - break; - } - retry++; - } - - ndb_mgm_destroy_handle(&m_handle); - m_handle= 0; - m_mgmd_port= 0; - m_mgmd_host= 0; - return -1; +ConfigRetriever::do_connect(int no_retries, + int retry_delay_in_seconds, int verbose) +{ + return + (ndb_mgm_connect(m_handle,no_retries,retry_delay_in_seconds,verbose)==0) ? + 0 : -1; } //**************************************************************************** @@ -140,22 +111,9 @@ ConfigRetriever::getConfig() { struct ndb_mgm_configuration * p = 0; - if(m_handle != 0){ + if(m_handle != 0) p = getConfig(m_handle); - } else { - for (unsigned int i = 0; i<_localConfig.ids.size(); i++){ - MgmtSrvrId * m = &_localConfig.ids[i]; - switch(m->type){ - case MgmId_File: - p = getConfig(m->name.c_str()); - break; - case MgmId_TCP: - break; - } - if(p) - break; - } - } + if(p == 0) return 0; @@ -227,6 +185,16 @@ ConfigRetriever::setError(ErrorType et, const char * s){ latestErrorType = et; } +void +ConfigRetriever::resetError(){ + setError(CR_NO_ERROR,0); +} + +int +ConfigRetriever::hasError() +{ + return latestErrorType != CR_NO_ERROR; +} const char * ConfigRetriever::getErrorString(){ @@ -341,16 +309,23 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32 } Uint32 -ConfigRetriever::allocNodeId(){ - unsigned nodeid= _ownNodeId; - - if(m_handle != 0){ - int res= ndb_mgm_alloc_nodeid(m_handle, m_version, &nodeid, m_node_type); - if(res != 0) { - setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle)); - return 0; +ConfigRetriever::allocNodeId(int no_retries, int retry_delay_in_seconds) +{ + _ownNodeId= 0; + if(m_handle != 0) + { + while (1) + { + int res= ndb_mgm_alloc_nodeid(m_handle, m_version, m_node_type); + if(res >= 0) + return _ownNodeId= (Uint32)res; + if (no_retries == 0) + break; + no_retries--; + NdbSleep_SecSleep(retry_delay_in_seconds); } - } - - return _ownNodeId= nodeid; + setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle)); + } else + setError(CR_ERROR, "management server handle not initialized"); + return 0; } diff --git a/ndb/src/common/util/version.c b/ndb/src/common/util/version.c index 82acd949c46..f2b3d5bd522 100644 --- a/ndb/src/common/util/version.c +++ b/ndb/src/common/util/version.c @@ -70,7 +70,6 @@ struct NdbUpGradeCompatible { #ifndef TEST_VERSION struct NdbUpGradeCompatible ndbCompatibleTable_full[] = { { MAKE_VERSION(3,5,2), MAKE_VERSION(3,5,1), UG_Exact }, - { MAKE_VERSION(4,1,8), MAKE_VERSION(3,5,4), UG_Exact }, /* Aligned version with MySQL */ { 0, 0, UG_Null } }; diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt index 7ff03684cff..5193d3eae9d 100644 --- a/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/ndb/src/kernel/blocks/ERROR_codes.txt @@ -1,7 +1,7 @@ Next QMGR 1 Next NDBCNTR 1000 Next NDBFS 2000 -Next DBACC 3001 +Next DBACC 3002 Next DBTUP 4013 Next DBLQH 5042 Next DBDICT 6006 @@ -393,6 +393,7 @@ Failed Create Table: -------------------- 7173: Create table failed due to not sufficient number of fragment or replica records. 
+3001: Fail create 1st fragment 4007 12001: Fail create 1st fragment 4008 12002: Fail create 2nd fragment 4009 12003: Fail create 1st attribute in 1st fragment diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index c275e5382f7..5c7cc597672 100644 --- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -1062,7 +1062,21 @@ void Dbacc::execACCFRAGREQ(Signal* signal) { const AccFragReq * const req = (AccFragReq*)&signal->theData[0]; jamEntry(); + if (ERROR_INSERTED(3001)) { + jam(); + addFragRefuse(signal, 1); + CLEAR_ERROR_INSERT_VALUE; + return; + } tabptr.i = req->tableId; +#ifndef VM_TRACE + // config mismatch - do not crash if release compiled + if (tabptr.i >= ctablesize) { + jam(); + addFragRefuse(signal, 800); + return; + } +#endif ptrCheckGuard(tabptr, ctablesize, tabrec); ndbrequire((req->reqInfo & 0xF) == ZADDFRAG); ndbrequire(!getrootfragmentrec(signal, rootfragrecptr, req->fragId)); diff --git a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp index 14fa262f871..0a2d50cb876 100644 --- a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp @@ -147,7 +147,6 @@ public: Uint32 nfConnect; Uint32 table; Uint32 userpointer; - Uint32 nodeCount; BlockReference userblockref; }; typedef Ptr<ConnectRecord> ConnectRecordPtr; diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 76aa745c3e0..4592b121c7e 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -7080,24 +7080,22 @@ void Dbdih::execDIGETPRIMREQ(Signal* signal) ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE); connectPtr.i = signal->theData[0]; - if(connectPtr.i != RNIL){ + if(connectPtr.i != RNIL) + { jam(); ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord); - ndbrequire(connectPtr.p->connectState == ConnectRecord::INUSE); - getFragstore(tabPtr.p, fragId, fragPtr); - connectPtr.p->nodeCount = extractNodeInfo(fragPtr.p, connectPtr.p->nodes); signal->theData[0] = connectPtr.p->userpointer; - signal->theData[1] = passThrough; - signal->theData[2] = connectPtr.p->nodes[0]; - sendSignal(connectPtr.p->userblockref, GSN_DIGETPRIMCONF, signal, 3, JBB); - return; - }//if - //connectPtr.i == RNIL -> question without connect record + } + else + { + jam(); + signal->theData[0] = RNIL; + } + Uint32 nodes[MAX_REPLICAS]; getFragstore(tabPtr.p, fragId, fragPtr); Uint32 count = extractNodeInfo(fragPtr.p, nodes); - signal->theData[0] = RNIL; signal->theData[1] = passThrough; signal->theData[2] = nodes[0]; signal->theData[3] = nodes[1]; diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index 739c3c741fb..0c63cb5fe17 100644 --- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -550,6 +550,11 @@ public: UintR scanErrorCounter; UintR scanLocalFragid; UintR scanSchemaVersion; + + /** + * This is _always_ main table, even in range scan + * in which case scanTcrec->fragmentptr is different + */ Uint32 fragPtrI; UintR scanStoredProcId; ScanState scanState; @@ -2925,4 +2930,23 @@ Dblqh::ScanRecord::check_scan_batch_completed() const (max_bytes > 0 && (m_curr_batch_size_bytes >= max_bytes)); } +inline +void +Dblqh::i_get_acc_ptr(ScanRecord* scanP, Uint32* &acc_ptr, Uint32 index) +{ + if (index == 0) { + acc_ptr= (Uint32*)&scanP->scan_acc_op_ptr[0]; + } else { + Uint32 attr_buf_index, attr_buf_rec; + + AttrbufPtr 
regAttrPtr; + jam(); + attr_buf_rec= (index + 31) / 32; + attr_buf_index= (index - 1) & 31; + regAttrPtr.i= scanP->scan_acc_op_ptr[attr_buf_rec]; + ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf); + acc_ptr= (Uint32*)®AttrPtr.p->attrbuf[attr_buf_index]; + } +} + #endif diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 5622706a96c..c106a6ddfac 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -3084,6 +3084,7 @@ void Dblqh::execATTRINFO(Signal* signal) return; break; default: + ndbout_c("%d", regTcPtr->transactionState); ndbrequire(false); break; }//switch @@ -7161,10 +7162,7 @@ void Dblqh::continueScanNextReqLab(Signal* signal) // Update timer on tcConnectRecord tcConnectptr.p->tcTimer = cLqhTimeOutCount; - init_acc_ptr_list(scanptr.p); - scanptr.p->m_curr_batch_size_rows = 0; - scanptr.p->m_curr_batch_size_bytes= 0; scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT; scanNextLoopLab(signal); }//Dblqh::continueScanNextReqLab() @@ -7363,22 +7361,32 @@ void Dblqh::scanLockReleasedLab(Signal* signal) tcConnectptr.i = scanptr.p->scanTcrec; ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); releaseActiveFrag(signal); + if (scanptr.p->scanReleaseCounter == scanptr.p->m_curr_batch_size_rows) { if ((scanptr.p->scanErrorCounter > 0) || (scanptr.p->scanCompletedStatus == ZTRUE)) { jam(); + scanptr.p->m_curr_batch_size_rows = 0; + scanptr.p->m_curr_batch_size_bytes = 0; closeScanLab(signal); } else if (scanptr.p->check_scan_batch_completed() && scanptr.p->scanLockHold != ZTRUE) { jam(); scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ; sendScanFragConf(signal, ZFALSE); + } else if (scanptr.p->m_last_row && !scanptr.p->scanLockHold) { + jam(); + closeScanLab(signal); + return; } else { jam(); /* - We came here after releasing locks after receiving SCAN_NEXTREQ from TC. We only - come here when scanHoldLock == ZTRUE - */ + * We came here after releasing locks after + * receiving SCAN_NEXTREQ from TC. We only come here + * when scanHoldLock == ZTRUE + */ + scanptr.p->m_curr_batch_size_rows = 0; + scanptr.p->m_curr_batch_size_bytes = 0; continueScanNextReqLab(signal); }//if } else if (scanptr.p->scanReleaseCounter < scanptr.p->m_curr_batch_size_rows) { @@ -7465,25 +7473,6 @@ Dblqh::init_acc_ptr_list(ScanRecord* scanP) scanP->scan_acc_index = 0; } -inline -void -Dblqh::i_get_acc_ptr(ScanRecord* scanP, Uint32* &acc_ptr, Uint32 index) -{ - if (index == 0) { - acc_ptr= (Uint32*)&scanP->scan_acc_op_ptr[0]; - } else { - Uint32 attr_buf_index, attr_buf_rec; - - AttrbufPtr regAttrPtr; - jam(); - attr_buf_rec= (index + 31) / 32; - attr_buf_index= (index - 1) & 31; - regAttrPtr.i= scanP->scan_acc_op_ptr[attr_buf_rec]; - ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf); - acc_ptr= (Uint32*)®AttrPtr.p->attrbuf[attr_buf_index]; - } -} - Uint32 Dblqh::get_acc_ptr_from_scan_record(ScanRecord* scanP, Uint32 index, @@ -7714,6 +7703,9 @@ void Dblqh::abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode){ jam(); scanptr.i = scan_ptr_i; c_scanRecordPool.getPtr(scanptr); + + fragptr.i = tcConnectptr.p->fragmentptr; + ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord); finishScanrec(signal); releaseScanrec(signal); tcConnectptr.p->transactionState = TcConnectionrec::IDLE; @@ -8007,6 +7999,13 @@ void Dblqh::nextScanConfScanLab(Signal* signal) /************************************************************* * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED. 
************************************************************ */ + if (!scanptr.p->scanLockHold) + { + jam(); + closeScanLab(signal); + return; + } + if (scanptr.p->scanCompletedStatus == ZTRUE) { if ((scanptr.p->scanLockHold == ZTRUE) && (scanptr.p->m_curr_batch_size_rows > 0)) { @@ -8507,8 +8506,6 @@ void Dblqh::tupScanCloseConfLab(Signal* signal) ScanFragRef::SignalLength, JBB); } else { jam(); - scanptr.p->m_curr_batch_size_rows = 0; - scanptr.p->m_curr_batch_size_bytes= 0; sendScanFragConf(signal, ZSCAN_FRAG_CLOSED); }//if finishScanrec(signal); @@ -8580,7 +8577,7 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq) tFragPtr.i = fragptr.p->tableFragptr; ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord); scanptr.p->fragPtrI = fragptr.p->tableFragptr; - + /** * !idx uses 1 - (MAX_PARALLEL_SCANS_PER_FRAG - 1) = 1-11 * idx uses from MAX_PARALLEL_SCANS_PER_FRAG - MAX = 12-42) @@ -8589,10 +8586,10 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq) Uint32 stop = (idx ? MAX_PARALLEL_INDEX_SCANS_PER_FRAG : MAX_PARALLEL_SCANS_PER_FRAG - 1); stop += start; Uint32 free = tFragPtr.p->m_scanNumberMask.find(start); - + if(free == Fragrecord::ScanNumberMask::NotFound || free >= stop){ jam(); - + if(scanPrio == 0){ jam(); return ScanFragRef::ZTOO_MANY_ACTIVE_SCAN_ERROR; @@ -8603,16 +8600,15 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq) */ scanptr.p->scanState = ScanRecord::IN_QUEUE; LocalDLFifoList<ScanRecord> queue(c_scanRecordPool, - tFragPtr.p->m_queuedScans); + fragptr.p->m_queuedScans); queue.add(scanptr); return ZOK; } - scanptr.p->scanNumber = free; tFragPtr.p->m_scanNumberMask.clear(free);// Update mask - - LocalDLList<ScanRecord> active(c_scanRecordPool, tFragPtr.p->m_activeScans); + + LocalDLList<ScanRecord> active(c_scanRecordPool, fragptr.p->m_activeScans); active.add(scanptr); if(scanptr.p->scanKeyinfoFlag){ jam(); @@ -8672,12 +8668,8 @@ void Dblqh::finishScanrec(Signal* signal) { release_acc_ptr_list(scanptr.p); - FragrecordPtr tFragPtr; - tFragPtr.i = scanptr.p->fragPtrI; - ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord); - LocalDLFifoList<ScanRecord> queue(c_scanRecordPool, - tFragPtr.p->m_queuedScans); + fragptr.p->m_queuedScans); if(scanptr.p->scanState == ScanRecord::IN_QUEUE){ jam(); @@ -8695,9 +8687,13 @@ void Dblqh::finishScanrec(Signal* signal) ndbrequire(tmp.p == scanptr.p); } - LocalDLList<ScanRecord> scans(c_scanRecordPool, tFragPtr.p->m_activeScans); + LocalDLList<ScanRecord> scans(c_scanRecordPool, fragptr.p->m_activeScans); scans.release(scanptr); + FragrecordPtr tFragPtr; + tFragPtr.i = scanptr.p->fragPtrI; + ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord); + const Uint32 scanNumber = scanptr.p->scanNumber; ndbrequire(!tFragPtr.p->m_scanNumberMask.get(scanNumber)); ScanRecordPtr restart; @@ -8724,7 +8720,7 @@ void Dblqh::finishScanrec(Signal* signal) ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); restart.p->scanNumber = scanNumber; restart.p->scanState = ScanRecord::WAIT_ACC_SCAN; - + queue.remove(restart); scans.add(restart); if(restart.p->scanKeyinfoFlag){ @@ -8912,6 +8908,13 @@ void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted) conf->total_len= total_len; sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGCONF, signal, ScanFragConf::SignalLength, JBB); + + if(!scanptr.p->scanLockHold) + { + jam(); + scanptr.p->m_curr_batch_size_rows = 0; + scanptr.p->m_curr_batch_size_bytes= 0; + } }//Dblqh::sendScanFragConf() /* 
######################################################################### */ diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp index a209df24c44..fb90ccc8c90 100644 --- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp +++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp @@ -1054,9 +1054,8 @@ public: // Id of the ScanRecord this fragment scan belongs to Uint32 scanRec; - // The maximum number of operations that can be scanned before - // returning to TC - Uint16 scanFragConcurrency; + // The value of fragmentCompleted in the last received SCAN_FRAGCONF + Uint8 m_scan_frag_conf_status; inline void startFragTimer(Uint32 timeVal){ scanFragTimer = timeVal; @@ -1193,8 +1192,10 @@ public: // Number of operation records per scanned fragment // Number of operations in first batch // Max number of bytes per batch - Uint16 noOprecPerFrag; - Uint16 first_batch_size; + union { + Uint16 first_batch_size_rows; + Uint16 batch_size_rows; + }; Uint32 batch_byte_size; Uint32 scanRequestInfo; // ScanFrag format diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index d8b3ee10532..07dbb370ec6 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -8646,9 +8646,9 @@ void Dbtc::initScanrec(ScanRecordPtr scanptr, scanptr.p->scanTableref = tabptr.i; scanptr.p->scanSchemaVersion = scanTabReq->tableSchemaVersion; scanptr.p->scanParallel = scanParallel; - scanptr.p->noOprecPerFrag = noOprecPerFrag; - scanptr.p->first_batch_size= scanTabReq->first_batch_size; - scanptr.p->batch_byte_size= scanTabReq->batch_byte_size; + scanptr.p->first_batch_size_rows = scanTabReq->first_batch_size; + scanptr.p->batch_byte_size = scanTabReq->batch_byte_size; + scanptr.p->batch_size_rows = noOprecPerFrag; Uint32 tmp = 0; const UintR ri = scanTabReq->requestInfo; @@ -8672,7 +8672,6 @@ void Dbtc::initScanrec(ScanRecordPtr scanptr, ndbrequire(list.seize(ptr)); ptr.p->scanRec = scanptr.i; ptr.p->scanFragId = 0; - ptr.p->scanFragConcurrency = noOprecPerFrag; ptr.p->m_apiPtr = cdata[i]; }//for @@ -8945,6 +8944,25 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal) scanptr.i = scanFragptr.p->scanRec; ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); + /** + * This must be false as select count(*) otherwise + * can "pass" committing on backup fragments and + * get incorrect row count + */ + if(false && ScanFragReq::getReadCommittedFlag(scanptr.p->scanRequestInfo)) + { + jam(); + Uint32 max = 3+signal->theData[6]; + Uint32 nodeid = getOwnNodeId(); + for(Uint32 i = 3; i<max; i++) + if(signal->theData[i] == nodeid) + { + jam(); + tnodeid = nodeid; + break; + } + } + { /** * Check table @@ -9141,6 +9159,7 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal) const ScanFragConf * const conf = (ScanFragConf*)&signal->theData[0]; const Uint32 noCompletedOps = conf->completedOps; + const Uint32 status = conf->fragmentCompleted; scanFragptr.i = conf->senderData; c_scan_frag_pool.getPtr(scanFragptr); @@ -9163,11 +9182,9 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal) ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE); - const Uint32 status = conf->fragmentCompleted; - if(scanptr.p->scanState == ScanRecord::CLOSING_SCAN){ jam(); - if(status == ZFALSE){ + if(status == 0){ /** * We have started closing = we sent a close -> ignore this */ @@ -9184,11 +9201,11 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal) return; } - if(status == ZCLOSED && scanptr.p->scanNextFragId < scanptr.p->scanNoFrag){ + if(noCompletedOps == 0 && status != 0 && + 
scanptr.p->scanNextFragId < scanptr.p->scanNoFrag){ /** * Start on next fragment */ - ndbrequire(noCompletedOps == 0); scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF; scanFragptr.p->startFragTimer(ctcTimer); @@ -9218,6 +9235,7 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal) scanptr.p->m_queued_count++; } + scanFragptr.p->m_scan_frag_conf_status = status; scanFragptr.p->m_ops = noCompletedOps; scanFragptr.p->m_totalLen = total_len; scanFragptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY; @@ -9311,7 +9329,6 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal) /********************************************************************* * APPLICATION IS CLOSING THE SCAN. **********************************************************************/ - ndbrequire(len == 0); close_scan_req(signal, scanptr, true); return; }//if @@ -9330,11 +9347,12 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal) // Copy op ptrs so I dont overwrite them when sending... memcpy(signal->getDataPtrSend()+25, signal->getDataPtr()+4, 4 * len); - ScanFragNextReq * nextReq = (ScanFragNextReq*)&signal->theData[0]; - nextReq->closeFlag = ZFALSE; - nextReq->transId1 = apiConnectptr.p->transid[0]; - nextReq->transId2 = apiConnectptr.p->transid[1]; - nextReq->batch_size_bytes= scanP->batch_byte_size; + ScanFragNextReq tmp; + tmp.closeFlag = ZFALSE; + tmp.transId1 = apiConnectptr.p->transid[0]; + tmp.transId2 = apiConnectptr.p->transid[1]; + tmp.batch_size_rows = scanP->batch_size_rows; + tmp.batch_size_bytes = scanP->batch_byte_size; ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags); ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags); @@ -9344,15 +9362,37 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal) c_scan_frag_pool.getPtr(scanFragptr); ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::DELIVERED); - scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE; scanFragptr.p->startFragTimer(ctcTimer); - scanFragptr.p->m_ops = 0; - nextReq->senderData = scanFragptr.i; - nextReq->batch_size_rows= scanFragptr.p->scanFragConcurrency; - sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal, - ScanFragNextReq::SignalLength, JBB); + if(scanFragptr.p->m_scan_frag_conf_status) + { + /** + * last scan was complete + */ + jam(); + ndbrequire(scanptr.p->scanNextFragId < scanptr.p->scanNoFrag); + scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF; + + tcConnectptr.i = scanptr.p->scanTcrec; + ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); + scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++; + signal->theData[0] = tcConnectptr.p->dihConnectptr; + signal->theData[1] = scanFragptr.i; + signal->theData[2] = scanptr.p->scanTableref; + signal->theData[3] = scanFragptr.p->scanFragId; + sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB); + } + else + { + jam(); + scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE; + ScanFragNextReq * req = (ScanFragNextReq*)signal->getDataPtrSend(); + * req = tmp; + req->senderData = scanFragptr.i; + sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal, + ScanFragNextReq::SignalLength, JBB); + } delivered.remove(scanFragptr); running.add(scanFragptr); }//for @@ -9416,7 +9456,7 @@ Dbtc::close_scan_req(Signal* signal, ScanRecordPtr scanPtr, bool req_received){ ndbrequire(curr.p->scanFragState == ScanFragRec::DELIVERED); delivered.remove(curr); - if(curr.p->m_ops > 0){ + if(curr.p->m_ops > 0 && curr.p->m_scan_frag_conf_status == 0){ jam(); running.add(curr); curr.p->scanFragState = ScanFragRec::LQH_ACTIVE; @@ 
-9551,7 +9591,7 @@ void Dbtc::sendScanFragReq(Signal* signal, req->transId1 = apiConnectptr.p->transid[0]; req->transId2 = apiConnectptr.p->transid[1]; req->clientOpPtr = scanFragP->m_apiPtr; - req->batch_size_rows= scanFragP->scanFragConcurrency; + req->batch_size_rows= scanP->batch_size_rows; req->batch_size_bytes= scanP->batch_byte_size; sendSignal(scanFragP->lqhBlockref, GSN_SCAN_FRAGREQ, signal, ScanFragReq::SignalLength, JBB); @@ -9573,6 +9613,8 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) { jam(); ops += 21; } + + Uint32 left = scanPtr.p->scanNoFrag - scanPtr.p->scanNextFragId; ScanTabConf * conf = (ScanTabConf*)&signal->theData[0]; conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect; @@ -9588,24 +9630,25 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) { ScanFragRecPtr curr = ptr; // Remove while iterating... queued.next(ptr); + bool done = curr.p->m_scan_frag_conf_status && --left; + * ops++ = curr.p->m_apiPtr; - * ops++ = curr.i; + * ops++ = done ? RNIL : curr.i; * ops++ = (curr.p->m_totalLen << 10) + curr.p->m_ops; queued.remove(curr); - if(curr.p->m_ops > 0){ + if(!done){ delivered.add(curr); curr.p->scanFragState = ScanFragRec::DELIVERED; curr.p->stopFragTimer(); } else { - (* --ops) = ScanTabConf::EndOfData; ops++; c_scan_frag_pool.release(curr); curr.p->scanFragState = ScanFragRec::COMPLETED; curr.p->stopFragTimer(); } } } - + if(scanPtr.p->m_delivered_scan_frags.isEmpty() && scanPtr.p->m_running_scan_frags.isEmpty()){ conf->requestInfo = op_count | ScanTabConf::EndOfData; @@ -10424,9 +10467,8 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal) sfp.i, sfp.p->scanFragState, sfp.p->scanFragId); - infoEvent(" nodeid=%d, concurr=%d, timer=%d", + infoEvent(" nodeid=%d, timer=%d", refToNode(sfp.p->lqhBlockref), - sfp.p->scanFragConcurrency, sfp.p->scanFragTimer); } @@ -10504,7 +10546,7 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal) sp.p->scanAiLength, sp.p->scanParallel, sp.p->scanReceivedOperations, - sp.p->noOprecPerFrag); + sp.p->batch_size_rows); infoEvent(" schv=%d, tab=%d, sproc=%d", sp.p->scanSchemaVersion, sp.p->scanTableref, diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp index 914dba00674..405f790954e 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp @@ -69,6 +69,17 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) Uint32 noOfAttributeGroups = signal->theData[12]; Uint32 globalCheckpointIdIndicator = signal->theData[13]; +#ifndef VM_TRACE + // config mismatch - do not crash if release compiled + if (regTabPtr.i >= cnoOfTablerec) { + ljam(); + signal->theData[0] = userptr; + signal->theData[1] = 800; + sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); + return; + } +#endif + ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); if (cfirstfreeFragopr == RNIL) { ljam(); diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index a433d72744e..da8596076ec 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -2947,6 +2947,12 @@ void Qmgr::sendPrepFailReq(Signal* signal, Uint16 aNode) */ /** + * Should < 1/2 nodes die unconditionally. Affects only >= 3-way + * replication. + */ +static const bool g_ndb_arbit_one_half_rule = false; + +/** * Config signals are logically part of CM_INIT. 
*/ void @@ -3157,7 +3163,8 @@ Qmgr::handleArbitCheck(Signal* signal) ndbrequire(cpresident == getOwnNodeId()); NodeBitmask ndbMask; computeArbitNdbMask(ndbMask); - if (2 * ndbMask.count() < cnoOfNodes) { + if (g_ndb_arbit_one_half_rule && + 2 * ndbMask.count() < cnoOfNodes) { jam(); arbitRec.code = ArbitCode::LoseNodes; } else { @@ -3181,6 +3188,11 @@ Qmgr::handleArbitCheck(Signal* signal) case CheckNodeGroups::Partitioning: jam(); arbitRec.code = ArbitCode::Partitioning; + if (g_ndb_arbit_one_half_rule && + 2 * ndbMask.count() > cnoOfNodes) { + jam(); + arbitRec.code = ArbitCode::WinNodes; + } break; default: ndbrequire(false); @@ -3190,8 +3202,12 @@ Qmgr::handleArbitCheck(Signal* signal) switch (arbitRec.code) { case ArbitCode::LoseNodes: jam(); + case ArbitCode::LoseGroups: + jam(); goto crashme; - case ArbitCode::WinGroups: + case ArbitCode::WinNodes: + jam(); + case ArbitCode::WinGroups: jam(); if (arbitRec.state == ARBIT_RUN) { jam(); @@ -3200,9 +3216,6 @@ Qmgr::handleArbitCheck(Signal* signal) arbitRec.state = ARBIT_INIT; arbitRec.newstate = true; break; - case ArbitCode::LoseGroups: - jam(); - goto crashme; case ArbitCode::Partitioning: if (arbitRec.state == ARBIT_RUN) { jam(); @@ -3762,8 +3775,7 @@ Qmgr::execARBIT_CHOOSEREF(Signal* signal) } /** - * Handle CRASH state. We must crash immediately. But it - * would be nice to wait until event reports have been sent. + * Handle CRASH state. We must crash immediately. * XXX tell other nodes in our party to crash too. */ void @@ -3773,12 +3785,11 @@ Qmgr::stateArbitCrash(Signal* signal) if (arbitRec.newstate) { jam(); CRASH_INSERTION((Uint32)910 + arbitRec.state); - arbitRec.setTimestamp(); arbitRec.code = 0; arbitRec.newstate = false; } -#if 0 +#ifdef ndb_arbit_crash_wait_for_event_report_to_get_out if (! 
(arbitRec.getTimediff() > getArbitTimeout())) return; #endif diff --git a/ndb/src/kernel/blocks/suma/Suma.cpp b/ndb/src/kernel/blocks/suma/Suma.cpp index d11d5f7176a..f6d9a0ac35a 100644 --- a/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/ndb/src/kernel/blocks/suma/Suma.cpp @@ -1888,7 +1888,7 @@ SumaParticipant::SyncRecord::nextScan(Signal* signal){ req->requestInfo = 0; req->savePointId = 0; ScanFragReq::setLockMode(req->requestInfo, 0); - ScanFragReq::setHoldLockFlag(req->requestInfo, 0); + ScanFragReq::setHoldLockFlag(req->requestInfo, 1); ScanFragReq::setKeyinfoFlag(req->requestInfo, 0); ScanFragReq::setAttrLen(req->requestInfo, attrLen); req->fragmentNoKeyLen = fd.m_fragDesc.m_fragmentNo; diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp index 926647838c9..f34e16318cd 100644 --- a/ndb/src/kernel/main.cpp +++ b/ndb/src/kernel/main.cpp @@ -19,7 +19,6 @@ #include <ndb_version.h> #include "Configuration.hpp" -#include <LocalConfig.hpp> #include <TransporterRegistry.hpp> #include "vm/SimBlockList.hpp" @@ -69,16 +68,9 @@ int main(int argc, char** argv) return NRT_Default; } - LocalConfig local_config; - if (!local_config.init(theConfig->getConnectString(),0)){ - local_config.printError(); - local_config.printUsage(); - return NRT_Default; - } - { // Do configuration signal(SIGPIPE, SIG_IGN); - theConfig->fetch_configuration(local_config); + theConfig->fetch_configuration(); } chdir(NdbConfig_get_path(0)); @@ -141,7 +133,7 @@ int main(int argc, char** argv) exit(0); } g_eventLogger.info("Ndb has terminated (pid %d) restarting", child); - theConfig->fetch_configuration(local_config); + theConfig->fetch_configuration(); } g_eventLogger.info("Angel pid: %d ndb pid: %d", getppid(), getpid()); diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp index b3a436275f7..931b4da5a17 100644 --- a/ndb/src/kernel/vm/Configuration.cpp +++ b/ndb/src/kernel/vm/Configuration.cpp @@ -17,7 +17,6 @@ #include <ndb_global.h> #include <ndb_opts.h> -#include <LocalConfig.hpp> #include "Configuration.hpp" #include <ErrorHandlingMacros.hpp> #include "GlobalData.hpp" @@ -35,6 +34,7 @@ #include <kernel_types.h> #include <ndb_limits.h> +#include <ndbapi_limits.h> #include "pc.hpp" #include <LogLevel.hpp> #include <NdbSleep.h> @@ -108,7 +108,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), bool Configuration::init(int argc, char** argv) { - const char *load_default_groups[]= { "ndbd",0 }; + const char *load_default_groups[]= { "mysql_cluster","ndbd",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; @@ -189,7 +189,7 @@ Configuration::closeConfiguration(){ } void -Configuration::fetch_configuration(LocalConfig &local_config){ +Configuration::fetch_configuration(){ /** * Fetch configuration from management server */ @@ -199,8 +199,17 @@ Configuration::fetch_configuration(LocalConfig &local_config){ m_mgmd_port= 0; m_mgmd_host= 0; - m_config_retriever= new ConfigRetriever(local_config, NDB_VERSION, NODE_TYPE_DB); - if(m_config_retriever->do_connect() == -1){ + m_config_retriever= new ConfigRetriever(getConnectString(), + NDB_VERSION, NODE_TYPE_DB); + + if (m_config_retriever->hasError()) + { + ERROR_SET(fatal, ERR_INVALID_CONFIG, + "Could not connect initialize handle to management server", + m_config_retriever->getErrorString()); + } + + if(m_config_retriever->do_connect(12,5,1) == -1){ const char * s = m_config_retriever->getErrorString(); if(s == 0) s = "No error given!"; @@ -215,13 +224,7 @@ 
Configuration::fetch_configuration(LocalConfig &local_config){ ConfigRetriever &cr= *m_config_retriever; - if((globalData.ownId = cr.allocNodeId()) == 0){ - for(Uint32 i = 0; i<3; i++){ - NdbSleep_SecSleep(3); - if((globalData.ownId = cr.allocNodeId()) != 0) - break; - } - } + globalData.ownId = cr.allocNodeId(2 /*retry*/,3 /*delay*/); if(globalData.ownId == 0){ ERROR_SET(fatal, ERR_INVALID_CONFIG, @@ -452,6 +455,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ unsigned int noOfTables = 0; unsigned int noOfUniqueHashIndexes = 0; unsigned int noOfOrderedIndexes = 0; + unsigned int noOfTriggers = 0; unsigned int noOfReplicas = 0; unsigned int noOfDBNodes = 0; unsigned int noOfAPINodes = 0; @@ -476,6 +480,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ { CFG_DB_NO_TABLES, &noOfTables, false }, { CFG_DB_NO_ORDERED_INDEXES, &noOfOrderedIndexes, false }, { CFG_DB_NO_UNIQUE_HASH_INDEXES, &noOfUniqueHashIndexes, false }, + { CFG_DB_NO_TRIGGERS, &noOfTriggers, true }, { CFG_DB_NO_REPLICAS, &noOfReplicas, false }, { CFG_DB_NO_ATTRIBUTES, &noOfAttributes, false }, { CFG_DB_NO_OPS, &noOfOperations, false }, @@ -584,12 +589,41 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ ConfigValues::Iterator it2(*ownConfig, db.m_config); it2.set(CFG_DB_NO_TABLES, noOfTables); it2.set(CFG_DB_NO_ATTRIBUTES, noOfAttributes); + { + Uint32 neededNoOfTriggers = /* types: Insert/Update/Delete/Custom */ + 3 * noOfUniqueHashIndexes + /* for unique hash indexes, I/U/D */ + 3 * NDB_MAX_ACTIVE_EVENTS + /* for events in suma, I/U/D */ + 3 * noOfTables + /* for backup, I/U/D */ + noOfOrderedIndexes; /* for ordered indexes, C */ + if (noOfTriggers < neededNoOfTriggers) + { + noOfTriggers= neededNoOfTriggers; + it2.set(CFG_DB_NO_TRIGGERS, noOfTriggers); + } + } /** * Do size calculations */ ConfigValuesFactory cfg(ownConfig); + Uint32 noOfMetaTables= noOfTables + noOfOrderedIndexes + + noOfUniqueHashIndexes; + if (noOfMetaTables > MAX_TABLES) + noOfMetaTables= MAX_TABLES; + + { + /** + * Dict Size Alt values + */ + cfg.put(CFG_DICT_ATTRIBUTE, + noOfAttributes); + + cfg.put(CFG_DICT_TABLE, + noOfMetaTables); + } + + if (noOfLocalScanRecords == 0) { noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1; } @@ -599,7 +633,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ Uint32 noOfTCScanRecords = noOfScanRecords; { - Uint32 noOfAccTables= noOfTables + noOfUniqueHashIndexes; + Uint32 noOfAccTables= noOfMetaTables/*noOfTables+noOfUniqueHashIndexes*/; /** * Acc Size Alt values */ @@ -641,19 +675,6 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ cfg.put(CFG_ACC_SCAN, noOfLocalScanRecords); } - Uint32 noOfMetaTables= noOfTables + noOfOrderedIndexes + - noOfUniqueHashIndexes; - { - /** - * Dict Size Alt values - */ - cfg.put(CFG_DICT_ATTRIBUTE, - noOfAttributes); - - cfg.put(CFG_DICT_TABLE, - noOfMetaTables); - } - { /** * Dih Size Alt values @@ -746,8 +767,8 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ noOfMetaTables); cfg.put(CFG_TUP_TABLE_DESC, - 4 * NO_OF_FRAG_PER_NODE * noOfAttributes* noOfReplicas + - 12 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas ); + 2 * 6 * NO_OF_FRAG_PER_NODE * noOfAttributes * noOfReplicas + + 2 * 10 * NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfReplicas ); cfg.put(CFG_TUP_STORED_PROC, noOfLocalScanRecords); @@ -758,9 +779,9 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ * Tux Size Alt values */ cfg.put(CFG_TUX_INDEX, - noOfOrderedIndexes); + noOfMetaTables /*noOfOrderedIndexes*/); - cfg.put(CFG_TUX_FRAGMENT, + 
cfg.put(CFG_TUX_FRAGMENT, 2 * NO_OF_FRAG_PER_NODE * noOfOrderedIndexes * noOfReplicas); cfg.put(CFG_TUX_ATTRIBUTE, diff --git a/ndb/src/kernel/vm/Configuration.hpp b/ndb/src/kernel/vm/Configuration.hpp index e4cd64f5ca8..acf0e163a84 100644 --- a/ndb/src/kernel/vm/Configuration.hpp +++ b/ndb/src/kernel/vm/Configuration.hpp @@ -21,7 +21,6 @@ #include <ndb_types.h> class ConfigRetriever; -class LocalConfig; class Configuration { public: @@ -33,7 +32,7 @@ public: */ bool init(int argc, char** argv); - void fetch_configuration(LocalConfig &local_config); + void fetch_configuration(); void setupConfiguration(); void closeConfiguration(); diff --git a/ndb/src/mgmapi/LocalConfig.cpp b/ndb/src/mgmapi/LocalConfig.cpp index d0ff97cdedf..8f1e2ee8100 100644 --- a/ndb/src/mgmapi/LocalConfig.cpp +++ b/ndb/src/mgmapi/LocalConfig.cpp @@ -14,7 +14,7 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include <LocalConfig.hpp> +#include "LocalConfig.hpp" #include <NdbEnv.h> #include <NdbConfig.h> #include <NdbAutoPtr.hpp> @@ -294,4 +294,19 @@ LocalConfig::readConnectString(const char * connectString, return return_value; } +char * +LocalConfig::makeConnectString(char *buf, int sz) +{ + int p= BaseString::snprintf(buf,sz,"nodeid=%d", _ownNodeId); + for (int i = 0; (i < ids.size()) && (sz-p > 0); i++) + { + if (ids[i].type != MgmId_TCP) + continue; + p+=BaseString::snprintf(buf+p,sz-p,",%s:%d", + ids[i].name.c_str(), ids[i].port); + } + buf[sz-1]=0; + return buf; +} + template class Vector<MgmtSrvrId>; diff --git a/ndb/include/mgmapi/LocalConfig.hpp b/ndb/src/mgmapi/LocalConfig.hpp index 9ceeffdba36..c415ec1be91 100644 --- a/ndb/include/mgmapi/LocalConfig.hpp +++ b/ndb/src/mgmapi/LocalConfig.hpp @@ -61,6 +61,7 @@ struct LocalConfig { bool parseHostName(const char *buf); bool parseFileName(const char *buf); bool parseString(const char *buf, BaseString &err); + char * makeConnectString(char *buf, int sz); }; #endif // LocalConfig_H diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index 51f2d7cee01..831d14eac52 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -20,6 +20,7 @@ #include <LocalConfig.hpp> #include <NdbAutoPtr.hpp> +#include <NdbSleep.h> #include <NdbTCP.h> #include "mgmapi.h" #include "mgmapi_debug.h" @@ -83,8 +84,8 @@ typedef Parser<ParserDummy> Parser_t; #define NDB_MGM_MAX_ERR_DESC_SIZE 256 struct ndb_mgm_handle { - char * hostname; - unsigned short port; + char * connectstring; + int cfg_i; int connected; int last_error; @@ -95,7 +96,7 @@ struct ndb_mgm_handle { NDB_SOCKET_TYPE socket; - char cfg_ptr[sizeof(LocalConfig)]; + LocalConfig cfg; #ifdef MGMAPI_LOG FILE* logfile; @@ -148,14 +149,16 @@ ndb_mgm_create_handle() h->connected = 0; h->last_error = 0; h->last_error_line = 0; - h->hostname = 0; h->socket = NDB_INVALID_SOCKET; h->read_timeout = 50000; h->write_timeout = 100; - - new (h->cfg_ptr) LocalConfig; + h->cfg_i = 0; strncpy(h->last_error_desc, "No error", NDB_MGM_MAX_ERR_DESC_SIZE); + + new (&(h->cfg)) LocalConfig; + h->cfg.init(0, 0); + #ifdef MGMAPI_LOG h->logfile = 0; #endif @@ -163,6 +166,23 @@ ndb_mgm_create_handle() return h; } +extern "C" +int +ndb_mgm_set_connectstring(NdbMgmHandle handle, const char * mgmsrv) +{ + new (&(handle->cfg)) LocalConfig; + if (!handle->cfg.init(mgmsrv, 0) || + handle->cfg.ids.size() == 0) + { + new (&(handle->cfg)) LocalConfig; + handle->cfg.init(0, 0); /* reset the LocalCongig */ + SET_ERROR(handle, 
NDB_MGM_ILLEGAL_CONNECT_STRING, ""); + return -1; + } + handle->cfg_i= 0; + return 0; +} + /** * Destroy a handle */ @@ -175,14 +195,13 @@ ndb_mgm_destroy_handle(NdbMgmHandle * handle) if((* handle)->connected){ ndb_mgm_disconnect(* handle); } - my_free((* handle)->hostname,MYF(MY_ALLOW_ZERO_PTR)); #ifdef MGMAPI_LOG if ((* handle)->logfile != 0){ fclose((* handle)->logfile); (* handle)->logfile = 0; } #endif - ((LocalConfig*)((*handle)->cfg_ptr))->~LocalConfig(); + (*handle)->cfg.~LocalConfig(); my_free((char*)* handle,MYF(MY_ALLOW_ZERO_PTR)); * handle = 0; } @@ -314,7 +333,8 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply, */ extern "C" int -ndb_mgm_connect(NdbMgmHandle handle, const char * mgmsrv) +ndb_mgm_connect(NdbMgmHandle handle, int no_retries, + int retry_delay_in_seconds, int verbose) { SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_connect"); CHECK_HANDLE(handle, -1); @@ -331,36 +351,60 @@ ndb_mgm_connect(NdbMgmHandle handle, const char * mgmsrv) /** * Do connect */ - LocalConfig *cfg= (LocalConfig*)(handle->cfg_ptr); - new (cfg) LocalConfig; - if (!cfg->init(mgmsrv, 0) || - cfg->ids.size() == 0) - { - SET_ERROR(handle, NDB_MGM_ILLEGAL_CONNECT_STRING, ""); - return -1; - } - + LocalConfig &cfg= handle->cfg; NDB_SOCKET_TYPE sockfd= NDB_INVALID_SOCKET; Uint32 i; - for (i = 0; i < cfg->ids.size(); i++) + while (sockfd == NDB_INVALID_SOCKET) { - if (cfg->ids[i].type != MgmId_TCP) - continue; - SocketClient s(cfg->ids[i].name.c_str(), cfg->ids[i].port); - sockfd = s.connect(); + // do all the mgmt servers + for (i = 0; i < cfg.ids.size(); i++) + { + if (cfg.ids[i].type != MgmId_TCP) + continue; + SocketClient s(cfg.ids[i].name.c_str(), cfg.ids[i].port); + sockfd = s.connect(); + if (sockfd != NDB_INVALID_SOCKET) + break; + } if (sockfd != NDB_INVALID_SOCKET) break; - } - if (sockfd == NDB_INVALID_SOCKET) - { - setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__, - "Unable to connect using connectstring %s", mgmsrv); - return -1; + if (verbose > 0) { + char buf[1024]; + ndbout_c("Unable to connect with connect string: %s", + cfg.makeConnectString(buf,sizeof(buf))); + verbose= -1; + } + if (no_retries == 0) { + char buf[1024]; + setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__, + "Unable to connect with connect string: %s", + cfg.makeConnectString(buf,sizeof(buf))); + if (verbose == -2) + ndbout << ", failed." << endl; + return -1; + } + if (verbose == -1) { + ndbout << "Retrying every " << retry_delay_in_seconds << " seconds"; + if (no_retries > 0) + ndbout << ". 
Attempts left:"; + else + ndbout << ", until connected.";; + ndbout << flush; + verbose= -2; + } + if (no_retries > 0) { + if (verbose == -2) { + ndbout << " " << no_retries; + ndbout << flush; + } + no_retries--; + } + NdbSleep_SecSleep(retry_delay_in_seconds); } + if (verbose == -2) + ndbout << endl; - my_free(handle->hostname,MYF(MY_ALLOW_ZERO_PTR)); - handle->hostname = my_strdup(cfg->ids[i].name.c_str(),MYF(MY_WME)); - handle->port = cfg->ids[i].port; + handle->cfg_i = i; handle->socket = sockfd; handle->connected = 1; @@ -1068,7 +1112,9 @@ ndb_mgm_listen_event(NdbMgmHandle handle, int filter[]) }; CHECK_HANDLE(handle, -1); - SocketClient s(handle->hostname, handle->port); + const char *hostname= ndb_mgm_get_connected_host(handle); + int port= ndb_mgm_get_connected_port(handle); + SocketClient s(hostname, port); const NDB_SOCKET_TYPE sockfd = s.connect(); if (sockfd < 0) { setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__, @@ -1613,16 +1659,37 @@ ndb_mgm_destroy_configuration(struct ndb_mgm_configuration *cfg) extern "C" int -ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, unsigned *pnodeid, int nodetype) +ndb_mgm_get_configuration_nodeid(NdbMgmHandle handle) +{ + CHECK_HANDLE(handle, 0); + return handle->cfg._ownNodeId; +} + +extern "C" +int ndb_mgm_get_connected_port(NdbMgmHandle handle) { + return handle->cfg.ids[handle->cfg_i].port; +} + +extern "C" +const char *ndb_mgm_get_connected_host(NdbMgmHandle handle) +{ + return handle->cfg.ids[handle->cfg_i].name.c_str(); +} +extern "C" +int +ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, int nodetype) +{ CHECK_HANDLE(handle, 0); CHECK_CONNECTED(handle, 0); + int nodeid= handle->cfg._ownNodeId; + Properties args; args.put("version", version); args.put("nodetype", nodetype); - args.put("nodeid", *pnodeid); + args.put("nodeid", nodeid); args.put("user", "mysqld"); args.put("password", "mysqld"); args.put("public key", "a public key"); @@ -1638,26 +1705,29 @@ ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, unsigned *pnodei prop= ndb_mgm_call(handle, reply, "get nodeid", &args); CHECK_REPLY(prop, -1); - int res= -1; + nodeid= -1; do { const char * buf; if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){ + const char *hostname= ndb_mgm_get_connected_host(handle); + unsigned port= ndb_mgm_get_connected_port(handle); BaseString err; err.assfmt("Could not alloc node id at %s port %d: %s", - handle->hostname, handle->port, buf); + hostname, port, buf); setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__, err.c_str()); break; } - if(!prop->get("nodeid", pnodeid) != 0){ + Uint32 _nodeid; + if(!prop->get("nodeid", &_nodeid) != 0){ ndbout_c("ERROR Message: <nodeid Unspecified>\n"); break; } - res= 0; + nodeid= _nodeid; }while(0); delete prop; - return res; + return nodeid; } /***************************************************************************** diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp index bdeb885ed8b..bfe8b6786b4 100644 --- a/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/ndb/src/mgmclient/CommandInterpreter.cpp @@ -44,7 +44,7 @@ public: * Constructor * @param mgmtSrvr: Management server to use when executing commands */ - CommandInterpreter(const char *); + CommandInterpreter(const char *, int verbose); ~CommandInterpreter(); /** @@ -94,6 +94,7 @@ private: */ void executeHelp(char* parameters); void executeShow(char* parameters); + void executeConnect(char* parameters); void executePurge(char* parameters); 
void executeShutdown(char* parameters); void executeRun(char* parameters); @@ -153,7 +154,7 @@ private: NdbMgmHandle m_mgmsrv; bool connected; - const char *host; + int m_verbose; int try_reconnect; #ifdef HAVE_GLOBAL_REPLICATION NdbRepHandle m_repserver; @@ -170,9 +171,9 @@ private: #include "ndb_mgmclient.hpp" #include "ndb_mgmclient.h" -Ndb_mgmclient::Ndb_mgmclient(const char *host) +Ndb_mgmclient::Ndb_mgmclient(const char *host,int verbose) { - m_cmd= new CommandInterpreter(host); + m_cmd= new CommandInterpreter(host,verbose); } Ndb_mgmclient::~Ndb_mgmclient() { @@ -276,6 +277,7 @@ static const char* helpText = "REP CONNECT <host:port> Connect to REP server on host:port\n" #endif "PURGE STALE SESSIONS Reset reserved nodeid's in the mgmt server\n" +"CONNECT Connect to management server (reconnect if already connected)\n" "QUIT Quit management client\n" ; @@ -374,20 +376,22 @@ convert(const char* s, int& val) { /* * Constructor */ -CommandInterpreter::CommandInterpreter(const char *_host) +CommandInterpreter::CommandInterpreter(const char *_host,int verbose) + : m_verbose(verbose) { m_mgmsrv = ndb_mgm_create_handle(); if(m_mgmsrv == NULL) { ndbout_c("Cannot create handle to management server."); + exit(-1); + } + if (ndb_mgm_set_connectstring(m_mgmsrv, _host)) + { printError(); + exit(-1); } connected = false; try_reconnect = 0; - if (_host) - host= my_strdup(_host,MYF(MY_WME)); - else - host= 0; #ifdef HAVE_GLOBAL_REPLICATION rep_host = NULL; m_repserver = NULL; @@ -402,8 +406,6 @@ CommandInterpreter::~CommandInterpreter() { connected = false; ndb_mgm_destroy_handle(&m_mgmsrv); - my_free((char *)host,MYF(MY_ALLOW_ZERO_PTR)); - host = NULL; } static bool @@ -438,17 +440,15 @@ bool CommandInterpreter::connect() { if(!connected) { - int tries = try_reconnect; // tries == 0 => infinite - while(!connected) { - if(ndb_mgm_connect(m_mgmsrv, host) == -1) { - ndbout << "Cannot connect to management server (" << host << ")."; - tries--; - if (tries == 0) - break; - ndbout << "Retrying in 5 seconds." 
<< endl; - NdbSleep_SecSleep(5); - } else - connected = true; + if(!ndb_mgm_connect(m_mgmsrv, try_reconnect-1, 5, 1)) + { + connected = true; + if (m_verbose) + { + printf("Connected to Management Server at: %s:%d\n", + ndb_mgm_get_connected_host(m_mgmsrv), + ndb_mgm_get_connected_port(m_mgmsrv)); + } } } return connected; @@ -457,7 +457,7 @@ CommandInterpreter::connect() bool CommandInterpreter::disconnect() { - if (ndb_mgm_disconnect(m_mgmsrv) == -1) { + if (connected && (ndb_mgm_disconnect(m_mgmsrv) == -1)) { ndbout_c("Could not disconnect from management server"); printError(); } @@ -471,18 +471,21 @@ CommandInterpreter::disconnect() int CommandInterpreter::execute(const char *_line, int _try_reconnect) { + DBUG_ENTER("CommandInterpreter::execute"); + DBUG_PRINT("info",("line=\"%s\"",_line)); + if (_try_reconnect >= 0) try_reconnect=_try_reconnect; char * line; if(_line == NULL) { // ndbout << endl; - return false; + DBUG_RETURN(false); } line = my_strdup(_line,MYF(MY_WME)); My_auto_ptr<char> ptr(line); if (emptyString(line)) { - return true; + DBUG_RETURN(true); } for (unsigned int i = 0; i < strlen(line); ++i) { @@ -496,41 +499,49 @@ CommandInterpreter::execute(const char *_line, int _try_reconnect) if (strcmp(firstToken, "HELP") == 0 || strcmp(firstToken, "?") == 0) { executeHelp(allAfterFirstToken); - return true; + DBUG_RETURN(true); } - else if (strcmp(firstToken, "SHOW") == 0) { + else if (strcmp(firstToken, "CONNECT") == 0) { + executeConnect(allAfterFirstToken); + DBUG_RETURN(true); + } + + if (!connect()) + DBUG_RETURN(true); + + if (strcmp(firstToken, "SHOW") == 0) { executeShow(allAfterFirstToken); - return true; + DBUG_RETURN(true); } else if (strcmp(firstToken, "SHUTDOWN") == 0) { executeShutdown(allAfterFirstToken); - return true; + DBUG_RETURN(true); } else if (strcmp(firstToken, "CLUSTERLOG") == 0){ executeClusterLog(allAfterFirstToken); - return true; + DBUG_RETURN(true); } else if(strcmp(firstToken, "START") == 0 && allAfterFirstToken != NULL && strncmp(allAfterFirstToken, "BACKUP", sizeof("BACKUP") - 1) == 0){ executeStartBackup(allAfterFirstToken); - return true; + DBUG_RETURN(true); } else if(strcmp(firstToken, "ABORT") == 0 && allAfterFirstToken != NULL && strncmp(allAfterFirstToken, "BACKUP", sizeof("BACKUP") - 1) == 0){ executeAbortBackup(allAfterFirstToken); - return true; + DBUG_RETURN(true); } else if (strcmp(firstToken, "PURGE") == 0) { executePurge(allAfterFirstToken); - return true; + DBUG_RETURN(true); } #ifdef HAVE_GLOBAL_REPLICATION else if(strcmp(firstToken, "REPLICATION") == 0 || strcmp(firstToken, "REP") == 0) { executeRep(allAfterFirstToken); - return true; + DBUG_RETURN(true); } #endif // HAVE_GLOBAL_REPLICATION else if(strcmp(firstToken, "ENTER") == 0 && @@ -538,14 +549,14 @@ CommandInterpreter::execute(const char *_line, int _try_reconnect) strncmp(allAfterFirstToken, "SINGLE USER MODE ", sizeof("SINGLE USER MODE") - 1) == 0){ executeEnterSingleUser(allAfterFirstToken); - return true; + DBUG_RETURN(true); } else if(strcmp(firstToken, "EXIT") == 0 && allAfterFirstToken != NULL && strncmp(allAfterFirstToken, "SINGLE USER MODE ", sizeof("SINGLE USER MODE") - 1) == 0){ executeExitSingleUser(allAfterFirstToken); - return true; + DBUG_RETURN(true); } else if (strcmp(firstToken, "ALL") == 0) { analyseAfterFirstToken(-1, allAfterFirstToken); @@ -554,7 +565,7 @@ CommandInterpreter::execute(const char *_line, int _try_reconnect) strcmp(firstToken, "EXIT") == 0 || strcmp(firstToken, "BYE") == 0) && allAfterFirstToken == NULL){ - return false; + 
DBUG_RETURN(false); } else { /** * First token should be a digit, node ID @@ -564,18 +575,18 @@ CommandInterpreter::execute(const char *_line, int _try_reconnect) if (! convert(firstToken, nodeId)) { ndbout << "Invalid command: " << line << endl; ndbout << "Type HELP for help." << endl << endl; - return true; + DBUG_RETURN(true); } if (nodeId < 0) { ndbout << "Invalid node ID: " << firstToken << "." << endl; - return true; + DBUG_RETURN(true); } analyseAfterFirstToken(nodeId, allAfterFirstToken); } - return true; + DBUG_RETURN(true); } @@ -704,7 +715,6 @@ CommandInterpreter::executeForAll(const char * cmd, ExecuteFunction fun, ndbout_c("Trying to start all nodes of system."); ndbout_c("Use ALL STATUS to see the system start-up phases."); } else { - connect(); struct ndb_mgm_cluster_state *cl= ndb_mgm_get_status(m_mgmsrv); if(cl == 0){ ndbout_c("Unable get status from management server"); @@ -838,8 +848,6 @@ CommandInterpreter::executeHelp(char* parameters) void CommandInterpreter::executeShutdown(char* parameters) { - connect(); - ndb_mgm_cluster_state *state = ndb_mgm_get_status(m_mgmsrv); if(state == NULL) { ndbout_c("Could not get status"); @@ -991,7 +999,6 @@ CommandInterpreter::executePurge(char* parameters) int i; char *str; - connect(); if (ndb_mgm_purge_stale_sessions(m_mgmsrv, &str)) { ndbout_c("Command failed"); @@ -1011,7 +1018,6 @@ void CommandInterpreter::executeShow(char* parameters) { int i; - connect(); if (emptyString(parameters)) { ndbout << "Cluster Configuration" << endl << "---------------------" << endl; @@ -1099,6 +1105,12 @@ CommandInterpreter::executeShow(char* parameters) } } +void +CommandInterpreter::executeConnect(char* parameters) +{ + disconnect(); + connect(); +} //***************************************************************************** //***************************************************************************** @@ -1106,7 +1118,6 @@ void CommandInterpreter::executeClusterLog(char* parameters) { int i; - connect(); if (parameters != 0 && strlen(parameters) != 0) { enum ndb_mgm_clusterlog_level severity = NDB_MGM_CLUSTERLOG_ALL; int isOk = true; @@ -1252,7 +1263,6 @@ CommandInterpreter::executeClusterLog(char* parameters) void CommandInterpreter::executeStop(int processId, const char *, bool all) { - connect(); int result = 0; if(all) { result = ndb_mgm_stop(m_mgmsrv, 0, 0); @@ -1274,7 +1284,6 @@ CommandInterpreter::executeStop(int processId, const char *, bool all) void CommandInterpreter::executeEnterSingleUser(char* parameters) { - connect(); strtok(parameters, " "); struct ndb_mgm_reply reply; char* id = strtok(NULL, " "); @@ -1301,7 +1310,6 @@ CommandInterpreter::executeEnterSingleUser(char* parameters) void CommandInterpreter::executeExitSingleUser(char* parameters) { - connect(); int result = ndb_mgm_exit_single_user(m_mgmsrv, 0); if (result != 0) { ndbout_c("Exiting single user mode failed."); @@ -1316,7 +1324,6 @@ void CommandInterpreter::executeStart(int processId, const char* parameters, bool all) { - connect(); int result; if(all) { result = ndb_mgm_start(m_mgmsrv, 0, 0); @@ -1340,7 +1347,6 @@ void CommandInterpreter::executeRestart(int processId, const char* parameters, bool all) { - connect(); int result; int nostart = 0; int initialstart = 0; @@ -1390,7 +1396,6 @@ CommandInterpreter::executeDumpState(int processId, const char* parameters, ndbout << "Expected argument" << endl; return; } - connect(); Uint32 no = 0; int pars[25]; @@ -1430,7 +1435,6 @@ CommandInterpreter::executeStatus(int processId, return; } - connect(); 
ndb_mgm_node_status status; Uint32 startPhase, version; bool system; @@ -1481,7 +1485,6 @@ void CommandInterpreter::executeLogLevel(int processId, const char* parameters, bool all) { - connect(); (void) all; BaseString tmp(parameters); @@ -1537,7 +1540,6 @@ void CommandInterpreter::executeError(int processId, return; } - connect(); // Copy parameters since strtok will modify it char* newpar = my_strdup(parameters,MYF(MY_WME)); My_auto_ptr<char> ap1(newpar); @@ -1601,7 +1603,6 @@ void CommandInterpreter::executeLog(int processId, const char* parameters, bool all) { - connect(); struct ndb_mgm_reply reply; Vector<const char *> blocks; if (! parseBlockSpecification(parameters, blocks)) { @@ -1669,7 +1670,6 @@ CommandInterpreter::executeTestOn(int processId, ndbout << "No parameters expected to this command." << endl; return; } - connect(); struct ndb_mgm_reply reply; int result = ndb_mgm_start_signallog(m_mgmsrv, processId, &reply); if (result != 0) { @@ -1688,7 +1688,6 @@ CommandInterpreter::executeTestOff(int processId, ndbout << "No parameters expected to this command." << endl; return; } - connect(); struct ndb_mgm_reply reply; int result = ndb_mgm_stop_signallog(m_mgmsrv, processId, &reply); if (result != 0) { @@ -1810,8 +1809,6 @@ CommandInterpreter::executeEventReporting(int processId, ndbout << "Expected argument" << endl; return; } - connect(); - BaseString tmp(parameters); Vector<BaseString> spec; tmp.split(spec, "="); @@ -1862,7 +1859,6 @@ CommandInterpreter::executeEventReporting(int processId, void CommandInterpreter::executeStartBackup(char* /*parameters*/) { - connect(); struct ndb_mgm_reply reply; unsigned int backupId; @@ -1909,8 +1905,6 @@ CommandInterpreter::executeStartBackup(char* /*parameters*/) void CommandInterpreter::executeAbortBackup(char* parameters) { - connect(); - strtok(parameters, " "); struct ndb_mgm_reply reply; char* id = strtok(NULL, "\0"); @@ -1964,7 +1958,6 @@ CommandInterpreter::executeRep(char* parameters) return; } - connect(); char * line = my_strdup(parameters,MYF(MY_WME)); My_auto_ptr<char> ap1((char*)line); char * firstToken = strtok(line, " "); diff --git a/ndb/src/mgmclient/main.cpp b/ndb/src/mgmclient/main.cpp index 401a9198f30..08d5d60cfab 100644 --- a/ndb/src/mgmclient/main.cpp +++ b/ndb/src/mgmclient/main.cpp @@ -30,9 +30,10 @@ extern "C" int add_history(const char *command); /* From readline directory */ #include <NdbMain.h> #include <NdbHost.h> +#include <BaseString.hpp> +#include <NdbOut.hpp> #include <mgmapi.h> #include <ndb_version.h> -#include <LocalConfig.hpp> #include "ndb_mgmclient.hpp" @@ -55,17 +56,18 @@ handler(int sig){ } } - +static const char default_prompt[]= "ndb_mgm> "; static unsigned _try_reconnect; static char *opt_connect_str= 0; +static const char *prompt= default_prompt; static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_mgm"), { "try-reconnect", 't', - "Specify number of retries for connecting to ndb_mgmd, default infinite", + "Specify number of tries for connecting to ndb_mgmd (0 = infinite)", (gptr*) &_try_reconnect, (gptr*) &_try_reconnect, 0, - GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + GET_UINT, REQUIRED_ARG, 3, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static void short_usage_sub(void) @@ -115,13 +117,13 @@ read_and_execute(int _try_reconnect) } #ifdef HAVE_READLINE /* Get a line from the user. */ - line_read = readline ("ndb_mgm> "); + line_read = readline (prompt); /* If the line has any text in it, save it on the history. 
*/ if (line_read && *line_read) add_history (line_read); #else static char linebuffer[254]; - fputs("ndb_mgm> ", stdout); + fputs(prompt, stdout); linebuffer[sizeof(linebuffer)-1]=0; line_read = fgets(linebuffer, sizeof(linebuffer)-1, stdin); if (line_read == linebuffer) { @@ -138,7 +140,7 @@ int main(int argc, char** argv){ NDB_INIT(argv[0]); const char *_host = 0; int _port = 0; - const char *load_default_groups[]= { "ndb_mgm",0 }; + const char *load_default_groups[]= { "mysql_cluster","ndb_mgm",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; @@ -154,12 +156,16 @@ int main(int argc, char** argv){ opt_connect_str= buf; } + if (!isatty(0)) + { + prompt= 0; + } + ndbout << "-- NDB Cluster -- Management Client --" << endl; - printf("Connecting to Management Server: %s\n", opt_connect_str ? opt_connect_str : "default"); signal(SIGPIPE, handler); - com = new Ndb_mgmclient(opt_connect_str); + com = new Ndb_mgmclient(opt_connect_str,1); while(read_and_execute(_try_reconnect)); delete com; diff --git a/ndb/src/mgmclient/ndb_mgmclient.hpp b/ndb/src/mgmclient/ndb_mgmclient.hpp index f6bcebc3896..ea592dfdf4e 100644 --- a/ndb/src/mgmclient/ndb_mgmclient.hpp +++ b/ndb/src/mgmclient/ndb_mgmclient.hpp @@ -21,7 +21,7 @@ class CommandInterpreter; class Ndb_mgmclient { public: - Ndb_mgmclient(const char*); + Ndb_mgmclient(const char*,int verbose=0); ~Ndb_mgmclient(); int execute(const char *_line, int _try_reconnect=-1); int execute(int argc, char** argv, int _try_reconnect=-1); diff --git a/ndb/src/mgmsrv/InitConfigFileParser.cpp b/ndb/src/mgmsrv/InitConfigFileParser.cpp index fdfe7823fc2..05102255eaa 100644 --- a/ndb/src/mgmsrv/InitConfigFileParser.cpp +++ b/ndb/src/mgmsrv/InitConfigFileParser.cpp @@ -213,48 +213,41 @@ InitConfigFileParser::parseConfig(FILE * file) { // Parse Name-Value Pair //**************************************************************************** -bool InitConfigFileParser::parseNameValuePair(Context& ctx, const char* line) { - - char tmpLine[MAX_LINE_LENGTH]; - char fname[MAX_LINE_LENGTH], rest[MAX_LINE_LENGTH]; - char* t; - const char *separator_list[]= {":", "=", 0}; - const char *separator= 0; - +bool InitConfigFileParser::parseNameValuePair(Context& ctx, const char* line) +{ if (ctx.m_currentSection == NULL){ ctx.reportError("Value specified outside section"); return false; } - strncpy(tmpLine, line, MAX_LINE_LENGTH); - // ************************************* - // Check if a separator exists in line + // Split string at first occurrence of + // '=' or ':' // ************************************* - for(int i= 0; separator_list[i] != 0; i++) { - if(strchr(tmpLine, separator_list[i][0])) { - separator= separator_list[i]; - break; - } - } - if (separator == 0) { + Vector<BaseString> tmp_string_split; + if (BaseString(line).split(tmp_string_split, + BaseString("=:"), + 2) != 2) + { ctx.reportError("Parse error"); return false; } - // ******************************************* - // Get pointer to substring before separator - // ******************************************* - t = strtok(tmpLine, separator); - - // ***************************************** - // Count number of tokens before separator - // ***************************************** - if (sscanf(t, "%120s%120s", fname, rest) != 1) { - ctx.reportError("Multiple names before \'%c\'", separator[0]); - return false; + // ************************************* + // Remove leading and trailing chars + // ************************************* + { + for (int i = 0; i < 2; i++) + 
tmp_string_split[i].trim("\r\n \t"); } + + // ************************************* + // First in split is fname + // ************************************* + + const char *fname= tmp_string_split[0].c_str(); + if (!ctx.m_currentInfo->contains(fname)) { ctx.reportError("[%s] Unknown parameter: %s", ctx.fname, fname); return false; @@ -273,24 +266,11 @@ bool InitConfigFileParser::parseNameValuePair(Context& ctx, const char* line) { } } - // ****************************************** - // Get pointer to substring after separator - // ****************************************** - t = strtok(NULL, "\0"); - if (t == NULL) { - ctx.reportError("No value for parameter"); - return false; - } - - // ****************************************** - // Remove prefix and postfix spaces and tabs - // ******************************************* - trim(t); - // *********************** // Store name-value pair // *********************** - return storeNameValuePair(ctx, fname, t); + + return storeNameValuePair(ctx, fname, tmp_string_split[1].c_str()); } diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index a49b29af275..986da71a8e8 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -399,16 +399,20 @@ MgmtSrvr::getPort() const { } /* Constructor */ -MgmtSrvr::MgmtSrvr(NodeId nodeId, - SocketServer *socket_server, - const BaseString &configFilename, - LocalConfig &local_config, - Config * config): +int MgmtSrvr::init() +{ + if ( _ownNodeId > 0) + return 0; + return -1; +} + +MgmtSrvr::MgmtSrvr(SocketServer *socket_server, + const char *config_filename, + const char *connect_string) : _blockNumber(1), // Hard coded block number since it makes it easy to send // signals to other management servers. m_socket_server(socket_server), _ownReference(0), - m_local_config(local_config), theSignalIdleList(NULL), theWaitState(WAIT_SUBSCRIBE_CONF), m_statisticsListner(this) @@ -416,6 +420,8 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, DBUG_ENTER("MgmtSrvr::MgmtSrvr"); + _ownNodeId= 0; + _config = NULL; _isStopThread = false; @@ -426,12 +432,48 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, theFacade = 0; m_newConfig = NULL; - m_configFilename = configFilename; + if (config_filename) + m_configFilename.assign(config_filename); + else + m_configFilename.assign("config.ini"); m_nextConfigGenerationNumber = 0; - _config = (config == 0 ? 
readConfig() : config); - + m_config_retriever= new ConfigRetriever(connect_string, + NDB_VERSION, NDB_MGM_NODE_TYPE_MGM); + // if connect_string explicitly given or + // no config filename is given then + // first try to allocate nodeid from another management server + if ((connect_string || config_filename == NULL) && + (m_config_retriever->do_connect(0,0,0) == 0)) + { + int tmp_nodeid= 0; + tmp_nodeid= m_config_retriever->allocNodeId(0 /*retry*/,0 /*delay*/); + if (tmp_nodeid == 0) + { + ndbout_c(m_config_retriever->getErrorString()); + exit(-1); + } + // read config from other managent server + _config= fetchConfig(); + if (_config == 0) + { + ndbout << m_config_retriever->getErrorString() << endl; + exit(-1); + } + _ownNodeId= tmp_nodeid; + } + + if (_ownNodeId == 0) + { + // read config locally + _config= readConfig(); + if (_config == 0) { + ndbout << "Unable to read config file" << endl; + exit(-1); + } + } + theMgmtWaitForResponseCondPtr = NdbCondition_Create(); m_configMutex = NdbMutex_Create(); @@ -443,9 +485,11 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, nodeTypes[i] = (enum ndb_mgm_node_type)-1; m_connect_address[i].s_addr= 0; } + { - ndb_mgm_configuration_iterator * iter = ndb_mgm_create_configuration_iterator - (config->m_configValues, CFG_SECTION_NODE); + ndb_mgm_configuration_iterator + *iter = ndb_mgm_create_configuration_iterator(_config->m_configValues, + CFG_SECTION_NODE); for(ndb_mgm_first(iter); ndb_mgm_valid(iter); ndb_mgm_next(iter)){ unsigned type, id; if(ndb_mgm_get_int_parameter(iter, CFG_TYPE_OF_SECTION, &type) != 0) @@ -478,8 +522,6 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, } _props = NULL; - _ownNodeId= 0; - NodeId tmp= nodeId; BaseString error_string; if ((m_node_id_mutex = NdbMutex_Create()) == 0) @@ -488,43 +530,25 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, exit(-1); } -#if 0 - char my_hostname[256]; - struct sockaddr_in tmp_addr; - SOCKET_SIZE_TYPE addrlen= sizeof(tmp_addr); - if (!g_no_nodeid_checks) { - if (gethostname(my_hostname, sizeof(my_hostname))) { - ndbout << "error: gethostname() - " << strerror(errno) << endl; - exit(-1); - } - if (Ndb_getInAddr(&(((sockaddr_in*)&tmp_addr)->sin_addr),my_hostname)) { - ndbout << "error: Ndb_getInAddr(" << my_hostname << ") - " - << strerror(errno) << endl; + if (_ownNodeId == 0) // we did not get node id from other server + { + NodeId tmp= m_config_retriever->get_configuration_nodeid(); + + if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, + 0, 0, error_string)){ + ndbout << "Unable to obtain requested nodeid: " + << error_string.c_str() << endl; exit(-1); } + _ownNodeId = tmp; } - if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, - (struct sockaddr *)&tmp_addr, - &addrlen, error_string)){ - ndbout << "Unable to obtain requested nodeid: " - << error_string.c_str() << endl; - exit(-1); - } -#else - if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, - 0, 0, error_string)){ - ndbout << "Unable to obtain requested nodeid: " - << error_string.c_str() << endl; - exit(-1); - } -#endif - _ownNodeId = tmp; { DBUG_PRINT("info", ("verifyConfig")); - ConfigRetriever cr(m_local_config, NDB_VERSION, NDB_MGM_NODE_TYPE_MGM); - if (!cr.verifyConfig(config->m_configValues, _ownNodeId)) { - ndbout << cr.getErrorString() << endl; + if (!m_config_retriever->verifyConfig(_config->m_configValues, + _ownNodeId)) + { + ndbout << m_config_retriever->getErrorString() << endl; exit(-1); } } @@ -657,6 +681,8 @@ MgmtSrvr::~MgmtSrvr() NdbThread_WaitFor(m_signalRecvThread, &res); NdbThread_Destroy(&m_signalRecvThread); } + if (m_config_retriever) + delete 
m_config_retriever; } //**************************************************************************** diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index b3257491123..2ab11250d81 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -175,11 +175,10 @@ public: /* Constructor */ - MgmtSrvr(NodeId nodeId, /* Local nodeid */ - SocketServer *socket_server, - const BaseString &config_filename, /* Where to save config */ - LocalConfig &local_config, /* Ndb.cfg filename */ - Config * config); + MgmtSrvr(SocketServer *socket_server, + const char *config_filename, /* Where to save config */ + const char *connect_string); + int init(); NodeId getOwnNodeId() const {return _ownNodeId;}; /** @@ -538,7 +537,6 @@ private: NdbMutex *m_configMutex; const Config * _config; Config * m_newConfig; - LocalConfig &m_local_config; BaseString m_configFilename; Uint32 m_nextConfigGenerationNumber; @@ -755,6 +753,9 @@ private: Config *_props; int send(class NdbApiSignal* signal, Uint32 node, Uint32 node_type); + + ConfigRetriever *m_config_retriever; + public: /** * This method does not exist diff --git a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp index 1d51061e909..6c4b4e9ae3c 100644 --- a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp +++ b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp @@ -272,30 +272,20 @@ MgmtSrvr::saveConfig(const Config *conf) { Config * MgmtSrvr::readConfig() { - Config *conf = NULL; - if(m_configFilename.length() != 0) { - /* Use config file */ - InitConfigFileParser parser; - conf = parser.parseConfig(m_configFilename.c_str()); - - if(conf == NULL) { - /* Try to get configuration from other MGM server */ - return fetchConfig(); - } - } + Config *conf; + InitConfigFileParser parser; + conf = parser.parseConfig(m_configFilename.c_str()); return conf; } Config * MgmtSrvr::fetchConfig() { - ConfigRetriever cr(m_local_config, NDB_VERSION, NODE_TYPE_MGM); - struct ndb_mgm_configuration * tmp = cr.getConfig(); + struct ndb_mgm_configuration * tmp = m_config_retriever->getConfig(); if(tmp != 0){ Config * conf = new Config(); conf->m_configValues = tmp; return conf; } - return 0; } diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp index 76f0679b069..400a5301a19 100644 --- a/ndb/src/mgmsrv/main.cpp +++ b/ndb/src/mgmsrv/main.cpp @@ -62,7 +62,6 @@ struct MgmGlobals { int non_interactive; int interactive; const char * config_filename; - const char * local_config_filename; /** Stuff found in environment or in local config */ NodeId localNodeId; @@ -70,9 +69,6 @@ struct MgmGlobals { char * interface_name; int port; - /** The configuration of the cluster */ - Config * cluster_config; - /** The Mgmt Server */ MgmtSrvr * mgmObject; @@ -86,9 +82,6 @@ static MgmGlobals glob; /****************************************************************************** * Function prototypes ******************************************************************************/ -static bool readLocalConfig(); -static bool readGlobalConfig(); - /** * Global variables */ @@ -110,10 +103,14 @@ static struct my_option my_long_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, - { "connect-string", 1023, + { "ndb-connectstring", 1023, "Set connect string for connecting to ndb_mgmd. " - "<constr>=\"host=<hostname:port>[;nodeid=<id>]\". " - "Overides specifying entries in NDB_CONNECTSTRING and config file", + "Syntax: \"[nodeid=<id>;][host=]<hostname>[:<port>]\". 
" + "Overides specifying entries in NDB_CONNECTSTRING and Ndb.cfg", + (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + { "connect-string", 1023, + "same as --ndb-connectstring.", (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "config-file", 'f', "Specify cluster configuration file", @@ -122,9 +119,6 @@ static struct my_option my_long_options[] = { "daemon", 'd', "Run ndb_mgmd in daemon mode (default)", (gptr*) &glob.daemon, (gptr*) &glob.daemon, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 }, - { "l", 'l', "Specify configuration file connect string (default Ndb.cfg if available)", - (gptr*) &glob.local_config_filename, (gptr*) &glob.local_config_filename, 0, - GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "interactive", 256, "Run interactive. Not supported but provided for testing purposes", (gptr*) &glob.interactive, (gptr*) &glob.interactive, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, @@ -194,9 +188,8 @@ int main(int argc, char** argv) #endif global_mgmt_server_check = 1; - glob.config_filename= "config.ini"; - const char *load_default_groups[]= { "ndb_mgmd",0 }; + const char *load_default_groups[]= { "mysql_cluster","ndb_mgmd",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; @@ -212,29 +205,16 @@ int main(int argc, char** argv) MgmApiService * mapi = new MgmApiService(); - /**************************** - * Read configuration files * - ****************************/ - LocalConfig local_config; - if(!local_config.init(opt_connect_str,glob.local_config_filename)){ - local_config.printError(); - goto error_end; - } - glob.localNodeId = local_config._ownNodeId; + glob.mgmObject = new MgmtSrvr(glob.socketServer, + glob.config_filename, + opt_connect_str); - if (!readGlobalConfig()) + if (glob.mgmObject->init()) goto error_end; - glob.mgmObject = new MgmtSrvr(glob.localNodeId, glob.socketServer, - BaseString(glob.config_filename), - local_config, - glob.cluster_config); - chdir(NdbConfig_get_path(0)); - glob.cluster_config = 0; glob.localNodeId= glob.mgmObject->getOwnNodeId(); - if (glob.localNodeId == 0) { goto error_end; } @@ -345,9 +325,7 @@ MgmGlobals::MgmGlobals(){ // Default values port = 0; config_filename = NULL; - local_config_filename = NULL; interface_name = 0; - cluster_config = 0; daemon = 1; non_interactive = 0; interactive = 0; @@ -360,27 +338,6 @@ MgmGlobals::~MgmGlobals(){ delete socketServer; if (mgmObject) delete mgmObject; - if (cluster_config) - delete cluster_config; if (interface_name) free(interface_name); } - -/** - * @fn readGlobalConfig - * @param glob : Global variables - * @return true if success, false otherwise. 
- */ -static bool -readGlobalConfig() { - if(glob.config_filename == NULL) - return false; - - /* Use config file */ - InitConfigFileParser parser; - glob.cluster_config = parser.parseConfig(glob.config_filename); - if(glob.cluster_config == 0){ - return false; - } - return true; -} diff --git a/ndb/src/ndbapi/NdbConnection.cpp b/ndb/src/ndbapi/NdbConnection.cpp index 4f6468eb4ae..f4bb000300a 100644 --- a/ndb/src/ndbapi/NdbConnection.cpp +++ b/ndb/src/ndbapi/NdbConnection.cpp @@ -55,6 +55,7 @@ NdbConnection::NdbConnection( Ndb* aNdb ) : theFirstExecOpInList(NULL), theLastExecOpInList(NULL), theCompletedFirstOp(NULL), + theCompletedLastOp(NULL), theNoOfOpSent(0), theNoOfOpCompleted(0), theNoOfOpFetched(0), @@ -124,6 +125,7 @@ NdbConnection::init() theLastExecOpInList = NULL; theCompletedFirstOp = NULL; + theCompletedLastOp = NULL; theGlobalCheckpointId = 0; theCommitStatus = Started; @@ -256,6 +258,8 @@ NdbConnection::handleExecuteCompletion() if (tLastExecOp != NULL) { tLastExecOp->next(theCompletedFirstOp); theCompletedFirstOp = tFirstExecOp; + if (theCompletedLastOp == NULL) + theCompletedLastOp = tLastExecOp; theFirstExecOpInList = NULL; theLastExecOpInList = NULL; }//if @@ -292,6 +296,8 @@ NdbConnection::execute(ExecType aTypeOfExec, ExecType tExecType; NdbOperation* tPrepOp; + NdbOperation* tCompletedFirstOp = NULL; + NdbOperation* tCompletedLastOp = NULL; int ret = 0; do { @@ -314,6 +320,7 @@ NdbConnection::execute(ExecType aTypeOfExec, } tPrepOp = tPrepOp->next(); } + // save rest of prepared ops if batch NdbOperation* tRestOp= 0; NdbOperation* tLastOp= 0; @@ -323,6 +330,7 @@ NdbConnection::execute(ExecType aTypeOfExec, tLastOp = theLastOpInList; theLastOpInList = tPrepOp; } + if (tExecType == Commit) { NdbOperation* tOp = theCompletedFirstOp; while (tOp != NULL) { @@ -338,6 +346,19 @@ NdbConnection::execute(ExecType aTypeOfExec, } } + // completed ops are in unspecified order + if (theCompletedFirstOp != NULL) { + if (tCompletedFirstOp == NULL) { + tCompletedFirstOp = theCompletedFirstOp; + tCompletedLastOp = theCompletedLastOp; + } else { + tCompletedLastOp->next(theCompletedFirstOp); + tCompletedLastOp = theCompletedLastOp; + } + theCompletedFirstOp = NULL; + theCompletedLastOp = NULL; + } + if (executeNoBlobs(tExecType, abortOption, forceSend) == -1) ret = -1; #ifndef VM_TRACE @@ -362,6 +383,7 @@ NdbConnection::execute(ExecType aTypeOfExec, tOp = tOp->next(); } } + // add saved prepared ops if batch if (tPrepOp != NULL && tRestOp != NULL) { if (theFirstOpInList == NULL) @@ -373,6 +395,18 @@ NdbConnection::execute(ExecType aTypeOfExec, assert(theFirstOpInList == NULL || tExecType == NoCommit); } while (theFirstOpInList != NULL || tExecType != aTypeOfExec); + if (tCompletedFirstOp != NULL) { + tCompletedLastOp->next(theCompletedFirstOp); + theCompletedFirstOp = tCompletedFirstOp; + if (theCompletedLastOp == NULL) + theCompletedLastOp = tCompletedLastOp; + } +#if ndb_api_count_completed_ops_after_blob_execute + { NdbOperation* tOp; unsigned n = 0; + for (tOp = theCompletedFirstOp; tOp != NULL; tOp = tOp->next()) n++; + ndbout << "completed ops: " << n << endl; + } +#endif DBUG_RETURN(ret); } @@ -894,6 +928,7 @@ NdbConnection::releaseOperations() releaseOps(theFirstExecOpInList); theCompletedFirstOp = NULL; + theCompletedLastOp = NULL; theFirstOpInList = NULL; theFirstExecOpInList = NULL; theLastOpInList = NULL; @@ -909,6 +944,7 @@ NdbConnection::releaseCompletedOperations() { releaseOps(theCompletedFirstOp); theCompletedFirstOp = NULL; + theCompletedLastOp = NULL; 
}//NdbConnection::releaseOperations() /****************************************************************************** @@ -1086,8 +1122,11 @@ NdbConnection::getNdbIndexScanOperation(const NdbIndexImpl* index, if (indexTable != 0){ NdbIndexScanOperation* tOp = getNdbScanOperation((NdbTableImpl *) indexTable); - tOp->m_currentTable = table; - if(tOp) tOp->m_cursor_type = NdbScanOperation::IndexCursor; + if(tOp) + { + tOp->m_currentTable = table; + tOp->m_cursor_type = NdbScanOperation::IndexCursor; + } return tOp; } else { setOperationErrorCodeAbort(theNdb->theError.code); @@ -1582,9 +1621,6 @@ from other transactions. /** * There's always a TCKEYCONF when using IgnoreError */ -#ifdef VM_TRACE - ndbout_c("Not completing transaction 2"); -#endif return -1; } /**********************************************************************/ @@ -1836,9 +1872,6 @@ NdbConnection::OpCompleteFailure(Uint8 abortOption, bool setFailure) /** * There's always a TCKEYCONF when using IgnoreError */ -#ifdef VM_TRACE - ndbout_c("Not completing transaction"); -#endif return -1; } diff --git a/ndb/src/ndbapi/NdbConnectionScan.cpp b/ndb/src/ndbapi/NdbConnectionScan.cpp index 3fe8993a42b..a1a220caacf 100644 --- a/ndb/src/ndbapi/NdbConnectionScan.cpp +++ b/ndb/src/ndbapi/NdbConnectionScan.cpp @@ -57,12 +57,18 @@ NdbConnection::receiveSCAN_TABREF(NdbApiSignal* aSignal){ if(checkState_TransId(&ref->transId1)){ theScanningOp->theError.code = ref->errorCode; + theScanningOp->execCLOSE_SCAN_REP(); if(!ref->closeNeeded){ - theScanningOp->execCLOSE_SCAN_REP(); return 0; } - assert(theScanningOp->m_sent_receivers_count); + + /** + * Setup so that close_impl will actually perform a close + * and not "close scan"-optimze it away + */ theScanningOp->m_conf_receivers_count++; + theScanningOp->m_conf_receivers[0] = theScanningOp->m_receivers[0]; + theScanningOp->m_conf_receivers[0]->m_tcPtrI = ~0; return 0; } else { #ifdef NDB_NO_DROPPED_SIGNAL @@ -97,7 +103,7 @@ NdbConnection::receiveSCAN_TABCONF(NdbApiSignal* aSignal, theScanningOp->execCLOSE_SCAN_REP(); return 0; } - + for(Uint32 i = 0; i<len; i += 3){ Uint32 opCount, totalLen; Uint32 ptrI = * ops++; @@ -109,24 +115,12 @@ NdbConnection::receiveSCAN_TABCONF(NdbApiSignal* aSignal, void * tPtr = theNdb->int2void(ptrI); assert(tPtr); // For now NdbReceiver* tOp = theNdb->void2rec(tPtr); - if (tOp && tOp->checkMagicNumber()){ - if(tOp->execSCANOPCONF(tcPtrI, totalLen, opCount)){ - /** - * - */ - theScanningOp->receiver_delivered(tOp); - } else if(info == ScanTabConf::EndOfData){ + if (tOp && tOp->checkMagicNumber()) + { + if (tcPtrI == RNIL && opCount == 0) theScanningOp->receiver_completed(tOp); - } - } - } - if (conf->requestInfo & ScanTabConf::EndOfData) { - if(theScanningOp->m_ordered) - theScanningOp->m_api_receivers_count = 0; - if(theScanningOp->m_api_receivers_count + - theScanningOp->m_conf_receivers_count + - theScanningOp->m_sent_receivers_count){ - abort(); + else if (tOp->execSCANOPCONF(tcPtrI, totalLen, opCount)) + theScanningOp->receiver_delivered(tOp); } } return 0; diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 304d1b904d4..345f2caac89 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -1571,7 +1571,13 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, NdbApiSignal tSignal(m_reference); tSignal.theReceiversBlockNumber = DBDICT; - if (alter) { + + LinearSectionPtr ptr[3]; + ptr[0].p = (Uint32*)m_buffer.get_data(); + ptr[0].sz = m_buffer.length() / 4; + int ret; + if (alter) + { 
AlterTableReq * const req = CAST_PTR(AlterTableReq, tSignal.getDataPtrSend()); @@ -1582,8 +1588,10 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, req->tableVersion = impl.m_version;; tSignal.theVerId_signalNumber = GSN_ALTER_TABLE_REQ; tSignal.theLength = AlterTableReq::SignalLength; + ret= alterTable(&tSignal, ptr); } - else { + else + { CreateTableReq * const req = CAST_PTR(CreateTableReq, tSignal.getDataPtrSend()); @@ -1591,25 +1599,21 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, req->senderData = 0; tSignal.theVerId_signalNumber = GSN_CREATE_TABLE_REQ; tSignal.theLength = CreateTableReq::SignalLength; - } - - LinearSectionPtr ptr[3]; - ptr[0].p = (Uint32*)m_buffer.get_data(); - ptr[0].sz = m_buffer.length() / 4; - - int ret = (alter) ? - alterTable(&tSignal, ptr) - : createTable(&tSignal, ptr); - - if (!alter && haveAutoIncrement) { - if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(), - autoIncrementValue)) { - if (ndb.theError.code == 0) { - m_error.code = 4336; - ndb.theError = m_error; - } else - m_error= ndb.theError; - ret = -1; // errorcode set in initialize_autoincrement + ret= createTable(&tSignal, ptr); + + if (ret) + return ret; + + if (haveAutoIncrement) { + if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(), + autoIncrementValue)) { + if (ndb.theError.code == 0) { + m_error.code = 4336; + ndb.theError = m_error; + } else + m_error= ndb.theError; + ret = -1; // errorcode set in initialize_autoincrement + } } } return ret; diff --git a/ndb/src/ndbapi/NdbResultSet.cpp b/ndb/src/ndbapi/NdbResultSet.cpp index f270584d227..d9d71464026 100644 --- a/ndb/src/ndbapi/NdbResultSet.cpp +++ b/ndb/src/ndbapi/NdbResultSet.cpp @@ -44,10 +44,10 @@ void NdbResultSet::init() { } -int NdbResultSet::nextResult(bool fetchAllowed) +int NdbResultSet::nextResult(bool fetchAllowed, bool forceSend) { int res; - if ((res = m_operation->nextResult(fetchAllowed)) == 0) { + if ((res = m_operation->nextResult(fetchAllowed, forceSend)) == 0) { // handle blobs NdbBlob* tBlob = m_operation->theBlobList; while (tBlob != 0) { @@ -67,9 +67,9 @@ int NdbResultSet::nextResult(bool fetchAllowed) return res; } -void NdbResultSet::close() +void NdbResultSet::close(bool forceSend) { - m_operation->closeScan(); + m_operation->closeScan(forceSend); } NdbOperation* @@ -98,6 +98,6 @@ NdbResultSet::deleteTuple(NdbConnection * takeOverTrans){ } int -NdbResultSet::restart(){ - return m_operation->restart(); +NdbResultSet::restart(bool forceSend){ + return m_operation->restart(forceSend); } diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp index 4b10ebb10cd..db0c294708d 100644 --- a/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/ndb/src/ndbapi/NdbScanOperation.cpp @@ -35,6 +35,8 @@ #include <signaldata/AttrInfo.hpp> #include <signaldata/TcKeyReq.hpp> +#define DEBUG_NEXT_RESULT 0 + NdbScanOperation::NdbScanOperation(Ndb* aNdb) : NdbOperation(aNdb), m_resultSet(0), @@ -275,6 +277,9 @@ NdbScanOperation::fix_receivers(Uint32 parallel){ void NdbScanOperation::receiver_delivered(NdbReceiver* tRec){ if(theError.code == 0){ + if(DEBUG_NEXT_RESULT) + ndbout_c("receiver_delivered"); + Uint32 idx = tRec->m_list_index; Uint32 last = m_sent_receivers_count - 1; if(idx != last){ @@ -298,6 +303,9 @@ NdbScanOperation::receiver_delivered(NdbReceiver* tRec){ void NdbScanOperation::receiver_completed(NdbReceiver* tRec){ if(theError.code == 0){ + if(DEBUG_NEXT_RESULT) + ndbout_c("receiver_completed"); + Uint32 idx = tRec->m_list_index; Uint32 last = m_sent_receivers_count - 1; if(idx != 
last){ @@ -445,12 +453,12 @@ NdbScanOperation::executeCursor(int nodeId){ return -1; } -#define DEBUG_NEXT_RESULT 0 -int NdbScanOperation::nextResult(bool fetchAllowed) +int NdbScanOperation::nextResult(bool fetchAllowed, bool forceSend) { if(m_ordered) - return ((NdbIndexScanOperation*)this)->next_result_ordered(fetchAllowed); + return ((NdbIndexScanOperation*)this)->next_result_ordered(fetchAllowed, + forceSend); /** * Check current receiver @@ -487,7 +495,8 @@ int NdbScanOperation::nextResult(bool fetchAllowed) TransporterFacade* tp = TransporterFacade::instance(); Guard guard(tp->theMutexPtr); Uint32 seq = theNdbCon->theNodeSequence; - if(seq == tp->getNodeSequence(nodeId) && send_next_scan(idx, false) == 0){ + if(seq == tp->getNodeSequence(nodeId) && send_next_scan(idx, false, + forceSend) == 0){ idx = m_current_api_receiver; last = m_api_receivers_count; @@ -578,8 +587,9 @@ int NdbScanOperation::nextResult(bool fetchAllowed) } int -NdbScanOperation::send_next_scan(Uint32 cnt, bool stopScanFlag){ - if(cnt > 0 || stopScanFlag){ +NdbScanOperation::send_next_scan(Uint32 cnt, bool stopScanFlag, + bool forceSend){ + if(cnt > 0){ NdbApiSignal tSignal(theNdb->theMyRef); tSignal.setSignal(GSN_SCAN_NEXTREQ); @@ -595,38 +605,57 @@ NdbScanOperation::send_next_scan(Uint32 cnt, bool stopScanFlag){ */ Uint32 last = m_sent_receivers_count; Uint32 * prep_array = (cnt > 21 ? m_prepared_receivers : theData + 4); + Uint32 sent = 0; for(Uint32 i = 0; i<cnt; i++){ NdbReceiver * tRec = m_api_receivers[i]; - m_sent_receivers[last+i] = tRec; - tRec->m_list_index = last+i; - prep_array[i] = tRec->m_tcPtrI; - tRec->prepareSend(); + if((prep_array[sent] = tRec->m_tcPtrI) != RNIL) + { + m_sent_receivers[last+sent] = tRec; + tRec->m_list_index = last+sent; + tRec->prepareSend(); + sent++; + } } - memcpy(&m_api_receivers[0], &m_api_receivers[cnt], cnt * sizeof(char*)); + memmove(m_api_receivers, m_api_receivers+cnt, + (theParallelism-cnt) * sizeof(char*)); - Uint32 nodeId = theNdbCon->theDBnode; - TransporterFacade * tp = TransporterFacade::instance(); - int ret; - if(cnt > 21){ - tSignal.setLength(4); - LinearSectionPtr ptr[3]; - ptr[0].p = prep_array; - ptr[0].sz = cnt; - ret = tp->sendSignal(&tSignal, nodeId, ptr, 1); - } else { - tSignal.setLength(4+cnt); - ret = tp->sendSignal(&tSignal, nodeId); + int ret = 0; + if(sent) + { + Uint32 nodeId = theNdbCon->theDBnode; + TransporterFacade * tp = TransporterFacade::instance(); + if(cnt > 21){ + tSignal.setLength(4); + LinearSectionPtr ptr[3]; + ptr[0].p = prep_array; + ptr[0].sz = sent; + ret = tp->sendSignal(&tSignal, nodeId, ptr, 1); + } else { + tSignal.setLength(4+sent); + ret = tp->sendSignal(&tSignal, nodeId); + } } + + if (!ret) checkForceSend(forceSend); - m_sent_receivers_count = last + cnt + stopScanFlag; + m_sent_receivers_count = last + sent; m_api_receivers_count -= cnt; m_current_api_receiver = 0; - + return ret; } return 0; } +void NdbScanOperation::checkForceSend(bool forceSend) +{ + if (forceSend) { + TransporterFacade::instance()->forceSend(theNdb->theNdbBlockNumber); + } else { + TransporterFacade::instance()->checkForceSend(theNdb->theNdbBlockNumber); + }//if +} + int NdbScanOperation::prepareSend(Uint32 TC_ConnectPtr, Uint64 TransactionId) { @@ -642,7 +671,7 @@ NdbScanOperation::doSend(int ProcessorId) return 0; } -void NdbScanOperation::closeScan() +void NdbScanOperation::closeScan(bool forceSend) { if(m_transConnection){ if(DEBUG_NEXT_RESULT) @@ -657,7 +686,7 @@ void NdbScanOperation::closeScan() TransporterFacade* tp = 
TransporterFacade::instance(); Guard guard(tp->theMutexPtr); - close_impl(tp); + close_impl(tp, forceSend); } while(0); @@ -673,6 +702,7 @@ NdbScanOperation::execCLOSE_SCAN_REP(){ m_api_receivers_count = 0; m_conf_receivers_count = 0; m_sent_receivers_count = 0; + m_current_api_receiver = m_ordered ? theParallelism : 0; } void NdbScanOperation::release() @@ -1293,7 +1323,8 @@ NdbIndexScanOperation::compare(Uint32 skip, Uint32 cols, } int -NdbIndexScanOperation::next_result_ordered(bool fetchAllowed){ +NdbIndexScanOperation::next_result_ordered(bool fetchAllowed, + bool forceSend){ Uint32 u_idx = 0, u_last = 0; Uint32 s_idx = m_current_api_receiver; // first sorted @@ -1319,7 +1350,8 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed){ Guard guard(tp->theMutexPtr); Uint32 seq = theNdbCon->theNodeSequence; Uint32 nodeId = theNdbCon->theDBnode; - if(seq == tp->getNodeSequence(nodeId) && !send_next_scan_ordered(s_idx)){ + if(seq == tp->getNodeSequence(nodeId) && + !send_next_scan_ordered(s_idx, forceSend)){ Uint32 tmp = m_sent_receivers_count; s_idx = m_current_api_receiver; while(m_sent_receivers_count > 0 && !theError.code){ @@ -1408,14 +1440,26 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed){ } int -NdbIndexScanOperation::send_next_scan_ordered(Uint32 idx){ +NdbIndexScanOperation::send_next_scan_ordered(Uint32 idx, bool forceSend){ if(idx == theParallelism) return 0; + NdbReceiver* tRec = m_api_receivers[idx]; NdbApiSignal tSignal(theNdb->theMyRef); tSignal.setSignal(GSN_SCAN_NEXTREQ); + Uint32 last = m_sent_receivers_count; Uint32* theData = tSignal.getDataPtrSend(); + Uint32* prep_array = theData + 4; + + m_current_api_receiver = idx + 1; + if((prep_array[0] = tRec->m_tcPtrI) == RNIL) + { + if(DEBUG_NEXT_RESULT) + ndbout_c("receiver completed, don't send"); + return 0; + } + theData[0] = theNdbCon->theTCConPtr; theData[1] = 0; Uint64 transId = theNdbCon->theTransactionId; @@ -1425,35 +1469,35 @@ NdbIndexScanOperation::send_next_scan_ordered(Uint32 idx){ /** * Prepare ops */ - Uint32 last = m_sent_receivers_count; - Uint32 * prep_array = theData + 4; - - NdbReceiver * tRec = m_api_receivers[idx]; m_sent_receivers[last] = tRec; tRec->m_list_index = last; - prep_array[0] = tRec->m_tcPtrI; tRec->prepareSend(); - m_sent_receivers_count = last + 1; - m_current_api_receiver = idx + 1; Uint32 nodeId = theNdbCon->theDBnode; TransporterFacade * tp = TransporterFacade::instance(); tSignal.setLength(4+1); - return tp->sendSignal(&tSignal, nodeId); + int ret= tp->sendSignal(&tSignal, nodeId); + if (!ret) checkForceSend(forceSend); + return ret; } int -NdbScanOperation::close_impl(TransporterFacade* tp){ +NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend){ Uint32 seq = theNdbCon->theNodeSequence; Uint32 nodeId = theNdbCon->theDBnode; - if(seq != tp->getNodeSequence(nodeId)){ + if(seq != tp->getNodeSequence(nodeId)) + { theNdbCon->theReleaseOnClose = true; return -1; } - while(theError.code == 0 && m_sent_receivers_count){ + /** + * Wait for outstanding + */ + while(theError.code == 0 && m_sent_receivers_count) + { theNdb->theWaiter.m_node = nodeId; theNdb->theWaiter.m_state = WAIT_SCAN; int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT); @@ -1471,18 +1515,52 @@ NdbScanOperation::close_impl(TransporterFacade* tp){ } } - if(m_api_receivers_count+m_conf_receivers_count){ - // Send close scan - if(send_next_scan(0, true) == -1){ // Close scan - theNdbCon->theReleaseOnClose = true; - return -1; - } + /** + * move all conf'ed into api + * so that 
send_next_scan can check if they needs to be closed + */ + Uint32 api = m_api_receivers_count; + Uint32 conf = m_conf_receivers_count; + + if(m_ordered) + { + /** + * Ordered scan, keep the m_api_receivers "to the right" + */ + memmove(m_api_receivers, m_api_receivers+m_current_api_receiver, + (theParallelism - m_current_api_receiver) * sizeof(char*)); + api = (theParallelism - m_current_api_receiver); + m_api_receivers_count = api; + } + + if(DEBUG_NEXT_RESULT) + ndbout_c("close_impl: [order api conf sent curr parr] %d %d %d %d %d %d", + m_ordered, api, conf, + m_sent_receivers_count, m_current_api_receiver, theParallelism); + + if(api+conf) + { + /** + * There's something to close + * setup m_api_receivers (for send_next_scan) + */ + memcpy(m_api_receivers+api, m_conf_receivers, conf * sizeof(char*)); + m_api_receivers_count = api + conf; + m_conf_receivers_count = 0; + } + + // Send close scan + if(send_next_scan(api+conf, true, forceSend) == -1) + { + theNdbCon->theReleaseOnClose = true; + return -1; } /** * wait for close scan conf */ - while(m_sent_receivers_count+m_api_receivers_count+m_conf_receivers_count){ + while(m_sent_receivers_count+m_api_receivers_count+m_conf_receivers_count) + { theNdb->theWaiter.m_node = nodeId; theNdb->theWaiter.m_state = WAIT_SCAN; int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT); @@ -1499,6 +1577,7 @@ NdbScanOperation::close_impl(TransporterFacade* tp){ return -1; } } + return 0; } @@ -1520,7 +1599,7 @@ NdbScanOperation::reset_receivers(Uint32 parallell, Uint32 ordered){ } int -NdbScanOperation::restart() +NdbScanOperation::restart(bool forceSend) { TransporterFacade* tp = TransporterFacade::instance(); @@ -1529,7 +1608,7 @@ NdbScanOperation::restart() { int res; - if((res= close_impl(tp))) + if((res= close_impl(tp, forceSend))) { return res; } @@ -1548,13 +1627,13 @@ NdbScanOperation::restart() } int -NdbIndexScanOperation::reset_bounds(){ +NdbIndexScanOperation::reset_bounds(bool forceSend){ int res; { TransporterFacade* tp = TransporterFacade::instance(); Guard guard(tp->theMutexPtr); - res= close_impl(tp); + res= close_impl(tp, forceSend); } if(!res) diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp index 4c42fe1aeef..b2043b2c2c1 100644 --- a/ndb/src/ndbapi/ndb_cluster_connection.cpp +++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp @@ -45,7 +45,6 @@ Ndb_cluster_connection::Ndb_cluster_connection(const char *connect_string) else m_connect_string= 0; m_config_retriever= 0; - m_local_config= 0; m_connect_thread= 0; m_connect_callback= 0; @@ -125,38 +124,31 @@ int Ndb_cluster_connection::connect(int reconnect) do { if (m_config_retriever == 0) { - if (m_local_config == 0) { - m_local_config= new LocalConfig(); - if (!m_local_config->init(m_connect_string,0)) { - ndbout_c("Configuration error: Unable to retrieve local config"); - m_local_config->printError(); - m_local_config->printUsage(); - DBUG_RETURN(-1); - } - } m_config_retriever= - new ConfigRetriever(*m_local_config, NDB_VERSION, NODE_TYPE_API); + new ConfigRetriever(m_connect_string, NDB_VERSION, NODE_TYPE_API); + if (m_config_retriever->hasError()) + { + printf("Could not connect initialize handle to management server", + m_config_retriever->getErrorString()); + DBUG_RETURN(-1); + } } else if (reconnect == 0) DBUG_RETURN(0); if (reconnect) { - int r= m_config_retriever->do_connect(1); + int r= m_config_retriever->do_connect(0,0,0); if (r == 1) DBUG_RETURN(1); // mgmt server not up yet if (r == -1) break; } else - 
if(m_config_retriever->do_connect() == -1) + if(m_config_retriever->do_connect(12,5,1) == -1) break; - Uint32 nodeId = m_config_retriever->allocNodeId(); - for(Uint32 i = 0; nodeId == 0 && i<5; i++){ - NdbSleep_SecSleep(3); - nodeId = m_config_retriever->allocNodeId(); - } + Uint32 nodeId = m_config_retriever->allocNodeId(4/*retries*/,3/*delay*/); if(nodeId == 0) break; ndb_mgm_configuration * props = m_config_retriever->getConfig(); @@ -200,8 +192,6 @@ Ndb_cluster_connection::~Ndb_cluster_connection() my_free(m_connect_string,MYF(MY_ALLOW_ZERO_PTR)); if (m_config_retriever) delete m_config_retriever; - if (m_local_config) - delete m_local_config; DBUG_VOID_RETURN; } diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index 1a0d8132d71..bc49358cc63 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -53,6 +53,9 @@ typedef struct ErrorBundle { #define NI ndberror_cl_function_not_implemented #define UE ndberror_cl_unknown_error_code +static const char REDO_BUFFER_MSG[]= +"REDO log buffers overloaded, consult online manual (increase RedoBuffer, and|or decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)"; + static const char* empty_string = ""; static @@ -137,9 +140,8 @@ ErrorBundle ErrorCodes[] = { { 805, TR, "Out of attrinfo records in tuple manager" }, { 830, TR, "Out of add fragment operation records" }, { 873, TR, "Out of attrinfo records for scan in tuple manager" }, - { 1217, TR, "1217" }, - { 1219, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" }, - { 1220, TR, "1220" }, + { 1217, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" }, + { 1220, TR, REDO_BUFFER_MSG }, { 1222, TR, "Out of transaction markers in LQH" }, { 4021, TR, "Out of Send Buffer space in NDB API" }, { 4022, TR, "Out of Send Buffer space in NDB API" }, @@ -165,14 +167,13 @@ ErrorBundle ErrorCodes[] = { { 297, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout, temporary!! */ { 237, TO, "Transaction had timed out when trying to commit it" }, - /** * OverloadError */ - { 410, OL, "Out of log file space temporarily" }, + { 410, OL, REDO_BUFFER_MSG }, { 677, OL, "Index UNDO buffers overloaded (increase UndoIndexBuffer)" }, { 891, OL, "Data UNDO buffers overloaded (increase UndoDataBuffer)" }, - { 1221, OL, "REDO log buffers overloaded (increase RedoBuffer)" }, + { 1221, OL, REDO_BUFFER_MSG }, { 4006, OL, "Connect failure - out of connection objects (increase MaxNoOfConcurrentTransactions)" }, @@ -425,7 +426,8 @@ ErrorBundle ErrorCodes[] = { { 4267, IE, "Corrupted blob value" }, { 4268, IE, "Error in blob head update forced rollback of transaction" }, { 4268, IE, "Unknown blob error" }, - { 4269, IE, "No connection to ndb management server" } + { 4269, IE, "No connection to ndb management server" }, + { 4335, AE, "Only one autoincrement column allowed per table. Having a table without primary key uses an autoincremented hidden key, i.e. 
a table without a primary key can not have an autoincremented column" } }; static diff --git a/ndb/test/include/HugoTransactions.hpp b/ndb/test/include/HugoTransactions.hpp index 19e4cb43336..b833f2ac629 100644 --- a/ndb/test/include/HugoTransactions.hpp +++ b/ndb/test/include/HugoTransactions.hpp @@ -36,15 +36,21 @@ public: bool allowConstraintViolation = true, int doSleep = 0, bool oneTrans = false); + int scanReadRecords(Ndb*, int records, int abort = 0, int parallelism = 0, - bool committed = false); - int scanReadCommittedRecords(Ndb*, - int records, - int abort = 0, - int parallelism = 0); + NdbOperation::LockMode = NdbOperation::LM_Read); + + int scanReadRecords(Ndb*, + const NdbDictionary::Index*, + int records, + int abort = 0, + int parallelism = 0, + NdbOperation::LockMode = NdbOperation::LM_Read, + bool sorted = false); + int pkReadRecords(Ndb*, int records, int batchsize = 1, diff --git a/ndb/test/include/NdbRestarter.hpp b/ndb/test/include/NdbRestarter.hpp index 114726f6a2b..19a88b4f8ad 100644 --- a/ndb/test/include/NdbRestarter.hpp +++ b/ndb/test/include/NdbRestarter.hpp @@ -87,8 +87,6 @@ protected: bool connected; BaseString addr; - BaseString host; - int port; NdbMgmHandle handle; ndb_mgm_configuration * m_config; protected: diff --git a/ndb/test/include/UtilTransactions.hpp b/ndb/test/include/UtilTransactions.hpp index 37cd99550a5..23902f3b317 100644 --- a/ndb/test/include/UtilTransactions.hpp +++ b/ndb/test/include/UtilTransactions.hpp @@ -53,11 +53,11 @@ public: int selectCount(Ndb*, int parallelism = 0, int* count_rows = NULL, - ScanLock lock = SL_Read, + NdbOperation::LockMode lm = NdbOperation::LM_CommittedRead, NdbConnection* pTrans = NULL); int scanReadRecords(Ndb*, int parallelism, - bool exclusive, + NdbOperation::LockMode lm, int records, int noAttribs, int* attrib_list, diff --git a/ndb/test/ndbapi/testBlobs.cpp b/ndb/test/ndbapi/testBlobs.cpp index efa0811aa39..4b532856709 100644 --- a/ndb/test/ndbapi/testBlobs.cpp +++ b/ndb/test/ndbapi/testBlobs.cpp @@ -22,6 +22,7 @@ #include <NdbMain.h> #include <NdbOut.hpp> #include <NdbTest.hpp> +#include <NdbTick.h> struct Bcol { bool m_nullable; @@ -59,6 +60,9 @@ struct Opt { bool m_oneblob; Bcol m_blob1; Bcol m_blob2; + // perf + const char* m_tnameperf; + unsigned m_rowsperf; // bugs int m_bug; int (*m_bugtest)(); @@ -84,6 +88,9 @@ struct Opt { m_oneblob(false), m_blob1(false, 7, 1137, 10), m_blob2(true, 99, 55, 1), + // perf + m_tnameperf("TBLOB2"), + m_rowsperf(10000), // bugs m_bug(0), m_bugtest(0) { @@ -107,6 +114,7 @@ printusage() << " -loop N loop N times 0=forever [" << d.m_loop << "]" << endl << " -parts N max parts in blob value [" << d.m_parts << "]" << endl << " -rows N number of rows [" << d.m_rows << "]" << endl + << " -rowsperf N rows for performace test [" << d.m_rowsperf << "]" << endl << " -seed N random seed 0=loop number [" << d.m_seed << "]" << endl << " -skip xxx skip given tests (see list) [no tests]" << endl << " -test xxx only given tests (see list) [all tests]" << endl @@ -118,6 +126,7 @@ printusage() << " i hash index ops" << endl << " s table scans" << endl << " r ordered index scans" << endl + << " p performance test" << endl << "additional flags for test/skip" << endl << " u update existing blob value" << endl << " n normal insert and update" << endl @@ -1381,6 +1390,292 @@ testmain() return 0; } +// separate performance test + +struct Tmr { // stolen from testOIBasic + Tmr() { + clr(); + } + void clr() { + m_on = m_ms = m_cnt = m_time[0] = m_text[0] = 0; + } + void on() { + assert(m_on == 
0); + m_on = NdbTick_CurrentMillisecond(); + } + void off(unsigned cnt = 0) { + NDB_TICKS off = NdbTick_CurrentMillisecond(); + assert(m_on != 0 && off >= m_on); + m_ms += off - m_on; + m_cnt += cnt; + m_on = 0; + } + const char* time() { + if (m_cnt == 0) + sprintf(m_time, "%u ms", m_ms); + else + sprintf(m_time, "%u ms per %u ( %u ms per 1000 )", m_ms, m_cnt, (1000 * m_ms) / m_cnt); + return m_time; + } + const char* pct (const Tmr& t1) { + if (0 < t1.m_ms) + sprintf(m_text, "%u pct", (100 * m_ms) / t1.m_ms); + else + sprintf(m_text, "[cannot measure]"); + return m_text; + } + const char* over(const Tmr& t1) { + if (0 < t1.m_ms) { + if (t1.m_ms <= m_ms) + sprintf(m_text, "%u pct", (100 * (m_ms - t1.m_ms)) / t1.m_ms); + else + sprintf(m_text, "-%u pct", (100 * (t1.m_ms - m_ms)) / t1.m_ms); + } else + sprintf(m_text, "[cannot measure]"); + return m_text; + } + NDB_TICKS m_on; + unsigned m_ms; + unsigned m_cnt; + char m_time[100]; + char m_text[100]; +}; + +static int +testperf() +{ + if (! testcase('p')) + return 0; + DBG("=== perf test ==="); + g_ndb = new Ndb("TEST_DB"); + CHK(g_ndb->init() == 0); + CHK(g_ndb->waitUntilReady() == 0); + g_dic = g_ndb->getDictionary(); + NdbDictionary::Table tab(g_opt.m_tnameperf); + if (g_dic->getTable(tab.getName()) != 0) + CHK(g_dic->dropTable(tab) == 0); + // col A - pk + { NdbDictionary::Column col("A"); + col.setType(NdbDictionary::Column::Unsigned); + col.setPrimaryKey(true); + tab.addColumn(col); + } + // col B - char 20 + { NdbDictionary::Column col("B"); + col.setType(NdbDictionary::Column::Char); + col.setLength(20); + col.setNullable(true); + tab.addColumn(col); + } + // col C - text + { NdbDictionary::Column col("C"); + col.setType(NdbDictionary::Column::Text); + col.setInlineSize(20); + col.setPartSize(512); + col.setStripeSize(1); + col.setNullable(true); + tab.addColumn(col); + } + // create + CHK(g_dic->createTable(tab) == 0); + Uint32 cA = 0, cB = 1, cC = 2; + // timers + Tmr t1; + Tmr t2; + // insert char (one trans) + { + DBG("--- insert char ---"); + t1.on(); + CHK((g_con = g_ndb->startTransaction()) != 0); + for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) { + CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0); + CHK(g_opr->insertTuple() == 0); + CHK(g_opr->equal(cA, (char*)&k) == 0); + CHK(g_opr->setValue(cB, "b") == 0); + CHK(g_con->execute(NoCommit) == 0); + } + t1.off(g_opt.m_rowsperf); + CHK(g_con->execute(Rollback) == 0); + DBG(t1.time()); + g_opr = 0; + g_con = 0; + } + // insert text (one trans) + { + DBG("--- insert text ---"); + t2.on(); + CHK((g_con = g_ndb->startTransaction()) != 0); + for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) { + CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0); + CHK(g_opr->insertTuple() == 0); + CHK(g_opr->equal(cA, (char*)&k) == 0); + CHK((g_bh1 = g_opr->getBlobHandle(cC)) != 0); + CHK((g_bh1->setValue("c", 1) == 0)); + CHK(g_con->execute(NoCommit) == 0); + } + t2.off(g_opt.m_rowsperf); + CHK(g_con->execute(Rollback) == 0); + DBG(t2.time()); + g_bh1 = 0; + g_opr = 0; + g_con = 0; + } + // insert overhead + DBG("insert overhead: " << t2.over(t1)); + t1.clr(); + t2.clr(); + // insert + { + DBG("--- insert for read test ---"); + unsigned n = 0; + CHK((g_con = g_ndb->startTransaction()) != 0); + for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) { + CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0); + CHK(g_opr->insertTuple() == 0); + CHK(g_opr->equal(cA, (char*)&k) == 0); + CHK(g_opr->setValue(cB, "b") == 0); + CHK((g_bh1 = g_opr->getBlobHandle(cC)) != 0); + CHK((g_bh1->setValue("c", 
1) == 0)); + if (++n == g_opt.m_batch) { + CHK(g_con->execute(Commit) == 0); + g_ndb->closeTransaction(g_con); + CHK((g_con = g_ndb->startTransaction()) != 0); + n = 0; + } + } + if (n != 0) { + CHK(g_con->execute(Commit) == 0); + n = 0; + } + g_bh1 = 0; + g_opr = 0; + g_con = 0; + } + // pk read char (one trans) + { + DBG("--- pk read char ---"); + CHK((g_con = g_ndb->startTransaction()) != 0); + Uint32 a; + char b[20]; + t1.on(); + for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) { + CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0); + CHK(g_opr->readTuple() == 0); + CHK(g_opr->equal(cA, (char*)&k) == 0); + CHK(g_opr->getValue(cA, (char*)&a) != 0); + CHK(g_opr->getValue(cB, b) != 0); + a = (Uint32)-1; + b[0] = 0; + CHK(g_con->execute(NoCommit) == 0); + CHK(a == k && strcmp(b, "b") == 0); + } + CHK(g_con->execute(Commit) == 0); + t1.off(g_opt.m_rowsperf); + DBG(t1.time()); + g_opr = 0; + g_con = 0; + } + // pk read text (one trans) + { + DBG("--- pk read text ---"); + CHK((g_con = g_ndb->startTransaction()) != 0); + Uint32 a; + char c[20]; + t2.on(); + for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) { + CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0); + CHK(g_opr->readTuple() == 0); + CHK(g_opr->equal(cA, (char*)&k) == 0); + CHK(g_opr->getValue(cA, (char*)&a) != 0); + CHK((g_bh1 = g_opr->getBlobHandle(cC)) != 0); + a = (Uint32)-1; + c[0] = 0; + CHK(g_con->execute(NoCommit) == 0); + Uint32 m = 20; + CHK(g_bh1->readData(c, m) == 0); + CHK(a == k && m == 1 && strcmp(c, "c") == 0); + } + CHK(g_con->execute(Commit) == 0); + t2.off(g_opt.m_rowsperf); + DBG(t2.time()); + g_opr = 0; + g_con = 0; + } + // pk read overhead + DBG("pk read overhead: " << t2.over(t1)); + t1.clr(); + t2.clr(); + // scan read char + { + DBG("--- scan read char ---"); + NdbResultSet* rs; + Uint32 a; + char b[20]; + CHK((g_con = g_ndb->startTransaction()) != 0); + CHK((g_ops = g_con->getNdbScanOperation(tab.getName())) != 0); + CHK((rs = g_ops->readTuples(NdbScanOperation::LM_Read)) != 0); + CHK(g_ops->getValue(cA, (char*)&a) != 0); + CHK(g_ops->getValue(cB, b) != 0); + CHK(g_con->execute(NoCommit) == 0); + unsigned n = 0; + t1.on(); + while (1) { + a = (Uint32)-1; + b[0] = 0; + int ret; + CHK((ret = rs->nextResult(true)) == 0 || ret == 1); + if (ret == 1) + break; + CHK(a < g_opt.m_rowsperf && strcmp(b, "b") == 0); + n++; + } + CHK(n == g_opt.m_rowsperf); + t1.off(g_opt.m_rowsperf); + DBG(t1.time()); + g_ops = 0; + g_con = 0; + } + // scan read text + { + DBG("--- read text ---"); + NdbResultSet* rs; + Uint32 a; + char c[20]; + CHK((g_con = g_ndb->startTransaction()) != 0); + CHK((g_ops = g_con->getNdbScanOperation(tab.getName())) != 0); + CHK((rs = g_ops->readTuples(NdbScanOperation::LM_Read)) != 0); + CHK(g_ops->getValue(cA, (char*)&a) != 0); + CHK((g_bh1 = g_ops->getBlobHandle(cC)) != 0); + CHK(g_con->execute(NoCommit) == 0); + unsigned n = 0; + t2.on(); + while (1) { + a = (Uint32)-1; + c[0] = 0; + int ret; + CHK((ret = rs->nextResult(true)) == 0 || ret == 1); + if (ret == 1) + break; + Uint32 m = 20; + CHK(g_bh1->readData(c, m) == 0); + CHK(a < g_opt.m_rowsperf && m == 1 && strcmp(c, "c") == 0); + n++; + } + CHK(n == g_opt.m_rowsperf); + t2.off(g_opt.m_rowsperf); + DBG(t2.time()); + g_bh1 = 0; + g_ops = 0; + g_con = 0; + } + // scan read overhead + DBG("scan read overhead: " << t2.over(t1)); + t1.clr(); + t2.clr(); + delete g_ndb; + return 0; +} + // bug tests static int @@ -1498,6 +1793,12 @@ NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535) continue; } } + if (strcmp(arg, 
"-rowsperf") == 0) { + if (++argv, --argc > 0) { + g_opt.m_rowsperf = atoi(argv[0]); + continue; + } + } if (strcmp(arg, "-seed") == 0) { if (++argv, --argc > 0) { g_opt.m_seed = atoi(argv[0]); @@ -1558,7 +1859,7 @@ NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535) strcat(b, "r"); g_opt.m_skip = strdup(b); } - if (testmain() == -1) { + if (testmain() == -1 || testperf() == -1) { ndbout << "line " << __LINE__ << " FAIL loop=" << g_loop << endl; return NDBT_ProgramExit(NDBT_FAILED); } diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp index 712ab2e4d25..0a43bb02fff 100644 --- a/ndb/test/ndbapi/testDict.cpp +++ b/ndb/test/ndbapi/testDict.cpp @@ -1480,8 +1480,10 @@ runTestDictionaryPerf(NDBT_Context* ctx, NDBT_Step* step){ } int runFailAddFragment(NDBT_Context* ctx, NDBT_Step* step){ + static int acclst[] = { 3001 }; static int tuplst[] = { 4007, 4008, 4009, 4010, 4011, 4012 }; static int tuxlst[] = { 12001, 12002, 12003, 12004, 12005, 12006 }; + static unsigned acccnt = sizeof(acclst)/sizeof(acclst[0]); static unsigned tupcnt = sizeof(tuplst)/sizeof(tuplst[0]); static unsigned tuxcnt = sizeof(tuxlst)/sizeof(tuxlst[0]); @@ -1509,6 +1511,19 @@ int runFailAddFragment(NDBT_Context* ctx, NDBT_Step* step){ (void)pDic->dropTable(tab.getName()); for (int l = 0; l < loops; l++) { + for (unsigned i0 = 0; i0 < acccnt; i0++) { + unsigned j = (l == 0 ? i0 : myRandom48(acccnt)); + int errval = acclst[j]; + g_info << "insert error node=" << nodeId << " value=" << errval << endl; + CHECK2(restarter.insertErrorInNode(nodeId, errval) == 0, + "failed to set error insert"); + CHECK2(pDic->createTable(tab) != 0, + "failed to fail after error insert " << errval); + CHECK2(pDic->createTable(tab) == 0, + pDic->getNdbError()); + CHECK2(pDic->dropTable(tab.getName()) == 0, + pDic->getNdbError()); + } for (unsigned i1 = 0; i1 < tupcnt; i1++) { unsigned j = (l == 0 ? 
i1 : myRandom48(tupcnt)); int errval = tuplst[j]; @@ -1638,7 +1653,7 @@ TESTCASE("DictionaryPerf", INITIALIZER(runTestDictionaryPerf); } TESTCASE("FailAddFragment", - "Fail add fragment or attribute in TUP or TUX\n"){ + "Fail add fragment or attribute in ACC or TUP or TUX\n"){ INITIALIZER(runFailAddFragment); } NDBT_TESTSUITE_END(testDict); @@ -1650,5 +1665,3 @@ int main(int argc, const char** argv){ myRandom48Init(NdbTick_CurrentMillisecond()); return testDict.execute(argc, argv); } - - diff --git a/ndb/test/ndbapi/testReadPerf.cpp b/ndb/test/ndbapi/testReadPerf.cpp index 380a809ad00..3adcb5a2d9b 100644 --- a/ndb/test/ndbapi/testReadPerf.cpp +++ b/ndb/test/ndbapi/testReadPerf.cpp @@ -391,8 +391,15 @@ run_read(){ void print_result(){ + int tmp = 1; + tmp *= g_paramters[P_RANGE].value; + tmp *= g_paramters[P_LOOPS].value; + + int t, t2; for(int i = 0; i<P_OP_TYPES; i++){ - g_err.println("%s avg: %u us/row", g_ops[i], - (1000*g_times[i])/(g_paramters[P_RANGE].value*g_paramters[P_LOOPS].value)); + g_err << g_ops[i] << " avg: " + << (int)((1000*g_times[i])/tmp) + << " us/row (" + << (1000 * tmp)/g_times[i] << " rows / sec)" << endl; } } diff --git a/ndb/test/ndbapi/testScan.cpp b/ndb/test/ndbapi/testScan.cpp index 0cd30dfefde..51913e8fbf9 100644 --- a/ndb/test/ndbapi/testScan.cpp +++ b/ndb/test/ndbapi/testScan.cpp @@ -242,8 +242,9 @@ int runScanReadCommitted(NDBT_Context* ctx, NDBT_Step* step){ HugoTransactions hugoTrans(*ctx->getTab()); while (i<loops && !ctx->isTestStopped()) { g_info << i << ": "; - if (hugoTrans.scanReadCommittedRecords(GETNDB(step), records, - abort, parallelism) != 0){ + if (hugoTrans.scanReadRecords(GETNDB(step), records, + abort, parallelism, + NdbOperation::LM_CommittedRead) != 0){ return NDBT_FAILED; } i++; @@ -639,7 +640,7 @@ int runCheckGetValue(NDBT_Context* ctx, NDBT_Step* step){ g_info << (unsigned)i << endl; if(utilTrans.scanReadRecords(GETNDB(step), parallelism, - false, + NdbOperation::LM_Read, records, alist.attriblist[i]->numAttribs, alist.attriblist[i]->attribs) != 0){ @@ -647,7 +648,7 @@ int runCheckGetValue(NDBT_Context* ctx, NDBT_Step* step){ } if(utilTrans.scanReadRecords(GETNDB(step), parallelism, - true, + NdbOperation::LM_Read, records, alist.attriblist[i]->numAttribs, alist.attriblist[i]->attribs) != 0){ diff --git a/ndb/test/ndbapi/testScanPerf.cpp b/ndb/test/ndbapi/testScanPerf.cpp index 003fc67179f..ee2a92e88a9 100644 --- a/ndb/test/ndbapi/testScanPerf.cpp +++ b/ndb/test/ndbapi/testScanPerf.cpp @@ -39,8 +39,9 @@ struct Parameter { #define P_LOOPS 8 #define P_CREATE 9 #define P_LOAD 10 +#define P_RESET 11 -#define P_MAX 11 +#define P_MAX 12 static Parameter @@ -55,7 +56,8 @@ g_paramters[] = { { "size", 1000000, 1, ~0 }, { "iterations", 3, 1, ~0 }, { "create_drop", 1, 0, 1 }, - { "data", 1, 0, 1 } + { "data", 1, 0, 1 }, + { "q-reset bounds", 0, 1, 0 } }; static Ndb* g_ndb = 0; @@ -219,21 +221,29 @@ run_scan(){ NDB_TICKS start1, stop; int sum_time= 0; + int sample_rows = 0; + NDB_TICKS sample_start = NdbTick_CurrentMillisecond(); + Uint32 tot = g_paramters[P_ROWS].value; + if(g_paramters[P_BOUND].value == 2 || g_paramters[P_FILT].value == 2) + iter *= g_paramters[P_ROWS].value; + + NdbScanOperation * pOp = 0; + NdbIndexScanOperation * pIOp = 0; + NdbConnection * pTrans = 0; + NdbResultSet * rs = 0; + int check = 0; + for(int i = 0; i<iter; i++){ start1 = NdbTick_CurrentMillisecond(); - NdbConnection * pTrans = g_ndb->startTransaction(); + pTrans = pTrans ? 
pTrans : g_ndb->startTransaction(); if(!pTrans){ g_err << "Failed to start transaction" << endl; err(g_ndb->getNdbError()); return -1; } - NdbScanOperation * pOp; - NdbIndexScanOperation * pIOp; - - NdbResultSet * rs; int par = g_paramters[P_PARRA].value; int bat = g_paramters[P_BATCH].value; NdbScanOperation::LockMode lm; @@ -256,9 +266,17 @@ run_scan(){ assert(pOp); rs = pOp->readTuples(lm, bat, par); } else { - pOp = pIOp = pTrans->getNdbIndexScanOperation(g_indexname, g_tablename); - bool ord = g_paramters[P_ACCESS].value == 2; - rs = pIOp->readTuples(lm, bat, par, ord); + if(g_paramters[P_RESET].value == 0 || pIOp == 0) + { + pOp= pIOp= pTrans->getNdbIndexScanOperation(g_indexname, g_tablename); + bool ord = g_paramters[P_ACCESS].value == 2; + rs = pIOp->readTuples(lm, bat, par, ord); + } + else + { + pIOp->reset_bounds(); + } + switch(g_paramters[P_BOUND].value){ case 0: // All break; @@ -268,20 +286,22 @@ run_scan(){ case 2: { // 1 row default: assert(g_table->getNoOfPrimaryKeys() == 1); // only impl. so far - abort(); -#if 0 int tot = g_paramters[P_ROWS].value; int row = rand() % tot; +#if 0 fix_eq_bound(pIOp, row); +#else + pIOp->setBound((Uint32)0, NdbIndexScanOperation::BoundEQ, &row); #endif break; } } + if(g_paramters[P_RESET].value == 1) + goto execute; } assert(pOp); assert(rs); - int check = 0; switch(g_paramters[P_FILT].value){ case 0: // All check = pOp->interpret_exit_ok(); @@ -313,7 +333,7 @@ run_scan(){ for(int i = 0; i<g_table->getNoOfColumns(); i++){ pOp->getValue(i); } - +execute: int rows = 0; check = pTrans->execute(NoCommit); assert(check == 0); @@ -334,19 +354,29 @@ run_scan(){ return -1; } assert(check == 1); - g_info << "Found " << rows << " rows" << endl; - - pTrans->close(); - + if(g_paramters[P_RESET].value == 0) + { + pTrans->close(); + pTrans = 0; + } stop = NdbTick_CurrentMillisecond(); + int time_passed= (int)(stop - start1); - g_err.println("Time: %d ms = %u rows/sec", time_passed, - (1000*tot)/time_passed); + sample_rows += rows; sum_time+= time_passed; + + if(sample_rows >= tot) + { + int sample_time = (int)(stop - sample_start); + g_info << "Found " << sample_rows << " rows" << endl; + g_err.println("Time: %d ms = %u rows/sec", sample_time, + (1000*sample_rows)/sample_time); + sample_rows = 0; + sample_start = stop; + } } - sum_time= sum_time / iter; - - g_err.println("Avg time: %d ms = %u rows/sec", sum_time, - (1000*tot)/sum_time); + + g_err.println("Avg time: %d ms = %u rows/sec", sum_time/iter, + (1000*tot*iter)/sum_time); return 0; } diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 8d7e8a06c72..aa38fb4763c 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -222,6 +222,10 @@ max-time: 500 cmd: testScan args: -n ScanRead488 -l 10 T6 +max-time: 500 +cmd: testScan +args: -n ScanRead488Timeout -l 10 T6 + max-time: 600 cmd: testScan args: -n ScanRead40 -l 100 T2 diff --git a/ndb/test/run-test/main.cpp b/ndb/test/run-test/main.cpp index e5f73bc6a5c..ac7710d9546 100644 --- a/ndb/test/run-test/main.cpp +++ b/ndb/test/run-test/main.cpp @@ -538,15 +538,19 @@ connect_ndb_mgm(atrt_process & proc){ } BaseString tmp = proc.m_hostname; tmp.appfmt(":%d", proc.m_ndb_mgm_port); - time_t start = time(0); - const time_t max_connect_time = 30; - do { - if(ndb_mgm_connect(handle, tmp.c_str()) != -1){ - proc.m_ndb_mgm_handle = handle; - return true; - } - sleep(1); - } while(time(0) < (start + max_connect_time)); + + if (ndb_mgm_set_connectstring(handle,tmp.c_str())) 
+ { + g_logger.critical("Unable to create parse connectstring"); + return false; + } + + if(ndb_mgm_connect(handle, 30, 1, 0) != -1) + { + proc.m_ndb_mgm_handle = handle; + return true; + } + g_logger.critical("Unable to connect to ndb mgm %s", tmp.c_str()); return false; } diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index 456bfffbb77..096f5406bbf 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -29,26 +29,175 @@ HugoTransactions::~HugoTransactions(){ deallocRows(); } - -int HugoTransactions::scanReadCommittedRecords(Ndb* pNdb, +int +HugoTransactions::scanReadRecords(Ndb* pNdb, int records, int abortPercent, - int parallelism){ - return scanReadRecords(pNdb, records, abortPercent, parallelism, true); + int parallelism, + NdbOperation::LockMode lm) +{ + + int retryAttempt = 0; + const int retryMax = 100; + int check, a; + NdbConnection *pTrans; + NdbScanOperation *pOp; + + while (true){ + + if (retryAttempt >= retryMax){ + g_err << "ERROR: has retried this operation " << retryAttempt + << " times, failing!" << endl; + return NDBT_FAILED; + } + + pTrans = pNdb->startTransaction(); + if (pTrans == NULL) { + const NdbError err = pNdb->getNdbError(); + + if (err.status == NdbError::TemporaryError){ + ERR(err); + NdbSleep_MilliSleep(50); + retryAttempt++; + continue; + } + ERR(err); + return NDBT_FAILED; + } + + pOp = pTrans->getNdbScanOperation(tab.getName()); + if (pOp == NULL) { + ERR(pTrans->getNdbError()); + pNdb->closeTransaction(pTrans); + return NDBT_FAILED; + } + + NdbResultSet * rs; + rs = pOp ->readTuples(lm); + + if( rs == 0 ) { + ERR(pTrans->getNdbError()); + pNdb->closeTransaction(pTrans); + return NDBT_FAILED; + } + + check = pOp->interpret_exit_ok(); + if( check == -1 ) { + ERR(pTrans->getNdbError()); + pNdb->closeTransaction(pTrans); + return NDBT_FAILED; + } + + for(a = 0; a<tab.getNoOfColumns(); a++){ + if((row.attributeStore(a) = + pOp->getValue(tab.getColumn(a)->getName())) == 0) { + ERR(pTrans->getNdbError()); + pNdb->closeTransaction(pTrans); + return NDBT_FAILED; + } + } + + check = pTrans->execute(NoCommit); + if( check == -1 ) { + const NdbError err = pTrans->getNdbError(); + if (err.status == NdbError::TemporaryError){ + ERR(err); + pNdb->closeTransaction(pTrans); + NdbSleep_MilliSleep(50); + retryAttempt++; + continue; + } + ERR(err); + pNdb->closeTransaction(pTrans); + return NDBT_FAILED; + } + + // Abort after 1-100 or 1-records rows + int ranVal = rand(); + int abortCount = ranVal % (records == 0 ? 
100 : records); + bool abortTrans = false; + if (abort > 0){ + // Abort if abortCount is less then abortPercent + if (abortCount < abortPercent) + abortTrans = true; + } + + int eof; + int rows = 0; + while((eof = rs->nextResult(true)) == 0){ + rows++; + if (calc.verifyRowValues(&row) != 0){ + pNdb->closeTransaction(pTrans); + return NDBT_FAILED; + } + + if (abortCount == rows && abortTrans == true){ + ndbout << "Scan is aborted" << endl; + g_info << "Scan is aborted" << endl; + rs->close(); + if( check == -1 ) { + ERR(pTrans->getNdbError()); + pNdb->closeTransaction(pTrans); + return NDBT_FAILED; + } + + pNdb->closeTransaction(pTrans); + return NDBT_OK; + } + } + if (eof == -1) { + const NdbError err = pTrans->getNdbError(); + + if (err.status == NdbError::TemporaryError){ + ERR_INFO(err); + pNdb->closeTransaction(pTrans); + NdbSleep_MilliSleep(50); + switch (err.code){ + case 488: + case 245: + case 490: + // Too many active scans, no limit on number of retry attempts + break; + default: + retryAttempt++; + } + continue; + } + ERR(err); + pNdb->closeTransaction(pTrans); + return NDBT_FAILED; + } + + pNdb->closeTransaction(pTrans); + + g_info << rows << " rows have been read" << endl; + if (records != 0 && rows != records){ + g_err << "Check expected number of records failed" << endl + << " expected=" << records <<", " << endl + << " read=" << rows << endl; + return NDBT_FAILED; + } + + return NDBT_OK; + } + return NDBT_FAILED; } int HugoTransactions::scanReadRecords(Ndb* pNdb, + const NdbDictionary::Index * pIdx, int records, int abortPercent, int parallelism, - bool committed){ + NdbOperation::LockMode lm, + bool sorted) +{ int retryAttempt = 0; const int retryMax = 100; int check, a; NdbConnection *pTrans; - NdbScanOperation *pOp; + NdbIndexScanOperation *pOp; while (true){ @@ -72,7 +221,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, return NDBT_FAILED; } - pOp = pTrans->getNdbScanOperation(tab.getName()); + pOp = pTrans->getNdbIndexScanOperation(pIdx->getName(), tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); pNdb->closeTransaction(pTrans); @@ -80,8 +229,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, } NdbResultSet * rs; - rs = pOp ->readTuples(committed ? 
NdbScanOperation::LM_CommittedRead : - NdbScanOperation::LM_Read); + rs = pOp ->readTuples(lm, 0, parallelism, sorted); if( rs == 0 ) { ERR(pTrans->getNdbError()); diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp index 367223f8c98..1434617c988 100644 --- a/ndb/test/src/NDBT_Test.cpp +++ b/ndb/test/src/NDBT_Test.cpp @@ -839,9 +839,9 @@ void NDBT_TestSuite::execute(Ndb* ndb, const NdbDictionary::Table* pTab, continue; } pTab2 = pDict->getTable(pTab->getName()); - } else { + } else if(!pTab2) { pTab2 = pTab; - } + } ctx = new NDBT_Context(); ctx->setTab(pTab2); diff --git a/ndb/test/src/NdbBackup.cpp b/ndb/test/src/NdbBackup.cpp index 1ce48d495a5..09f52bf0bed 100644 --- a/ndb/test/src/NdbBackup.cpp +++ b/ndb/test/src/NdbBackup.cpp @@ -69,28 +69,19 @@ NdbBackup::getBackupDataDirForNode(int _node_id){ /** * Fetch configuration from management server */ - LocalConfig lc; - if (!lc.init(0,0)) { - abort(); - } - ConfigRetriever cr(lc, 0, NODE_TYPE_API); - ndb_mgm_configuration * p = 0; + ndb_mgm_configuration *p; + if (connect()) + return NULL; - BaseString tmp; tmp.assfmt("%s:%d", host.c_str(), port); - NdbMgmHandle handle = ndb_mgm_create_handle(); - if(handle == 0 || ndb_mgm_connect(handle, tmp.c_str()) != 0 || - (p = ndb_mgm_get_configuration(handle, 0)) == 0){ - - const char * s = 0; - if(p == 0 && handle != 0){ - s = ndb_mgm_get_latest_error_msg(handle); - if(s == 0) - s = "No error given!"; + if ((p = ndb_mgm_get_configuration(handle, 0)) == 0) + { + const char * s= ndb_mgm_get_latest_error_msg(handle); + if(s == 0) + s = "No error given!"; - ndbout << "Could not fetch configuration" << endl; - ndbout << s << endl; - return NULL; - } + ndbout << "Could not fetch configuration" << endl; + ndbout << s << endl; + return NULL; } /** @@ -155,13 +146,14 @@ NdbBackup::execRestore(bool _restore_data, ndbout << "scp res: " << res << endl; - BaseString::snprintf(buf, 255, "%sndb_restore -c \"host=%s\" -n %d -b %d %s %s .", + BaseString::snprintf(buf, 255, "%sndb_restore -c \"%s:%d\" -n %d -b %d %s %s .", #if 1 "", #else "valgrind --leak-check=yes -v " #endif - addr.c_str(), + ndb_mgm_get_connected_host(handle), + ndb_mgm_get_connected_port(handle), _node_id, _backup_id, _restore_data?"-r":"", diff --git a/ndb/test/src/NdbRestarter.cpp b/ndb/test/src/NdbRestarter.cpp index 4d6d3ddc001..91c0963feae 100644 --- a/ndb/test/src/NdbRestarter.cpp +++ b/ndb/test/src/NdbRestarter.cpp @@ -18,7 +18,6 @@ #include <NdbOut.hpp> #include <NdbSleep.h> #include <NdbTick.h> -#include <LocalConfig.hpp> #include <mgmapi_debug.h> #include <NDBT_Output.hpp> #include <random.h> @@ -33,42 +32,11 @@ NdbRestarter::NdbRestarter(const char* _addr): connected(false), - port(-1), handle(NULL), m_config(0) { if (_addr == NULL){ - LocalConfig lcfg; - if(!lcfg.init()){ - lcfg.printError(); - lcfg.printUsage(); - g_err << "NdbRestarter - Error parsing local config file" << endl; - return; - } - - if (lcfg.ids.size() == 0){ - g_err << "NdbRestarter - No management servers configured in local config file" << endl; - return; - } - - for (int i = 0; i<lcfg.ids.size(); i++){ - MgmtSrvrId * m = &lcfg.ids[i]; - - switch(m->type){ - case MgmId_TCP: - char buf[255]; - snprintf(buf, 255, "%s:%d", m->name.c_str(), m->port); - addr.assign(buf); - host.assign(m->name.c_str()); - port = m->port; - return; - break; - case MgmId_File: - break; - default: - break; - } - } + addr.assign(""); } else { addr.assign(_addr); } @@ -391,13 +359,22 @@ NdbRestarter::isConnected(){ int NdbRestarter::connect(){ + disconnect(); handle = 
ndb_mgm_create_handle(); if (handle == NULL){ g_err << "handle == NULL" << endl; return -1; } g_info << "Connecting to mgmsrv at " << addr.c_str() << endl; - if (ndb_mgm_connect(handle, addr.c_str()) == -1) { + if (ndb_mgm_set_connectstring(handle,addr.c_str())) + { + MGMERR(handle); + g_err << "Connection to " << addr.c_str() << " failed" << endl; + return -1; + } + + if (ndb_mgm_connect(handle, 0, 0, 0) == -1) + { MGMERR(handle); g_err << "Connection to " << addr.c_str() << " failed" << endl; return -1; diff --git a/ndb/test/src/UtilTransactions.cpp b/ndb/test/src/UtilTransactions.cpp index c0e6effd244..869f7fc76cb 100644 --- a/ndb/test/src/UtilTransactions.cpp +++ b/ndb/test/src/UtilTransactions.cpp @@ -619,7 +619,7 @@ UtilTransactions::addRowToInsert(Ndb* pNdb, int UtilTransactions::scanReadRecords(Ndb* pNdb, int parallelism, - bool exclusive, + NdbOperation::LockMode lm, int records, int noAttribs, int *attrib_list, @@ -669,10 +669,7 @@ UtilTransactions::scanReadRecords(Ndb* pNdb, return NDBT_FAILED; } - NdbResultSet * rs = pOp->readTuples(exclusive ? - NdbScanOperation::LM_Exclusive : - NdbScanOperation::LM_Read, - 0, parallelism); + NdbResultSet * rs = pOp->readTuples(lm, 0, parallelism); if( rs == 0 ) { ERR(pTrans->getNdbError()); pNdb->closeTransaction(pTrans); @@ -761,7 +758,7 @@ int UtilTransactions::selectCount(Ndb* pNdb, int parallelism, int* count_rows, - ScanLock lock, + NdbOperation::LockMode lm, NdbConnection* pTrans){ int retryAttempt = 0; @@ -785,19 +782,7 @@ UtilTransactions::selectCount(Ndb* pNdb, return NDBT_FAILED; } - NdbResultSet * rs; - switch(lock){ - case SL_ReadHold: - rs = pOp->readTuples(NdbScanOperation::LM_Read); - break; - case SL_Exclusive: - rs = pOp->readTuples(NdbScanOperation::LM_Exclusive); - break; - case SL_Read: - default: - rs = pOp->readTuples(NdbScanOperation::LM_CommittedRead); - } - + NdbResultSet * rs = pOp->readTuples(lm); if( rs == 0) { ERR(pTrans->getNdbError()); pNdb->closeTransaction(pTrans); diff --git a/ndb/test/tools/create_index.cpp b/ndb/test/tools/create_index.cpp index 75a657522f6..6e4c5377f4a 100644 --- a/ndb/test/tools/create_index.cpp +++ b/ndb/test/tools/create_index.cpp @@ -30,7 +30,7 @@ main(int argc, const char** argv){ const char* _dbname = "TEST_DB"; int _help = 0; - int _ordered, _pk; + int _ordered = 0, _pk = 1; struct getargs args[] = { { "database", 'd', arg_string, &_dbname, "dbname", diff --git a/ndb/test/tools/hugoScanRead.cpp b/ndb/test/tools/hugoScanRead.cpp index cdfdcea4654..42180207a8a 100644 --- a/ndb/test/tools/hugoScanRead.cpp +++ b/ndb/test/tools/hugoScanRead.cpp @@ -35,13 +35,17 @@ int main(int argc, const char** argv){ int _parallelism = 1; const char* _tabname = NULL; int _help = 0; - + int lock = NdbOperation::LM_Read; + int sorted = 0; + struct getargs args[] = { { "aborts", 'a', arg_integer, &_abort, "percent of transactions that are aborted", "abort%" }, { "loops", 'l', arg_integer, &_loops, "number of times to run this program(0=infinite loop)", "loops" }, { "parallelism", 'p', arg_integer, &_parallelism, "parallelism(1-240)", "para" }, { "records", 'r', arg_integer, &_records, "Number of records", "recs" }, - { "usage", '?', arg_flag, &_help, "Print help", "" } + { "usage", '?', arg_flag, &_help, "Print help", "" }, + { "lock", 'm', arg_integer, &lock, "lock mode", "" }, + { "sorted", 's', arg_flag, &sorted, "sorted", "" } }; int num_args = sizeof(args) / sizeof(args[0]); int optind = 0; @@ -73,16 +77,48 @@ int main(int argc, const char** argv){ ndbout << " Table " << _tabname << " does not 
exist!" << endl; return NDBT_ProgramExit(NDBT_WRONGARGS); } + + const NdbDictionary::Index * pIdx = 0; + if(optind+1 < argc) + { + pIdx = MyNdb.getDictionary()->getIndex(argv[optind+1], _tabname); + if(!pIdx) + ndbout << " Index " << argv[optind+1] << " not found" << endl; + else + if(pIdx->getType() != NdbDictionary::Index::UniqueOrderedIndex && + pIdx->getType() != NdbDictionary::Index::OrderedIndex) + { + ndbout << " Index " << argv[optind+1] << " is not scannable" << endl; + pIdx = 0; + } + } HugoTransactions hugoTrans(*pTab); int i = 0; while (i<_loops || _loops==0) { ndbout << i << ": "; - if(hugoTrans.scanReadRecords(&MyNdb, - 0, - _abort, - _parallelism) != 0){ - return NDBT_ProgramExit(NDBT_FAILED); + if(!pIdx) + { + if(hugoTrans.scanReadRecords(&MyNdb, + 0, + _abort, + _parallelism, + (NdbOperation::LockMode)lock) != 0) + { + return NDBT_ProgramExit(NDBT_FAILED); + } + } + else + { + if(hugoTrans.scanReadRecords(&MyNdb, pIdx, + 0, + _abort, + _parallelism, + (NdbOperation::LockMode)lock, + sorted) != 0) + { + return NDBT_ProgramExit(NDBT_FAILED); + } } i++; } diff --git a/ndb/tools/delete_all.cpp b/ndb/tools/delete_all.cpp index a4fd73a5128..046ac8005d2 100644 --- a/ndb/tools/delete_all.cpp +++ b/ndb/tools/delete_all.cpp @@ -67,7 +67,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), int main(int argc, char** argv){ NDB_INIT(argv[0]); - const char *load_default_groups[]= { "ndb_tools",0 }; + const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) diff --git a/ndb/tools/desc.cpp b/ndb/tools/desc.cpp index 8f7a2031ef0..c5e9efdfa8a 100644 --- a/ndb/tools/desc.cpp +++ b/ndb/tools/desc.cpp @@ -67,7 +67,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), int main(int argc, char** argv){ NDB_INIT(argv[0]); - const char *load_default_groups[]= { "ndb_tools",0 }; + const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) diff --git a/ndb/tools/drop_index.cpp b/ndb/tools/drop_index.cpp index 1d4b454682f..6600811e0c4 100644 --- a/ndb/tools/drop_index.cpp +++ b/ndb/tools/drop_index.cpp @@ -64,7 +64,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), int main(int argc, char** argv){ NDB_INIT(argv[0]); - const char *load_default_groups[]= { "ndb_tools",0 }; + const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) diff --git a/ndb/tools/drop_tab.cpp b/ndb/tools/drop_tab.cpp index 3362c7de47b..0661a8c599b 100644 --- a/ndb/tools/drop_tab.cpp +++ b/ndb/tools/drop_tab.cpp @@ -64,7 +64,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), int main(int argc, char** argv){ NDB_INIT(argv[0]); - const char *load_default_groups[]= { "ndb_tools",0 }; + const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) diff --git a/ndb/tools/listTables.cpp b/ndb/tools/listTables.cpp index 05e864a35c4..b923207a4fe 100644 --- a/ndb/tools/listTables.cpp +++ b/ndb/tools/listTables.cpp @@ -220,7 +220,7 @@ 
get_one_option(int optid, const struct my_option *opt __attribute__((unused)), int main(int argc, char** argv){ NDB_INIT(argv[0]); const char* _tabname; - const char *load_default_groups[]= { "ndb_tools",0 }; + const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) diff --git a/ndb/tools/restore/restore_main.cpp b/ndb/tools/restore/restore_main.cpp index c43791c6723..409ebd54764 100644 --- a/ndb/tools/restore/restore_main.cpp +++ b/ndb/tools/restore/restore_main.cpp @@ -143,7 +143,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), bool readArguments(int *pargc, char*** pargv) { - const char *load_default_groups[]= { "ndb_tools","ndb_restore",0 }; + const char *load_default_groups[]= { "mysql_cluster","ndb_restore",0 }; load_defaults("my",load_default_groups,pargc,pargv); if (handle_options(pargc, pargv, my_long_options, get_one_option)) { diff --git a/ndb/tools/select_all.cpp b/ndb/tools/select_all.cpp index 758c1e48c88..5efeed485a4 100644 --- a/ndb/tools/select_all.cpp +++ b/ndb/tools/select_all.cpp @@ -50,7 +50,7 @@ static struct my_option my_long_options[] = GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "parallelism", 'p', "parallelism", (gptr*) &_parallelism, (gptr*) &_parallelism, 0, - GET_INT, REQUIRED_ARG, 240, 0, 0, 0, 0, 0 }, + GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "lock", 'l', "Read(0), Read-hold(1), Exclusive(2)", (gptr*) &_lock, (gptr*) &_lock, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, @@ -105,7 +105,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), int main(int argc, char** argv){ NDB_INIT(argv[0]); - const char *load_default_groups[]= { "ndb_tools",0 }; + const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); const char* _tabname; int ho_error; @@ -133,13 +133,18 @@ int main(int argc, char** argv){ const NdbDictionary::Table* pTab = NDBT_Table::discoverTableFromDb(&MyNdb, _tabname); const NdbDictionary::Index * pIdx = 0; if(argc > 1){ - pIdx = MyNdb.getDictionary()->getIndex(argv[0], _tabname); + pIdx = MyNdb.getDictionary()->getIndex(argv[1], _tabname); } if(pTab == NULL){ ndbout << " Table " << _tabname << " does not exist!" 
<< endl; return NDBT_ProgramExit(NDBT_WRONGARGS); } + + if(argc > 1 && pIdx == 0) + { + ndbout << " Index " << argv[1] << " does not exist" << endl; + } if(_order && pIdx == NULL){ ndbout << " Order flag given without an index" << endl; diff --git a/ndb/tools/select_count.cpp b/ndb/tools/select_count.cpp index 6ee49ddbff0..c3491f842d8 100644 --- a/ndb/tools/select_count.cpp +++ b/ndb/tools/select_count.cpp @@ -83,7 +83,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), int main(int argc, char** argv){ NDB_INIT(argv[0]); - const char *load_default_groups[]= { "ndb_tools",0 }; + const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) diff --git a/ndb/tools/waiter.cpp b/ndb/tools/waiter.cpp index e24164ea807..5973b046f8f 100644 --- a/ndb/tools/waiter.cpp +++ b/ndb/tools/waiter.cpp @@ -23,7 +23,6 @@ #include <NdbOut.hpp> #include <NdbSleep.h> #include <kernel/ndb_limits.h> -#include <LocalConfig.hpp> #include <NDBT.hpp> @@ -75,7 +74,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), int main(int argc, char** argv){ NDB_INIT(argv[0]); - const char *load_default_groups[]= { "ndb_tools",0 }; + const char *load_default_groups[]= { "mysql_cluster",0 }; load_defaults("my",load_default_groups,&argc,&argv); const char* _hostName = NULL; int ho_error; @@ -85,39 +84,8 @@ int main(int argc, char** argv){ char buf[255]; _hostName = argv[0]; - if (_hostName == NULL){ - LocalConfig lcfg; - if(!lcfg.init(opt_connect_str, 0)) - { - lcfg.printError(); - lcfg.printUsage(); - g_err << "Error parsing local config file" << endl; - return NDBT_ProgramExit(NDBT_FAILED); - } - - for (unsigned i = 0; i<lcfg.ids.size();i++) - { - MgmtSrvrId * m = &lcfg.ids[i]; - - switch(m->type){ - case MgmId_TCP: - snprintf(buf, 255, "%s:%d", m->name.c_str(), m->port); - _hostName = buf; - break; - case MgmId_File: - break; - default: - break; - } - if (_hostName != NULL) - break; - } - if (_hostName == NULL) - { - g_err << "No management servers configured in local config file" << endl; - return NDBT_ProgramExit(NDBT_FAILED); - } - } + if (_hostName == 0) + _hostName= opt_connect_str; if (_no_contact) { if (waitClusterStatus(_hostName, NDB_MGM_NODE_STATUS_NO_CONTACT, _timeout) != 0) @@ -210,13 +178,19 @@ waitClusterStatus(const char* _addr, int _nodes[MAX_NDB_NODES]; int _num_nodes = 0; - handle = ndb_mgm_create_handle(); + handle = ndb_mgm_create_handle(); if (handle == NULL){ g_err << "handle == NULL" << endl; return -1; } g_info << "Connecting to mgmsrv at " << _addr << endl; - if (ndb_mgm_connect(handle, _addr) == -1) { + if (ndb_mgm_set_connectstring(handle, _addr)) + { + MGMERR(handle); + g_err << "Connectstring " << _addr << " invalid" << endl; + return -1; + } + if (ndb_mgm_connect(handle,0,0,1)) { MGMERR(handle); g_err << "Connection to " << _addr << " failed" << endl; return -1; diff --git a/sql-common/my_time.c b/sql-common/my_time.c index d1a36a75a5a..29935696b7d 100644 --- a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -343,7 +343,8 @@ str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time, (l_time->month || l_time->day)) l_time->year+= (l_time->year < YY_PART_YEAR ?
2000 : 1900); - if (number_of_fields < 3 || l_time->month > 12 || + if (number_of_fields < 3 || + l_time->year > 9999 || l_time->month > 12 || l_time->day > 31 || l_time->hour > 23 || l_time->minute > 59 || l_time->second > 59 || (!(flags & TIME_FUZZY_DATE) && (l_time->month == 0 || l_time->day == 0))) @@ -733,10 +734,10 @@ my_system_gmt_sec(const MYSQL_TIME *t, long *my_timezone, bool *in_dst_time_gap) /* Set MYSQL_TIME structure to 0000-00-00 00:00:00.000000 */ -void set_zero_time(MYSQL_TIME *tm) +void set_zero_time(MYSQL_TIME *tm, enum enum_mysql_timestamp_type time_type) { bzero((void*) tm, sizeof(*tm)); - tm->time_type= MYSQL_TIMESTAMP_NONE; + tm->time_type= time_type; } diff --git a/sql/field.cc b/sql/field.cc index bbd21247b8e..72c27b6adf9 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -4086,6 +4086,10 @@ int Field_datetime::store(longlong nr) void Field_datetime::store_time(TIME *ltime,timestamp_type type) { longlong tmp; + /* + We don't perform range checking here since values stored in TIME + structure always fit into DATETIME range. + */ if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME) tmp=((ltime->year*10000L+ltime->month*100+ltime->day)*LL(1000000)+ (ltime->hour*10000L+ltime->minute*100+ltime->second)); diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 07d8da63733..2515b4956d0 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -331,6 +331,9 @@ convert_error_code_to_mysql( } else if (error == (int) DB_NO_SAVEPOINT) { return(HA_ERR_NO_SAVEPOINT); + } else if (error == (int) DB_LOCK_TABLE_FULL) { + + return(HA_ERR_LOCK_TABLE_FULL); } else { return(-1); // Unknown error } diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 1962065d617..cf7b66c5f03 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -39,9 +39,6 @@ static const int parallelism= 240; // createable against NDB from this handler static const int max_transactions= 256; -// Default value for prefetch of autoincrement values -static const ha_rows autoincrement_prefetch= 32; - // connectstring to cluster if given by mysqld const char *ndbcluster_connectstring= 0; @@ -103,51 +100,52 @@ struct err_code_mapping { int ndb_err; int my_err; + int show_warning; }; static const err_code_mapping err_map[]= { - { 626, HA_ERR_KEY_NOT_FOUND }, - { 630, HA_ERR_FOUND_DUPP_KEY }, - { 893, HA_ERR_FOUND_DUPP_KEY }, // Unique constraint - { 721, HA_ERR_TABLE_EXIST }, - { 4244, HA_ERR_TABLE_EXIST }, - - { 709, HA_ERR_NO_SUCH_TABLE }, - { 284, HA_ERR_NO_SUCH_TABLE }, - - { 266, HA_ERR_LOCK_WAIT_TIMEOUT }, - { 274, HA_ERR_LOCK_WAIT_TIMEOUT }, - { 296, HA_ERR_LOCK_WAIT_TIMEOUT }, - { 297, HA_ERR_LOCK_WAIT_TIMEOUT }, - { 237, HA_ERR_LOCK_WAIT_TIMEOUT }, - - { 623, HA_ERR_RECORD_FILE_FULL }, - { 624, HA_ERR_RECORD_FILE_FULL }, - { 625, HA_ERR_RECORD_FILE_FULL }, - { 826, HA_ERR_RECORD_FILE_FULL }, - { 827, HA_ERR_RECORD_FILE_FULL }, - { 832, HA_ERR_RECORD_FILE_FULL }, - - { 0, 1 }, - - { -1, -1 } + { 626, HA_ERR_KEY_NOT_FOUND, 0 }, + { 630, HA_ERR_FOUND_DUPP_KEY, 0 }, + { 893, HA_ERR_FOUND_DUPP_KEY, 0 }, + { 721, HA_ERR_TABLE_EXIST, 1 }, + { 4244, HA_ERR_TABLE_EXIST, 1 }, + + { 709, HA_ERR_NO_SUCH_TABLE, 1 }, + { 284, HA_ERR_NO_SUCH_TABLE, 1 }, + + { 266, HA_ERR_LOCK_WAIT_TIMEOUT, 1 }, + { 274, HA_ERR_LOCK_WAIT_TIMEOUT, 1 }, + { 296, HA_ERR_LOCK_WAIT_TIMEOUT, 1 }, + { 297, HA_ERR_LOCK_WAIT_TIMEOUT, 1 }, + { 237, HA_ERR_LOCK_WAIT_TIMEOUT, 1 }, + + { 623, HA_ERR_RECORD_FILE_FULL, 1 }, + { 624, HA_ERR_RECORD_FILE_FULL, 1 }, + { 625, HA_ERR_RECORD_FILE_FULL, 1 }, + { 826, HA_ERR_RECORD_FILE_FULL, 
1 }, + { 827, HA_ERR_RECORD_FILE_FULL, 1 }, + { 832, HA_ERR_RECORD_FILE_FULL, 1 }, + + { 0, 1, 0 }, + + { -1, -1, 1 } }; static int ndb_to_mysql_error(const NdbError *err) { uint i; - for (i=0 ; err_map[i].ndb_err != err->code ; i++) + for (i=0; err_map[i].ndb_err != err->code && err_map[i].my_err != -1; i++); + if (err_map[i].show_warning) { - if (err_map[i].my_err == -1){ - // Push the NDB error message as warning - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - err->code, err->message, "NDB"); - return err->code; - } + // Push the NDB error message as warning + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + err->code, err->message, "NDB"); } + if (err_map[i].my_err == -1) + return err->code; return err_map[i].my_err; } @@ -161,7 +159,7 @@ int execute_no_commit(ha_ndbcluster *h, NdbConnection *trans) if (m_batch_execute) return 0; #endif - return trans->execute(NoCommit,AbortOnError,1); + return trans->execute(NoCommit,AbortOnError,h->m_force_send); } inline @@ -172,7 +170,18 @@ int execute_commit(ha_ndbcluster *h, NdbConnection *trans) if (m_batch_execute) return 0; #endif - return trans->execute(Commit,AbortOnError,1); + return trans->execute(Commit,AbortOnError,h->m_force_send); +} + +inline +int execute_commit(THD *thd, NdbConnection *trans) +{ + int m_batch_execute= 0; +#ifdef NOT_USED + if (m_batch_execute) + return 0; +#endif + return trans->execute(Commit,AbortOnError,thd->variables.ndb_force_send); } inline @@ -183,7 +192,7 @@ int execute_no_commit_ie(ha_ndbcluster *h, NdbConnection *trans) if (m_batch_execute) return 0; #endif - return trans->execute(NoCommit,IgnoreError,1); + return trans->execute(NoCommit,IgnoreError,h->m_force_send); } /* @@ -226,6 +235,8 @@ void ha_ndbcluster::set_rec_per_key() void ha_ndbcluster::records_update() { + if (m_ha_not_exact_count) + return; DBUG_ENTER("ha_ndbcluster::records_update"); struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info; DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", @@ -249,6 +260,8 @@ void ha_ndbcluster::records_update() void ha_ndbcluster::no_uncommitted_rows_execute_failure() { + if (m_ha_not_exact_count) + return; DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_execute_failure"); THD *thd= current_thd; ((Thd_ndb*)(thd->transaction.thd_ndb))->error= 1; @@ -257,6 +270,8 @@ void ha_ndbcluster::no_uncommitted_rows_execute_failure() void ha_ndbcluster::no_uncommitted_rows_init(THD *thd) { + if (m_ha_not_exact_count) + return; DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_init"); struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info; Thd_ndb *thd_ndb= (Thd_ndb *)thd->transaction.thd_ndb; @@ -274,6 +289,8 @@ void ha_ndbcluster::no_uncommitted_rows_init(THD *thd) void ha_ndbcluster::no_uncommitted_rows_update(int c) { + if (m_ha_not_exact_count) + return; DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update"); struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info; @@ -286,6 +303,8 @@ void ha_ndbcluster::no_uncommitted_rows_update(int c) void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd) { + if (m_ha_not_exact_count) + return; DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_reset"); ((Thd_ndb*)(thd->transaction.thd_ndb))->count++; ((Thd_ndb*)(thd->transaction.thd_ndb))->error= 0; @@ -777,7 +796,8 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase) error= create_unique_index(unique_index_name, key_info); break; case 
UNIQUE_INDEX: - error= create_unique_index(unique_index_name, key_info); + if (!(error= check_index_fields_not_null(i))) + error= create_unique_index(unique_index_name, key_info); break; case ORDERED_INDEX: error= create_ordered_index(index_name, key_info); @@ -829,6 +849,26 @@ NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const ORDERED_INDEX); } +int ha_ndbcluster::check_index_fields_not_null(uint inx) +{ + KEY* key_info= table->key_info + inx; + KEY_PART_INFO* key_part= key_info->key_part; + KEY_PART_INFO* end= key_part+key_info->key_parts; + DBUG_ENTER("check_index_fields_not_null"); + + for (; key_part != end; key_part++) + { + Field* field= key_part->field; + if (field->maybe_null()) + { + my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX), + MYF(0),field->field_name); + DBUG_RETURN(ER_NULL_COLUMN_IN_INDEX); + } + } + + DBUG_RETURN(0); +} void ha_ndbcluster::release_metadata() { @@ -1030,6 +1070,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf) m_value[i].ptr= NULL; } } + if (execute_no_commit_ie(this,trans) != 0) { table->status= STATUS_NOT_FOUND; @@ -1227,7 +1268,7 @@ inline int ha_ndbcluster::next_result(byte *buf) m_ops_pending= 0; m_blobs_pending= FALSE; } - check= cursor->nextResult(contact_ndb); + check= cursor->nextResult(contact_ndb, m_force_send); if (check == 0) { // One more record found @@ -1250,7 +1291,8 @@ inline int ha_ndbcluster::next_result(byte *buf) DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending)); if (m_ops_pending) { - if (current_thd->transaction.on) + // if (current_thd->transaction.on) + if (m_transaction_on) { if (execute_no_commit(this,trans) != 0) DBUG_RETURN(ndb_err(trans)); @@ -1519,7 +1561,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, DBUG_ASSERT(op->getSorted() == sorted); DBUG_ASSERT(op->getLockMode() == (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type)); - if(op->reset_bounds()) + if(op->reset_bounds(m_force_send)) DBUG_RETURN(ndb_err(m_active_trans)); } @@ -1744,7 +1786,8 @@ int ha_ndbcluster::write_row(byte *record) (int)m_rows_inserted, (int)m_bulk_insert_rows)); m_bulk_insert_not_flushed= FALSE; - if (thd->transaction.on) + // if (thd->transaction.on) + if (m_transaction_on) { if (execute_no_commit(this,trans) != 0) { @@ -1827,7 +1870,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) NdbOperation *op; uint i; DBUG_ENTER("update_row"); - + statistic_increment(ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); @@ -1917,7 +1960,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) for (i= 0; i < table->fields; i++) { Field *field= table->field[i]; - if ((thd->query_id == field->query_id) && + if (((thd->query_id == field->query_id) || m_retrieve_all_fields) && (!(field->flags & PRI_KEY_FLAG)) && set_ndb_value(op, field, i)) ERR_RETURN(op->getNdbError()); @@ -2180,6 +2223,7 @@ void ha_ndbcluster::print_results() } case NdbDictionary::Column::Decimal: { char *value= field->ptr; + fprintf(DBUG_FILE, "Decimal\t'%-*s'", field->pack_length(), value); break; } @@ -2344,7 +2388,7 @@ int ha_ndbcluster::index_last(byte *buf) int res; if((res= ordered_index_scan(0, 0, TRUE, buf)) == 0){ NdbResultSet *cursor= m_active_cursor; - while((res= cursor->nextResult(TRUE)) == 0); + while((res= cursor->nextResult(TRUE, m_force_send)) == 0); if(res == 1){ unpack_record(buf); table->status= 0; @@ -2430,7 +2474,7 @@ int ha_ndbcluster::rnd_init(bool scan) { if 
(!scan) DBUG_RETURN(1); - int res= cursor->restart(); + int res= cursor->restart(m_force_send); DBUG_ASSERT(res == 0); } index_init(table->primary_key); @@ -2461,7 +2505,7 @@ int ha_ndbcluster::close_scan() m_ops_pending= 0; } - cursor->close(); + cursor->close(m_force_send); m_active_cursor= NULL; DBUG_RETURN(0); } @@ -2573,14 +2617,17 @@ void ha_ndbcluster::info(uint flag) DBUG_PRINT("info", ("HA_STATUS_VARIABLE")); if (m_table_info) { - records_update(); + if (m_ha_not_exact_count) + records= 100; + else + records_update(); } else { - Uint64 rows; - if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){ - records= rows; - } + Uint64 rows= 100; + if (current_thd->variables.ndb_use_exact_count) + ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0); + records= rows; } } if (flag & HA_STATUS_CONST) @@ -2970,6 +3017,16 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) pointer to point to the NDB transaction. */ + // store thread specific data first to set the right context + m_force_send= thd->variables.ndb_force_send; + m_ha_not_exact_count= !thd->variables.ndb_use_exact_count; + m_autoincrement_prefetch= thd->variables.ndb_autoincrement_prefetch_sz; + if (!thd->transaction.on) + m_transaction_on= FALSE; + else + m_transaction_on= thd->variables.ndb_use_transactions; + // m_use_local_query_cache= thd->variables.ndb_use_local_query_cache; + m_active_trans= thd->transaction.all.ndb_tid ? (NdbConnection*)thd->transaction.all.ndb_tid: (NdbConnection*)thd->transaction.stmt.ndb_tid; @@ -3092,7 +3149,7 @@ int ndbcluster_commit(THD *thd, void *ndb_transaction) "stmt" : "all")); DBUG_ASSERT(ndb && trans); - if (execute_commit(0,trans) != 0) + if (execute_commit(thd,trans) != 0) { const NdbError err= trans->getNdbError(); const NdbOperation *error_op= trans->getNdbErrorOperation(); @@ -3646,11 +3703,11 @@ longlong ha_ndbcluster::get_auto_increment() DBUG_ENTER("get_auto_increment"); DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); int cache_size= - (m_rows_to_insert - m_rows_inserted < autoincrement_prefetch) ? + (m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ? m_rows_to_insert - m_rows_inserted - : (m_rows_to_insert > autoincrement_prefetch) ? + : (m_rows_to_insert > m_autoincrement_prefetch) ? m_rows_to_insert - : autoincrement_prefetch; + : m_autoincrement_prefetch; Uint64 auto_value= (m_skip_auto_increment) ? 
m_ndb->readAutoIncrementValue((const NDBTAB *) m_table) @@ -3689,7 +3746,12 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_blobs_pending(0), m_blobs_buffer(0), m_blobs_buffer_size(0), - m_dupkey((uint) -1) + m_dupkey((uint) -1), + m_ha_not_exact_count(FALSE), + m_force_send(TRUE), + m_autoincrement_prefetch(32), + m_transaction_on(TRUE), + m_use_local_query_cache(FALSE) { int i; @@ -4339,6 +4401,65 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key, DBUG_RETURN(10); /* Good guess when you don't know anything */ } +ulong ha_ndbcluster::table_flags(void) const +{ + if (m_ha_not_exact_count) + return m_table_flags | HA_NOT_EXACT_COUNT; + else + return m_table_flags; +} +const char * ha_ndbcluster::table_type() const +{ + return("ndbcluster"); +} +uint ha_ndbcluster::max_supported_record_length() const +{ + return NDB_MAX_TUPLE_SIZE; +} +uint ha_ndbcluster::max_supported_keys() const +{ + return MAX_KEY; +} +uint ha_ndbcluster::max_supported_key_parts() const +{ + return NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY; +} +uint ha_ndbcluster::max_supported_key_length() const +{ + return NDB_MAX_KEY_SIZE; +} +bool ha_ndbcluster::low_byte_first() const +{ +#ifdef WORDS_BIGENDIAN + return FALSE; +#else + return TRUE; +#endif +} +bool ha_ndbcluster::has_transactions() +{ + return m_transaction_on; +} +const char* ha_ndbcluster::index_type(uint key_number) +{ + switch (get_index_type(key_number)) { + case ORDERED_INDEX: + case UNIQUE_ORDERED_INDEX: + case PRIMARY_KEY_ORDERED_INDEX: + return "BTREE"; + case UNIQUE_INDEX: + case PRIMARY_KEY_INDEX: + default: + return "HASH"; + } +} +uint8 ha_ndbcluster::table_cache_type() +{ + if (m_use_local_query_cache) + return HA_CACHE_TBL_TRANSACT; + else + return HA_CACHE_TBL_NOCACHE; +} /* Handling the shared NDB_SHARE structure that is needed to @@ -4505,13 +4626,12 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, { DBUG_ENTER("ndb_get_table_statistics"); DBUG_PRINT("enter", ("table: %s", table)); - + NdbConnection* pTrans= ndb->startTransaction(); do { - NdbConnection* pTrans= ndb->startTransaction(); if (pTrans == NULL) break; - + NdbScanOperation* pOp= pTrans->getNdbScanOperation(table); if (pOp == NULL) break; @@ -4528,13 +4648,13 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows); pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits); - check= pTrans->execute(NoCommit); + check= pTrans->execute(NoCommit, AbortOnError, TRUE); if (check == -1) break; Uint64 sum_rows= 0; Uint64 sum_commits= 0; - while((check= rs->nextResult(TRUE)) == 0) + while((check= rs->nextResult(TRUE, TRUE)) == 0) { sum_rows+= rows; sum_commits+= commits; @@ -4543,6 +4663,8 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, if (check == -1) break; + rs->close(TRUE); + ndb->closeTransaction(pTrans); if(row_count) * row_count= sum_rows; @@ -4552,6 +4674,7 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, DBUG_RETURN(0); } while(0); + ndb->closeTransaction(pTrans); DBUG_PRINT("exit", ("failed")); DBUG_RETURN(-1); } diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index c6c233c013c..f6c712620c1 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -118,15 +118,14 @@ class ha_ndbcluster: public handler int reset(); int external_lock(THD *thd, int lock_type); int start_stmt(THD *thd); - const char * table_type() const { return("ndbcluster");} + const char * table_type() const; const char ** bas_ext() const; - ulong table_flags(void) const { return m_table_flags; } + 
ulong table_flags(void) const; ulong index_flags(uint idx, uint part, bool all_parts) const; - uint max_supported_record_length() const { return NDB_MAX_TUPLE_SIZE; }; - uint max_supported_keys() const { return MAX_KEY; } - uint max_supported_key_parts() const - { return NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY; }; - uint max_supported_key_length() const { return NDB_MAX_KEY_SIZE;}; + uint max_supported_record_length() const; + uint max_supported_keys() const; + uint max_supported_key_parts() const; + uint max_supported_key_length() const; int rename_table(const char *from, const char *to); int delete_table(const char *name); @@ -135,28 +134,9 @@ class ha_ndbcluster: public handler THR_LOCK_DATA **to, enum thr_lock_type lock_type); - bool low_byte_first() const - { -#ifdef WORDS_BIGENDIAN - return FALSE; -#else - return TRUE; -#endif - } - bool has_transactions() { return TRUE; } - - const char* index_type(uint key_number) { - switch (get_index_type(key_number)) { - case ORDERED_INDEX: - case UNIQUE_ORDERED_INDEX: - case PRIMARY_KEY_ORDERED_INDEX: - return "BTREE"; - case UNIQUE_INDEX: - case PRIMARY_KEY_INDEX: - default: - return "HASH"; - } - } + bool low_byte_first() const; + bool has_transactions(); + const char* index_type(uint key_number); double scan_time(); ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); @@ -165,7 +145,7 @@ class ha_ndbcluster: public handler static Thd_ndb* seize_thd_ndb(); static void release_thd_ndb(Thd_ndb* thd_ndb); - uint8 table_cache_type() { return HA_CACHE_TBL_NOCACHE; } + uint8 table_cache_type(); private: int alter_table_name(const char *from, const char *to); @@ -180,7 +160,8 @@ class ha_ndbcluster: public handler void release_metadata(); NDB_INDEX_TYPE get_index_type(uint idx_no) const; NDB_INDEX_TYPE get_index_type_from_table(uint index_no) const; - + int check_index_fields_not_null(uint index_no); + int pk_read(const byte *key, uint key_len, byte *buf); int complemented_pk_read(const byte *old_data, byte *new_data); int peek_row(); @@ -258,6 +239,12 @@ class ha_ndbcluster: public handler char *m_blobs_buffer; uint32 m_blobs_buffer_size; uint m_dupkey; + // set from thread variables at external lock + bool m_ha_not_exact_count; + bool m_force_send; + ha_rows m_autoincrement_prefetch; + bool m_transaction_on; + bool m_use_local_query_cache; void set_rec_per_key(); void records_update(); @@ -267,6 +254,8 @@ class ha_ndbcluster: public handler void no_uncommitted_rows_reset(THD *); friend int execute_no_commit(ha_ndbcluster*, NdbConnection*); + friend int execute_commit(ha_ndbcluster*, NdbConnection*); + friend int execute_no_commit_ie(ha_ndbcluster*, NdbConnection*); }; bool ndbcluster_init(void); diff --git a/sql/handler.cc b/sql/handler.cc index 5dae7950390..7ddd7b80a34 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -953,8 +953,10 @@ int handler::read_first_row(byte * buf, uint primary_key) /* If there is very few deleted rows in the table, find the first row by scanning the table. + TODO remove the test for HA_READ_ORDER */ - if (deleted < 10 || primary_key >= MAX_KEY) + if (deleted < 10 || primary_key >= MAX_KEY || + !(index_flags(primary_key, 0, 0) & HA_READ_ORDER)) { (void) ha_rnd_init(1); while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) ; diff --git a/sql/item.cc b/sql/item.cc index 0e7a2b50b51..31c35e87cd4 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -837,6 +837,21 @@ void Item_param::set_double(double d) } +/* + Set parameter value from TIME value. 
+ + SYNOPSIS + set_time() + tm - datetime value to set (time_type is ignored) + type - type of datetime value + max_length_arg - max length of datetime value as string + + NOTE + If the value to be stored is not normalized, a zero value will be stored + instead and a proper warning will be produced. This function relies on + the fact that even a wrong value sent over the binary protocol fits into + the MAX_DATE_STRING_REP_LENGTH buffer. +*/ void Item_param::set_time(TIME *tm, timestamp_type type, uint32 max_length_arg) { DBUG_ENTER("Item_param::set_time"); @@ -844,6 +859,17 @@ void Item_param::set_time(TIME *tm, timestamp_type type, uint32 max_length_arg) value.time= *tm; value.time.time_type= type; + if (value.time.year > 9999 || value.time.month > 12 || + value.time.day > 31 || + type != MYSQL_TIMESTAMP_TIME && value.time.hour > 23 || + value.time.minute > 59 || value.time.second > 59) + { + char buff[MAX_DATE_STRING_REP_LENGTH]; + uint length= my_TIME_to_str(&value.time, buff); + make_truncated_value_warning(current_thd, buff, length, type); + set_zero_time(&value.time, MYSQL_TIMESTAMP_ERROR); + } + state= TIME_VALUE; maybe_null= 0; max_length= max_length_arg; diff --git a/sql/item.h b/sql/item.h index 547577a7ee0..ccb0fda1c49 100644 --- a/sql/item.h +++ b/sql/item.h @@ -266,6 +266,14 @@ public: virtual bool get_time(TIME *ltime); virtual bool get_date_result(TIME *ltime,uint fuzzydate) { return get_date(ltime,fuzzydate); } + /* + This function is used only in Item_func_isnull/Item_func_isnotnull + (implementations of IS NULL/IS NOT NULL clauses). Item_func_is{not}null + calls this method instead of one of the val/result*() methods, which + normally will set null_value. This allows determining the nullness of + a complex expression without fully evaluating it. + Any new item which can be NULL must implement this call. + */ virtual bool is_null() { return 0; } /* it is "top level" item of WHERE clause and we do not need correct NULL @@ -573,6 +581,8 @@ public: void print(String *str) { str->append('?'); } /* parameter never equal to other parameter of other item */ bool eq(const Item *item, bool binary_cmp) const { return 0; } + bool is_null() + { DBUG_ASSERT(state != NO_VALUE); return state == NULL_VALUE; } }; class Item_int :public Item_num diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index c36f2d191c7..4970517de87 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -2364,11 +2364,12 @@ Item_func_regex::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) return 0; } int error; - if ((error=regcomp(&preg,res->c_ptr(), - (cmp_collation.collation->state & MY_CS_BINSORT) ? - REG_EXTENDED | REG_NOSUB : - REG_EXTENDED | REG_NOSUB | REG_ICASE, - cmp_collation.collation))) + if ((error= regcomp(&preg,res->c_ptr(), + ((cmp_collation.collation->state & MY_CS_BINSORT) || + (cmp_collation.collation->state & MY_CS_CSSORT)) ? + REG_EXTENDED | REG_NOSUB : + REG_EXTENDED | REG_NOSUB | REG_ICASE, + cmp_collation.collation))) { (void) regerror(error,&preg,buff,sizeof(buff)); my_printf_error(ER_REGEXP_ERROR,ER(ER_REGEXP_ERROR),MYF(0),buff); @@ -2416,10 +2417,11 @@ longlong Item_func_regex::val_int() regex_compiled=0; } if (regcomp(&preg,res2->c_ptr(), - (cmp_collation.collation->state & MY_CS_BINSORT) ? - REG_EXTENDED | REG_NOSUB : - REG_EXTENDED | REG_NOSUB | REG_ICASE, - cmp_collation.collation)) + ((cmp_collation.collation->state & MY_CS_BINSORT) || + (cmp_collation.collation->state & MY_CS_CSSORT)) ?
+ REG_EXTENDED | REG_NOSUB : + REG_EXTENDED | REG_NOSUB | REG_ICASE, + cmp_collation.collation)) { null_value=1; return 0; diff --git a/sql/item_func.cc b/sql/item_func.cc index c07e9f23ea2..32841ba447b 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -350,6 +350,7 @@ void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array, { uint el= fields.elements; Item *new_item= new Item_ref(ref_pointer_array + el, 0, item->name); + new_item->collation.set(item->collation); fields.push_front(item); ref_pointer_array[el]= item; thd->change_item_tree(arg, new_item); @@ -1601,18 +1602,21 @@ longlong Item_func_bit_count::val_int() udf_handler::~udf_handler() { - if (initialized) + if (!not_original) { - if (u_d->func_deinit != NULL) + if (initialized) { - void (*deinit)(UDF_INIT *) = (void (*)(UDF_INIT*)) - u_d->func_deinit; - (*deinit)(&initid); + if (u_d->func_deinit != NULL) + { + void (*deinit)(UDF_INIT *) = (void (*)(UDF_INIT*)) + u_d->func_deinit; + (*deinit)(&initid); + } + free_udf(u_d); } - free_udf(u_d); + if (buffers) // Because of bug in ecc + delete [] buffers; } - if (buffers) // Because of bug in ecc - delete [] buffers; } diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 3b3a6083725..c43a7d87f8f 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -218,16 +218,13 @@ Item_sum_hybrid::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) hybrid_type= item->result_type(); if (hybrid_type == INT_RESULT) { - cmp_charset= &my_charset_bin; max_length=20; } else if (hybrid_type == REAL_RESULT) { - cmp_charset= &my_charset_bin; max_length=float_length(decimals); }else { - cmp_charset= item->collation.collation; max_length=item->max_length; } decimals=item->decimals; @@ -557,7 +554,7 @@ bool Item_sum_min::add() { String *result=args[0]->val_str(&tmp_value); if (!args[0]->null_value && - (null_value || sortcmp(&value,result,cmp_charset) > 0)) + (null_value || sortcmp(&value,result,collation.collation) > 0)) { value.copy(*result); null_value=0; @@ -610,7 +607,7 @@ bool Item_sum_max::add() { String *result=args[0]->val_str(&tmp_value); if (!args[0]->null_value && - (null_value || sortcmp(&value,result,cmp_charset) < 0)) + (null_value || sortcmp(&value,result,collation.collation) < 0)) { value.copy(*result); null_value=0; @@ -921,7 +918,7 @@ Item_sum_hybrid::min_max_update_str_field() result_field->val_str(&tmp_value); if (result_field->is_null() || - (cmp_sign * sortcmp(res_str,&tmp_value,cmp_charset)) < 0) + (cmp_sign * sortcmp(res_str,&tmp_value,collation.collation)) < 0) result_field->store(res_str->ptr(),res_str->length(),res_str->charset()); result_field->set_notnull(); } diff --git a/sql/item_sum.h b/sql/item_sum.h index 5aa0d37190b..521c595712b 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -402,20 +402,19 @@ class Item_sum_hybrid :public Item_sum enum_field_types hybrid_field_type; int cmp_sign; table_map used_table_cache; - CHARSET_INFO *cmp_charset; public: Item_sum_hybrid(Item *item_par,int sign) :Item_sum(item_par), sum(0.0), sum_int(0), hybrid_type(INT_RESULT), hybrid_field_type(FIELD_TYPE_LONGLONG), - cmp_sign(sign), used_table_cache(~(table_map) 0), - cmp_charset(&my_charset_bin) - {} + cmp_sign(sign), used_table_cache(~(table_map) 0) + { collation.set(&my_charset_bin); } Item_sum_hybrid(THD *thd, Item_sum_hybrid *item): Item_sum(thd, item), value(item->value), sum(item->sum), sum_int(item->sum_int), hybrid_type(item->hybrid_type), hybrid_field_type(item->hybrid_field_type),cmp_sign(item->cmp_sign), - used_table_cache(item->used_table_cache), 
cmp_charset(item->cmp_charset) {} + used_table_cache(item->used_table_cache) + { collation.set(item->collation); } bool fix_fields(THD *, TABLE_LIST *, Item **); table_map used_tables() const { return used_table_cache; } bool const_item() const { return !used_table_cache; } @@ -532,7 +531,7 @@ public: :Item_sum( list ), udf(udf_arg) { quick_group=0;} Item_udf_sum(THD *thd, Item_udf_sum *item) - :Item_sum(thd, item), udf(item->udf) {} + :Item_sum(thd, item), udf(item->udf) { udf.not_original= TRUE; } const char *func_name() const { return udf.name(); } bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) { diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index d8142352e70..354c8b5c50c 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1601,50 +1601,46 @@ void Item_func_from_unixtime::fix_length_and_dec() String *Item_func_from_unixtime::val_str(String *str) { TIME time_tmp; - my_time_t tmp; - + DBUG_ASSERT(fixed == 1); - tmp= (time_t) args[0]->val_int(); - if ((null_value=args[0]->null_value)) - goto null_date; - - thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp, tmp); - + + if (get_date(&time_tmp, 0)) + return 0; + if (str->alloc(20*MY_CHARSET_BIN_MB_MAXLEN)) - goto null_date; + { + null_value= 1; + return 0; + } + make_datetime((DATE_TIME_FORMAT *) 0, &time_tmp, str); return str; - -null_date: - null_value=1; - return 0; } longlong Item_func_from_unixtime::val_int() { TIME time_tmp; - my_time_t tmp; - + DBUG_ASSERT(fixed == 1); - tmp= (time_t) (ulong) args[0]->val_int(); - if ((null_value=args[0]->null_value)) + if (get_date(&time_tmp, 0)) return 0; - - current_thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp, tmp); - + return (longlong) TIME_to_ulonglong_datetime(&time_tmp); } bool Item_func_from_unixtime::get_date(TIME *ltime, uint fuzzy_date __attribute__((unused))) { - my_time_t tmp=(my_time_t) args[0]->val_int(); - if ((null_value=args[0]->null_value)) + longlong tmp= args[0]->val_int(); + + if ((null_value= (args[0]->null_value || + tmp < TIMESTAMP_MIN_VALUE || + tmp > TIMESTAMP_MAX_VALUE))) return 1; - - current_thd->variables.time_zone->gmt_sec_to_TIME(ltime, tmp); + + thd->variables.time_zone->gmt_sec_to_TIME(ltime, (my_time_t)tmp); return 0; } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 1da5359bae0..43407b345fa 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -468,6 +468,7 @@ Query_cache query_cache; #ifdef HAVE_SMEM char *shared_memory_base_name= default_shared_memory_base_name; bool opt_enable_shared_memory; +HANDLE smem_event_connect_request= 0; #endif #include "sslopt-vars.h" @@ -743,6 +744,15 @@ void kill_mysql(void) CloseHandle(hEvent); */ } +#ifdef HAVE_SMEM + /* + Send event to smem_event_connect_request for aborting + */ + if (!SetEvent(smem_event_connect_request)) + { + DBUG_PRINT("error",("Got error: %ld from SetEvent of smem_event_connect_request",GetLastError())); + } +#endif #endif #elif defined(OS2) pthread_cond_signal( &eventShutdown); // post semaphore @@ -3705,7 +3715,6 @@ pthread_handler_decl(handle_connections_shared_memory,arg) /* file-mapping object, use for create shared memory */ HANDLE handle_connect_file_map= 0; char *handle_connect_map= 0; // pointer on shared memory - HANDLE event_connect_request= 0; // for start connection actions HANDLE event_connect_answer= 0; ulong smem_buffer_length= shared_memory_buffer_length + 4; ulong connect_number= 1; @@ -3726,7 +3735,7 @@ pthread_handler_decl(handle_connections_shared_memory,arg) */ suffix_pos= strxmov(tmp,shared_memory_base_name,"_",NullS); strmov(suffix_pos, 
"CONNECT_REQUEST"); - if ((event_connect_request= CreateEvent(0,FALSE,FALSE,tmp)) == 0) + if ((smem_event_connect_request= CreateEvent(0,FALSE,FALSE,tmp)) == 0) { errmsg= "Could not create request event"; goto error; @@ -3757,7 +3766,13 @@ pthread_handler_decl(handle_connections_shared_memory,arg) while (!abort_loop) { /* Wait a request from client */ - WaitForSingleObject(event_connect_request,INFINITE); + WaitForSingleObject(smem_event_connect_request,INFINITE); + + /* + it can be after shutdown command + */ + if (abort_loop) + goto error; HANDLE handle_client_file_map= 0; char *handle_client_map= 0; @@ -3882,7 +3897,7 @@ error: if (handle_connect_map) UnmapViewOfFile(handle_connect_map); if (handle_connect_file_map) CloseHandle(handle_connect_file_map); if (event_connect_answer) CloseHandle(event_connect_answer); - if (event_connect_request) CloseHandle(event_connect_request); + if (smem_event_connect_request) CloseHandle(smem_event_connect_request); decrement_handler_count(); DBUG_RETURN(0); @@ -3947,7 +3962,10 @@ enum options_mysqld OPT_INNODB_FILE_PER_TABLE, OPT_CRASH_BINLOG_INNODB, OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, OPT_SAFE_SHOW_DB, OPT_INNODB_SAFE_BINLOG, - OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_SKIP_SAFEMALLOC, + OPT_INNODB, OPT_ISAM, + OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, + OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, + OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, OPT_MAX_BINLOG_DUMP_EVENTS, OPT_SPORADIC_BINLOG_DUMP_FAIL, @@ -4386,9 +4404,26 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, #ifdef HAVE_NDBCLUSTER_DB - {"ndb-connectstring", OPT_NDB_CONNECTSTRING, "Connect string for ndbcluster.", - (gptr*) &ndbcluster_connectstring, (gptr*) &ndbcluster_connectstring, 0, GET_STR, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"ndb-connectstring", OPT_NDB_CONNECTSTRING, + "Connect string for ndbcluster.", + (gptr*) &ndbcluster_connectstring, (gptr*) &ndbcluster_connectstring, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"ndb_autoincrement_prefetch_sz", OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, + "Specify number of autoincrement values that are prefetched", + (gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz, + (gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz, + 0, GET_INT, REQUIRED_ARG, 32, 1, 256, 0, 0, 0}, + {"ndb_force_send", OPT_NDB_FORCE_SEND, + "Force send of buffers to ndb immediately without waiting for other threads", + (gptr*) &global_system_variables.ndb_force_send, + (gptr*) &global_system_variables.ndb_force_send, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb_use_exact_count", OPT_NDB_USE_EXACT_COUNT, + "Use exact records count during query planning and for " + "fast select count(*)", + (gptr*) &global_system_variables.ndb_use_exact_count, + (gptr*) &global_system_variables.ndb_use_exact_count, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, #endif {"new", 'n', "Use very new possible 'unsafe' functions.", (gptr*) &global_system_variables.new_mode, diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 720e82f414e..356ec80608c 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -921,8 +921,8 @@ int load_master_data(THD* thd) */ int error; - if (init_master_info(active_mi, master_info_file, relay_log_info_file, - 0)) + if (init_master_info(active_mi, master_info_file, relay_log_info_file, + 0, (SLAVE_IO | SLAVE_SQL))) 
send_error(thd, ER_MASTER_INFO); strmake(active_mi->master_log_name, row[0], sizeof(active_mi->master_log_name)); diff --git a/sql/set_var.cc b/sql/set_var.cc index a97506ad07c..2031ac15412 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -359,6 +359,23 @@ sys_var_thd_bool sys_innodb_table_locks("innodb_table_locks", sys_var_long_ptr sys_innodb_autoextend_increment("innodb_autoextend_increment", &srv_auto_extend_increment); #endif +#ifdef HAVE_NDBCLUSTER_DB +// ndb thread specific variable settings +sys_var_thd_ulong +sys_ndb_autoincrement_prefetch_sz("ndb_autoincrement_prefetch_sz", + &SV::ndb_autoincrement_prefetch_sz); +sys_var_thd_bool +sys_ndb_force_send("ndb_force_send", + &SV::ndb_force_send); +sys_var_thd_bool +sys_ndb_use_exact_count("ndb_use_exact_count", + &SV::ndb_use_exact_count); +sys_var_thd_bool +sys_ndb_use_transactions("ndb_use_transactions", + &SV::ndb_use_transactions); +// ndb server global variable settings +// none +#endif /* Time/date/datetime formats */ @@ -612,7 +629,13 @@ sys_var *sys_variables[]= &sys_innodb_table_locks, &sys_innodb_max_purge_lag, &sys_innodb_autoextend_increment, -#endif +#endif +#ifdef HAVE_NDBCLUSTER_DB + &sys_ndb_autoincrement_prefetch_sz, + &sys_ndb_force_send, + &sys_ndb_use_exact_count, + &sys_ndb_use_transactions, +#endif &sys_unique_checks, &sys_warning_count }; @@ -773,6 +796,13 @@ struct show_var_st init_vars[]= { #ifdef __NT__ {"named_pipe", (char*) &opt_enable_named_pipe, SHOW_MY_BOOL}, #endif +#ifdef HAVE_NDBCLUSTER_DB + {sys_ndb_autoincrement_prefetch_sz.name, + (char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS}, + {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS}, + {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, + {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, +#endif {sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, {sys_net_read_timeout.name, (char*) &sys_net_read_timeout, SHOW_SYS}, {sys_net_retry_count.name, (char*) &sys_net_retry_count, SHOW_SYS}, @@ -2703,13 +2733,18 @@ int sql_set_variables(THD *thd, List<set_var_base> *var_list) while ((var=it++)) { if ((error=var->check(thd))) - DBUG_RETURN(error); + goto err; } - if (thd->net.report_error) - DBUG_RETURN(1); - it.rewind(); - while ((var=it++)) - error|= var->update(thd); // Returns 0, -1 or 1 + if (!thd->net.report_error) + { + it.rewind(); + while ((var= it++)) + error|= var->update(thd); // Returns 0, -1 or 1 + } + else + error= 1; +err: + free_underlaid_joins(thd, &thd->lex->select_lex); DBUG_RETURN(error); } diff --git a/sql/slave.cc b/sql/slave.cc index 6bc977e8d41..6d5c997bade 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -161,7 +161,7 @@ int init_slave() } if (init_master_info(active_mi,master_info_file,relay_log_info_file, - !master_host)) + !master_host, (SLAVE_IO | SLAVE_SQL))) { sql_print_error("Failed to initialize the master info structure"); goto err; @@ -1799,7 +1799,8 @@ void clear_until_condition(RELAY_LOG_INFO* rli) int init_master_info(MASTER_INFO* mi, const char* master_info_fname, const char* slave_info_fname, - bool abort_if_no_master_info_file) + bool abort_if_no_master_info_file, + int thread_mask) { int fd,error; char fname[FN_REFLEN+128]; @@ -1813,8 +1814,15 @@ int init_master_info(MASTER_INFO* mi, const char* master_info_fname, last time. If this case pos_in_file would be set and we would get a crash when trying to read the signature for the binary relay log. + + We only rewind the read position if we are starting the SQL + thread. 
The handle_slave_sql thread assumes that the read + position is at the beginning of the file, and will read the + "signature" and then fast-forward to the last position read. */ - my_b_seek(mi->rli.cur_log, (my_off_t) 0); + if (thread_mask & SLAVE_SQL) { + my_b_seek(mi->rli.cur_log, (my_off_t) 0); + } DBUG_RETURN(0); } diff --git a/sql/slave.h b/sql/slave.h index c6cb81699a2..a4d123329c6 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -522,7 +522,8 @@ void clear_until_condition(RELAY_LOG_INFO* rli); void clear_slave_error_timestamp(RELAY_LOG_INFO* rli); int init_master_info(MASTER_INFO* mi, const char* master_info_fname, const char* slave_info_fname, - bool abort_if_no_master_info_file); + bool abort_if_no_master_info_file, + int thread_mask); void end_master_info(MASTER_INFO* mi); int init_relay_log_info(RELAY_LOG_INFO* rli, const char* info_fname); void end_relay_log_info(RELAY_LOG_INFO* rli); diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 98af43e17a7..d6f52fed1d2 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1384,8 +1384,10 @@ static bool update_user_table(THD *thd, const char *host, const char *user, table->field[0]->store(host,(uint) strlen(host), &my_charset_latin1); table->field[1]->store(user,(uint) strlen(user), &my_charset_latin1); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read_idx(table->record[0],0, - (byte*) table->field[0]->ptr,0, + (byte*) table->field[0]->ptr, + table->key_info[0].key_length, HA_READ_KEY_EXACT)) { my_error(ER_PASSWORD_NO_MATCH,MYF(0)); /* purecov: deadcode */ @@ -1463,9 +1465,11 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1); table->field[1]->store(combo.user.str,combo.user.length, &my_charset_latin1); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read_idx(table->record[0], 0, - (byte*) table->field[0]->ptr,0, - HA_READ_KEY_EXACT)) + (byte*) table->field[0]->ptr, + table->key_info[0].key_length, + HA_READ_KEY_EXACT)) { if (!create_user) { @@ -1568,6 +1572,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, We should NEVER delete from the user table, as a uses can still use mysqld even if he doesn't have any privileges in the user table! 
*/ + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (cmp_record(table,record[1]) && (error=table->file->update_row(table->record[1],table->record[0]))) { // This should never happen @@ -1645,8 +1650,11 @@ static int replace_db_table(TABLE *table, const char *db, table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1); table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1); table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1); - if (table->file->index_read_idx(table->record[0],0,(byte*) table->field[0]->ptr,0, - HA_READ_KEY_EXACT)) + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + if (table->file->index_read_idx(table->record[0],0, + (byte*) table->field[0]->ptr, + table->key_info[0].key_length, + HA_READ_KEY_EXACT)) { if (what == 'N') { // no row, no revoke @@ -1679,6 +1687,7 @@ static int replace_db_table(TABLE *table, const char *db, /* update old existing row */ if (rights) { + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if ((error=table->file->update_row(table->record[1],table->record[0]))) goto table_error; /* purecov: deadcode */ } @@ -1953,8 +1962,10 @@ static int replace_column_table(GRANT_TABLE *g_t, table->field[4]->store(xx->column.ptr(),xx->column.length(), &my_charset_latin1); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read(table->record[0],(byte*) table->field[0]->ptr, - 0, HA_READ_KEY_EXACT)) + table->key_info[0].key_length, + HA_READ_KEY_EXACT)) { if (revoke_grant) { @@ -2022,8 +2033,10 @@ static int replace_column_table(GRANT_TABLE *g_t, if (revoke_grant) { + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read(table->record[0], (byte*) table->field[0]->ptr, - key_length, HA_READ_KEY_EXACT)) + table->key_info[0].key_length, + HA_READ_KEY_EXACT)) goto end; /* Scan through all rows with the same host,db,user and table */ @@ -2112,9 +2125,10 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1); table->field[3]->store(table_name,(uint) strlen(table_name), &my_charset_latin1); store_record(table,record[1]); // store at pos 1 - + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read_idx(table->record[0],0, - (byte*) table->field[0]->ptr,0, + (byte*) table->field[0]->ptr, + table->key_info[0].key_length, HA_READ_KEY_EXACT)) { /* @@ -3571,9 +3585,12 @@ int mysql_drop_user(THD *thd, List <LEX_USER> &list) tables[0].table->field[1]->store(user_name->user.str,(uint) user_name->user.length, system_charset_info); + tables[0].table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (!tables[0].table->file->index_read_idx(tables[0].table->record[0],0, (byte*) tables[0].table-> - field[0]->ptr,0, + field[0]->ptr, + tables[0].table-> + key_info[0].key_length, HA_READ_KEY_EXACT)) { int error; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index eda60b5cfdb..bab81d785c3 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -280,6 +280,9 @@ void THD::init(void) variables.date_format); variables.datetime_format= date_time_format_copy((THD*) 0, variables.datetime_format); +#ifdef HAVE_NDBCLUSTER_DB + variables.ndb_use_transactions= 1; +#endif pthread_mutex_unlock(&LOCK_global_system_variables); server_status= SERVER_STATUS_AUTOCOMMIT; options= thd_startup_options; diff --git a/sql/sql_class.h b/sql/sql_class.h index 312d9de9794..d0d9afc7746 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -399,6 +399,12 @@ struct system_variables #ifdef HAVE_INNOBASE_DB my_bool 
innodb_table_locks; #endif /* HAVE_INNOBASE_DB */ +#ifdef HAVE_NDBCLUSTER_DB + ulong ndb_autoincrement_prefetch_sz; + my_bool ndb_force_send; + my_bool ndb_use_exact_count; + my_bool ndb_use_transactions; +#endif /* HAVE_NDBCLUSTER_DB */ my_bool old_passwords; /* Only charset part of these variables is sensible */ diff --git a/sql/sql_do.cc b/sql/sql_do.cc index 25a8359f3d2..0d4529fb29e 100644 --- a/sql/sql_do.cc +++ b/sql/sql_do.cc @@ -29,6 +29,7 @@ int mysql_do(THD *thd, List<Item> &values) DBUG_RETURN(-1); while ((value = li++)) value->val_int(); + free_underlaid_joins(thd, &thd->lex->select_lex); thd->clear_error(); // DO always is OK send_ok(thd); DBUG_RETURN(0); diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index d81ed2cd014..a790e6fe9d8 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -349,12 +349,6 @@ static void set_param_time(Item_param *param, uchar **pos, ulong len) tm.neg= (bool) to[0]; day= (uint) sint4korr(to+1); - /* - Note, that though ranges of hour, minute and second are not checked - here we rely on them being < 256: otherwise - we'll get buffer overflow in make_{date,time} functions, - which are called when time value is converted to string. - */ tm.hour= (uint) to[5] + day * 24; tm.minute= (uint) to[6]; tm.second= (uint) to[7]; @@ -369,7 +363,7 @@ static void set_param_time(Item_param *param, uchar **pos, ulong len) tm.day= tm.year= tm.month= 0; } else - set_zero_time(&tm); + set_zero_time(&tm, MYSQL_TIMESTAMP_TIME); param->set_time(&tm, MYSQL_TIMESTAMP_TIME, MAX_TIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN); *pos+= length; @@ -388,11 +382,6 @@ static void set_param_datetime(Item_param *param, uchar **pos, ulong len) tm.year= (uint) sint2korr(to); tm.month= (uint) to[2]; tm.day= (uint) to[3]; - /* - Note, that though ranges of hour, minute and second are not checked - here we rely on them being < 256: otherwise - we'll get buffer overflow in make_{date,time} functions. - */ if (length > 4) { tm.hour= (uint) to[4]; @@ -405,7 +394,7 @@ static void set_param_datetime(Item_param *param, uchar **pos, ulong len) tm.second_part= (length > 7) ? (ulong) sint4korr(to+7) : 0; } else - set_zero_time(&tm); + set_zero_time(&tm, MYSQL_TIMESTAMP_DATETIME); param->set_time(&tm, MYSQL_TIMESTAMP_DATETIME, MAX_DATETIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN); *pos+= length; @@ -419,11 +408,7 @@ static void set_param_date(Item_param *param, uchar **pos, ulong len) if (length >= 4) { uchar *to= *pos; - /* - Note, that though ranges of hour, minute and second are not checked - here we rely on them being < 256: otherwise - we'll get buffer overflow in make_{date,time} functions. 
- */ + tm.year= (uint) sint2korr(to); tm.month= (uint) to[2]; tm.day= (uint) to[3]; @@ -433,7 +418,7 @@ static void set_param_date(Item_param *param, uchar **pos, ulong len) tm.neg= 0; } else - set_zero_time(&tm); + set_zero_time(&tm, MYSQL_TIMESTAMP_DATE); param->set_time(&tm, MYSQL_TIMESTAMP_DATE, MAX_DATE_WIDTH * MY_CHARSET_BIN_MB_MAXLEN); *pos+= length; diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index a84b63c270b..bbcea537ff1 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -683,7 +683,8 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report) thread_mask&= thd->lex->slave_thd_opt; if (thread_mask) //some threads are stopped, start them { - if (init_master_info(mi,master_info_file,relay_log_info_file, 0)) + if (init_master_info(mi,master_info_file,relay_log_info_file, 0, + thread_mask)) slave_errno=ER_MASTER_INFO; else if (server_id_supplied && *mi->host) { @@ -978,7 +979,8 @@ int change_master(THD* thd, MASTER_INFO* mi) thd->proc_info = "Changing master"; LEX_MASTER_INFO* lex_mi= &thd->lex->mi; // TODO: see if needs re-write - if (init_master_info(mi, master_info_file, relay_log_info_file, 0)) + if (init_master_info(mi, master_info_file, relay_log_info_file, 0, + thread_mask)) { send_error(thd, ER_MASTER_INFO); unlock_slave_threads(mi); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 9162cd30d63..40dae434c5e 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -2154,7 +2154,7 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, COND *cond, bool is_const=1; for (uint i=0; i<num_values; i++) - is_const&= (*value)->const_item(); + is_const&= value[i]->const_item(); if (is_const) stat[0].const_keys.merge(possible_keys); /* diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 3a242dc6547..eedd9388877 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -835,7 +835,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, #endif } - List_iterator<key_part_spec> cols(key->columns); + List_iterator<key_part_spec> cols(key->columns), cols2(key->columns); CHARSET_INFO *ft_key_charset=0; // for FULLTEXT for (uint column_nr=0 ; (column=cols++) ; column_nr++) { @@ -853,6 +853,19 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, column->field_name); DBUG_RETURN(-1); } + for (uint dup_nr= 0; dup_nr < column_nr; dup_nr++) + { + key_part_spec *dup_column= cols2++; + if (!my_strcasecmp(system_charset_info, + column->field_name, dup_column->field_name)) + { + my_printf_error(ER_DUP_FIELDNAME, + ER(ER_DUP_FIELDNAME),MYF(0), + column->field_name); + DBUG_RETURN(-1); + } + } + cols2.rewind(); /* for fulltext keys keyseg length is 1 for blobs (it's ignored in ft code anyway, and 0 (set to column width later) for char's. it has to be correct col width for char's, as char data are not diff --git a/sql/sql_udf.h b/sql/sql_udf.h index 7b10b80f148..d1f99a6d232 100644 --- a/sql/sql_udf.h +++ b/sql/sql_udf.h @@ -56,8 +56,9 @@ class udf_handler :public Sql_alloc public: table_map used_tables_cache; bool const_item_cache; + bool not_original; udf_handler(udf_func *udf_arg) :u_d(udf_arg), buffers(0), error(0), - is_null(0), initialized(0) + is_null(0), initialized(0), not_original(0) {} ~udf_handler(); const char *name() const { return u_d ? 
u_d->name.str : "?"; } diff --git a/sql/table.cc b/sql/table.cc index 20ac714020d..cb565097c0b 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -463,7 +463,26 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, /* old frm file */ field_type= (enum_field_types) f_packtype(pack_flag); - charset=f_is_binary(pack_flag) ? &my_charset_bin : outparam->table_charset; + if (f_is_binary(pack_flag)) + { + /* + Try to choose the best 4.1 type: + - for 4.0 "CHAR(N) BINARY" or "VARCHAR(N) BINARY" + try to find a binary collation for character set. + - for other types (e.g. BLOB) just use my_charset_bin. + */ + if (!f_is_blob(pack_flag)) + { + // 3.23 or 4.0 string + if (!(charset= get_charset_by_csname(outparam->table_charset->csname, + MY_CS_BINSORT, MYF(0)))) + charset= &my_charset_bin; + } + else + charset= &my_charset_bin; + } + else + charset= outparam->table_charset; bzero((char*) &comment, sizeof(comment)); } *field_ptr=reg_field= diff --git a/strings/ctype-czech.c b/strings/ctype-czech.c index 6f9e9f74d35..2177a18504e 100644 --- a/strings/ctype-czech.c +++ b/strings/ctype-czech.c @@ -589,12 +589,12 @@ static MY_COLLATION_HANDLER my_collation_latin2_czech_ci_handler = CHARSET_INFO my_charset_latin2_czech_ci = { - 2,0,0, /* number */ - MY_CS_COMPILED|MY_CS_STRNXFRM, /* state */ - "latin2", /* cs name */ - "latin2_czech_cs", /* name */ - "", /* comment */ - NULL, /* tailoring */ + 2,0,0, /* number */ + MY_CS_COMPILED|MY_CS_STRNXFRM|MY_CS_CSSORT, /* state */ + "latin2", /* cs name */ + "latin2_czech_cs", /* name */ + "", /* comment */ + NULL, /* tailoring */ ctype_czech, to_lower_czech, to_upper_czech, diff --git a/strings/ctype-mb.c b/strings/ctype-mb.c index 7d81766c4cb..3cdf7f460cd 100644 --- a/strings/ctype-mb.c +++ b/strings/ctype-mb.c @@ -443,6 +443,37 @@ static void my_hash_sort_mb_bin(CHARSET_INFO *cs __attribute__((unused)), } } + +/* + Write max key: create a buffer with multibyte + representation of the max_sort_char character, + and copy it into max_str in a loop. +*/ +static void pad_max_char(CHARSET_INFO *cs, char *str, char *end) +{ + char buf[10]; + char buflen= cs->cset->wc_mb(cs, cs->max_sort_char, (uchar*) buf, + (uchar*) buf + sizeof(buf)); + DBUG_ASSERT(buflen > 0); + do + { + if ((str + buflen) < end) + { + /* Enough space for the characer */ + memcpy(str, buf, buflen); + str+= buflen; + } + else + { + /* + There is no space for whole multibyte + character, then add trailing spaces. + */ + *str++= ' '; + } + } while (str < end); +} + /* ** Calculate min_str and max_str that ranges a LIKE string. 
** Arguments: @@ -467,10 +498,15 @@ my_bool my_like_range_mb(CHARSET_INFO *cs, char *min_str,char *max_str, uint *min_length,uint *max_length) { - const char *end=ptr+ptr_length; - char *min_org=min_str; - char *min_end=min_str+res_length; - char *max_end=max_str+res_length; + const char *end; + char *min_org= min_str; + char *min_end= min_str + res_length; + char *max_end= max_str + res_length; + uint charlen= my_charpos(cs, ptr, ptr+ptr_length, res_length/cs->mbmaxlen); + + if (charlen < ptr_length) + ptr_length= charlen; + end= ptr + ptr_length; for (; ptr != end && min_str != min_end ; ptr++) { @@ -482,16 +518,14 @@ my_bool my_like_range_mb(CHARSET_INFO *cs, } if (*ptr == w_one || *ptr == w_many) /* '_' and '%' in SQL */ { - char buf[10]; - uint buflen; - uint charlen= my_charpos(cs, min_org, min_str, res_length/cs->mbmaxlen); + charlen= my_charpos(cs, min_org, min_str, res_length/cs->mbmaxlen); if (charlen < (uint) (min_str - min_org)) min_str= min_org + charlen; /* Write min key */ *min_length= (uint) (min_str - min_org); - *max_length=res_length; + *max_length= res_length; do { *min_str++= (char) cs->min_sort_char; @@ -502,27 +536,7 @@ my_bool my_like_range_mb(CHARSET_INFO *cs, representation of the max_sort_char character, and copy it into max_str in a loop. */ - buflen= cs->cset->wc_mb(cs, cs->max_sort_char, (uchar*) buf, - (uchar*) buf + sizeof(buf)); - DBUG_ASSERT(buflen > 0); - do - { - if ((max_str + buflen) <= max_end) - { - /* Enough space for max characer */ - memcpy(max_str, buf, buflen); - max_str+= buflen; - } - else - { - /* - There is no space for whole multibyte - character, then add trailing spaces. - */ - - *max_str++= ' '; - } - } while (max_str != max_end); + pad_max_char(cs, max_str, max_end); return 0; } *min_str++= *max_str++ = *ptr; @@ -530,7 +544,8 @@ my_bool my_like_range_mb(CHARSET_INFO *cs, *min_length= *max_length = (uint) (min_str - min_org); while (min_str != min_end) - *min_str++ = *max_str++ = ' '; /* Because if key compression */ + *min_str++= ' '; /* Because if key compression */ + pad_max_char(cs, max_str, max_end); return 0; } diff --git a/strings/ctype-uca.c b/strings/ctype-uca.c index 91af7af0c54..89c876ad10c 100644 --- a/strings/ctype-uca.c +++ b/strings/ctype-uca.c @@ -7288,6 +7288,7 @@ int my_wildcmp_uca(CHARSET_INFO *cs, { while (1) { + my_bool escaped= 0; if ((scan= mb_wc(cs, &w_wc, (const uchar*)wildstr, (const uchar*)wildend)) <= 0) return 1; @@ -7305,6 +7306,7 @@ int my_wildcmp_uca(CHARSET_INFO *cs, (const uchar*)wildend)) <= 0) return 1; wildstr+= scan; + escaped= 1; } if ((scan= mb_wc(cs, &s_wc, (const uchar*)str, @@ -7312,7 +7314,7 @@ int my_wildcmp_uca(CHARSET_INFO *cs, return 1; str+= scan; - if (w_wc == (my_wc_t)w_one) + if (!escaped && w_wc == (my_wc_t)w_one) { result= 1; /* Found an anchor char */ } @@ -8567,7 +8569,7 @@ CHARSET_INFO my_charset_utf8_icelandic_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8594,7 +8596,7 @@ CHARSET_INFO my_charset_utf8_latvian_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8621,7 +8623,7 @@ CHARSET_INFO my_charset_utf8_romanian_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ 
&my_charset_utf8_handler, @@ -8648,7 +8650,7 @@ CHARSET_INFO my_charset_utf8_slovenian_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8675,7 +8677,7 @@ CHARSET_INFO my_charset_utf8_polish_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8702,7 +8704,7 @@ CHARSET_INFO my_charset_utf8_estonian_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8729,7 +8731,7 @@ CHARSET_INFO my_charset_utf8_spanish_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8756,7 +8758,7 @@ CHARSET_INFO my_charset_utf8_swedish_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8783,7 +8785,7 @@ CHARSET_INFO my_charset_utf8_turkish_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8810,7 +8812,7 @@ CHARSET_INFO my_charset_utf8_czech_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8838,7 +8840,7 @@ CHARSET_INFO my_charset_utf8_danish_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8865,7 +8867,7 @@ CHARSET_INFO my_charset_utf8_lithuanian_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8892,7 +8894,7 @@ CHARSET_INFO my_charset_utf8_slovak_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8919,7 +8921,7 @@ CHARSET_INFO my_charset_utf8_spanish2_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8946,7 +8948,7 @@ CHARSET_INFO my_charset_utf8_roman_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, @@ -8973,7 +8975,7 @@ CHARSET_INFO my_charset_utf8_persian_uca_ci= NULL, /* ident_map */ 8, /* strxfrm_multiply */ 1, /* mbminlen */ - 2, /* mbmaxlen */ + 3, /* mbmaxlen */ 9, /* min_sort_char */ 0xFFFF, /* max_sort_char */ &my_charset_utf8_handler, diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c index b3097649158..ce9346eb475 100644 --- a/strings/ctype-utf8.c +++ b/strings/ctype-utf8.c @@ -1545,31 +1545,33 @@ int my_wildcmp_unicode(CHARSET_INFO *cs, { while (1) { + my_bool escaped= 0; if ((scan= mb_wc(cs, &w_wc, 
(const uchar*)wildstr, (const uchar*)wildend)) <= 0) return 1; - - if (w_wc == (my_wc_t)escape) - { - wildstr+= scan; - if ((scan= mb_wc(cs,&w_wc, (const uchar*)wildstr, - (const uchar*)wildend)) <= 0) - return 1; - } - + if (w_wc == (my_wc_t)w_many) { result= 1; /* Found an anchor char */ break; } - + wildstr+= scan; + if (w_wc == (my_wc_t)escape) + { + if ((scan= mb_wc(cs, &w_wc, (const uchar*)wildstr, + (const uchar*)wildend)) <= 0) + return 1; + wildstr+= scan; + escaped= 1; + } + if ((scan= mb_wc(cs, &s_wc, (const uchar*)str, - (const uchar*)str_end)) <=0) + (const uchar*)str_end)) <= 0) return 1; str+= scan; - if (w_wc == (my_wc_t)w_one) + if (!escaped && w_wc == (my_wc_t)w_one) { result= 1; /* Found an anchor char */ } diff --git a/strings/ctype-win1250ch.c b/strings/ctype-win1250ch.c index b4dbda3e8ed..4ada3d47bf5 100644 --- a/strings/ctype-win1250ch.c +++ b/strings/ctype-win1250ch.c @@ -624,12 +624,12 @@ static MY_COLLATION_HANDLER my_collation_czech_ci_handler = CHARSET_INFO my_charset_cp1250_czech_ci = { - 34,0,0, /* number */ - MY_CS_COMPILED|MY_CS_STRNXFRM, /* state */ - "cp1250", /* cs name */ - "cp1250_czech_cs", /* name */ - "", /* comment */ - NULL, /* tailoring */ + 34,0,0, /* number */ + MY_CS_COMPILED|MY_CS_STRNXFRM|MY_CS_CSSORT, /* state */ + "cp1250", /* cs name */ + "cp1250_czech_cs", /* name */ + "", /* comment */ + NULL, /* tailoring */ ctype_win1250ch, to_lower_win1250ch, to_upper_win1250ch, diff --git a/strings/uca-dump.c b/strings/uca-dump.c index 6836c321526..dd3b74a55e8 100644 --- a/strings/uca-dump.c +++ b/strings/uca-dump.c @@ -23,13 +23,14 @@ struct uca_item_st #define MY_UCA_PSHIFT 8 #endif +static char *pname[]= {"", "2", "3"}; + int main(int ac, char **av) { char str[256]; char *weights[64]; struct uca_item_st uca[64*1024]; - size_t code, page, w; - int pagemaxlen[MY_UCA_NPAGES]; + size_t code, w; int pageloaded[MY_UCA_NPAGES]; bzero(uca, sizeof(uca)); @@ -155,14 +156,20 @@ int main(int ac, char **av) printf("#define MY_UCA_CMASK %d\n",MY_UCA_CMASK); printf("#define MY_UCA_PSHIFT %d\n",MY_UCA_PSHIFT); - for (w=0; w<1; w++) + for (w=0; w<3; w++) { + size_t page; + int pagemaxlen[MY_UCA_NPAGES]; + for (page=0; page < MY_UCA_NPAGES; page++) { size_t offs; size_t maxnum= 0; size_t nchars= 0; size_t mchars; + size_t ndefs= 0; + + pagemaxlen[page]= 0; /* Skip this page if no weights were loaded @@ -183,15 +190,36 @@ int main(int ac, char **av) code= page*MY_UCA_NCHARS+offs; /* Calculate only non-zero weights */ - num=0; - for (i=0; i < uca[code].num; i++) + for (num=0, i=0; i < uca[code].num; i++) if (uca[code].weight[w][i]) num++; maxnum= maxnum < num ? num : maxnum; + + /* Check if default weight */ + if (w == 1 && num == 1) + { + /* 0020 0000 ... */ + if (uca[code].weight[w][0] == 0x0020) + ndefs++; + } + else if (w == 2 && num == 1) + { + /* 0002 0000 ... */ + if (uca[code].weight[w][0] == 0x0002) + ndefs++; + } } maxnum++; + /* + If the page have only default weights + then no needs to dump it, skip. 
+ */ + if (ndefs == MY_UCA_NCHARS) + { + continue; + } switch (maxnum) { case 0: mchars= 8; break; @@ -210,8 +238,8 @@ int main(int ac, char **av) */ - printf("uint16 page%03Xdata[]= { /* %04X (%d weights per char) */\n", - page, page*MY_UCA_NCHARS, maxnum); + printf("uint16 page%03Xdata%s[]= { /* %04X (%d weights per char) */\n", + page, pname[w], page*MY_UCA_NCHARS, maxnum); for (offs=0; offs < MY_UCA_NCHARS; offs++) { @@ -234,7 +262,17 @@ int main(int ac, char **av) for (i=0; i < maxnum; i++) { - printf("0x%04X",(int)weight[i]); + /* + Invert weights for secondary level to + sort upper case letters before their + lower case counter part. + */ + int tmp= weight[i]; + if (w == 2 && tmp) + tmp= (int)(0x20 - weight[i]); + + + printf("0x%04X", tmp); if ((offs+1 != MY_UCA_NCHARS) || (i+1!=maxnum)) printf(","); nchars++; @@ -251,25 +289,28 @@ int main(int ac, char **av) } printf("};\n\n"); } - } - printf("uchar ucal[%d]={\n",MY_UCA_NPAGES); - for (page=0; page < MY_UCA_NPAGES; page++) - { - printf("%d%s%s",pagemaxlen[page],page<MY_UCA_NPAGES-1?",":"",(page+1) % 16 ? "":"\n"); - } - printf("};\n"); - - - printf("uint16 *ucaw[%d]={\n",MY_UCA_NPAGES); - for (page=0; page < MY_UCA_NPAGES; page++) - { - if (!pageloaded[page]) - printf("NULL %s%s",page<MY_UCA_NPAGES-1?",":"", (page+1) % 4 ? "":"\n"); - else - printf("page%03Xdata%s%s",page,page<MY_UCA_NPAGES-1?",":"", (page+1) % 4 ? "":"\n"); + printf("uchar uca_length%s[%d]={\n", pname[w], MY_UCA_NPAGES); + for (page=0; page < MY_UCA_NPAGES; page++) + { + printf("%d%s%s",pagemaxlen[page],page<MY_UCA_NPAGES-1?",":"",(page+1) % 16 ? "":"\n"); + } + printf("};\n"); + + + printf("uint16 *uca_weight%s[%d]={\n", pname[w], MY_UCA_NPAGES); + for (page=0; page < MY_UCA_NPAGES; page++) + { + const char *comma= page < MY_UCA_NPAGES-1 ? "," : ""; + const char *nline= (page+1) % 4 ? "" : "\n"; + if (!pagemaxlen[page]) + printf("NULL %s%s%s", w ? " ": "", comma , nline); + else + printf("page%03Xdata%s%s%s", page, pname[w], comma, nline); + } + printf("};\n"); } - printf("};\n"); + printf("int main(void){ return 0;};\n"); return 0; diff --git a/tests/client_test.c b/tests/client_test.c index 0ada98d44b0..35990a521a4 100644 --- a/tests/client_test.c +++ b/tests/client_test.c @@ -11121,6 +11121,140 @@ static void test_bug6096() } +/* + Test of basic checks that are performed in server for components + of MYSQL_TIME parameters. 
+*/ + +static void test_datetime_ranges() +{ + const char *stmt_text; + int rc, i; + MYSQL_STMT *stmt; + MYSQL_BIND bind[6]; + MYSQL_TIME tm[6]; + + myheader("test_datetime_ranges"); + + stmt_text= "drop table if exists t1"; + rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text)); + myquery(rc); + + stmt_text= "create table t1 (year datetime, month datetime, day datetime, " + "hour datetime, min datetime, sec datetime)"; + rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text)); + myquery(rc); + + stmt= mysql_simple_prepare(mysql, + "INSERT INTO t1 VALUES (?, ?, ?, ?, ?, ?)"); + check_stmt(stmt); + verify_param_count(stmt, 6); + + bzero(bind, sizeof(bind)); + for (i= 0; i < 6; i++) + { + bind[i].buffer_type= MYSQL_TYPE_DATETIME; + bind[i].buffer= &tm[i]; + } + rc= mysql_stmt_bind_param(stmt, bind); + check_execute(stmt, rc); + + tm[0].year= 2004; tm[0].month= 11; tm[0].day= 10; + tm[0].hour= 12; tm[0].minute= 30; tm[0].second= 30; + tm[0].second_part= 0; tm[0].neg= 0; + + tm[5]= tm[4]= tm[3]= tm[2]= tm[1]= tm[0]; + tm[0].year= 10000; tm[1].month= 13; tm[2].day= 32; + tm[3].hour= 24; tm[4].minute= 60; tm[5].second= 60; + + rc= mysql_stmt_execute(stmt); + check_execute(stmt, rc); + DIE_UNLESS(mysql_warning_count(mysql) != 6); + + verify_col_data("t1", "year", "0000-00-00 00:00:00"); + verify_col_data("t1", "month", "0000-00-00 00:00:00"); + verify_col_data("t1", "day", "0000-00-00 00:00:00"); + verify_col_data("t1", "hour", "0000-00-00 00:00:00"); + verify_col_data("t1", "min", "0000-00-00 00:00:00"); + verify_col_data("t1", "sec", "0000-00-00 00:00:00"); + + mysql_stmt_close(stmt); + + stmt_text= "delete from t1"; + rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text)); + myquery(rc); + + stmt= mysql_simple_prepare(mysql, "INSERT INTO t1 (year, month, day) " + "VALUES (?, ?, ?)"); + check_stmt(stmt); + verify_param_count(stmt, 3); + + /* + We reuse contents of bind and tm arrays left from previous part of test. + */ + for (i= 0; i < 3; i++) + bind[i].buffer_type= MYSQL_TYPE_DATE; + + rc= mysql_stmt_bind_param(stmt, bind); + check_execute(stmt, rc); + + rc= mysql_stmt_execute(stmt); + check_execute(stmt, rc); + DIE_UNLESS(mysql_warning_count(mysql) != 3); + + verify_col_data("t1", "year", "0000-00-00 00:00:00"); + verify_col_data("t1", "month", "0000-00-00 00:00:00"); + verify_col_data("t1", "day", "0000-00-00 00:00:00"); + + mysql_stmt_close(stmt); + + stmt_text= "drop table t1"; + rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text)); + myquery(rc); + + stmt_text= "create table t1 (day_ovfl time, day time, hour time, min time, sec time)"; + rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text)); + myquery(rc); + + stmt= mysql_simple_prepare(mysql, + "INSERT INTO t1 VALUES (?, ?, ?, ?, ?)"); + check_stmt(stmt); + verify_param_count(stmt, 5); + + /* + Again we reuse what we can from previous part of test. 
+ */ + for (i= 0; i < 5; i++) + bind[i].buffer_type= MYSQL_TYPE_TIME; + + rc= mysql_stmt_bind_param(stmt, bind); + check_execute(stmt, rc); + + tm[0].year= 0; tm[0].month= 0; tm[0].day= 10; + tm[0].hour= 12; tm[0].minute= 30; tm[0].second= 30; + tm[0].second_part= 0; tm[0].neg= 0; + + tm[4]= tm[3]= tm[2]= tm[1]= tm[0]; + tm[0].day= 35; tm[1].day= 34; tm[2].hour= 30; tm[3].minute= 60; tm[4].second= 60; + + rc= mysql_stmt_execute(stmt); + check_execute(stmt, rc); + DIE_UNLESS(mysql_warning_count(mysql) != 2); + + verify_col_data("t1", "day_ovfl", "838:59:59"); + verify_col_data("t1", "day", "828:30:30"); + verify_col_data("t1", "hour", "270:30:30"); + verify_col_data("t1", "min", "00:00:00"); + verify_col_data("t1", "sec", "00:00:00"); + + mysql_stmt_close(stmt); + + stmt_text= "drop table t1"; + rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text)); + myquery(rc); +} + + static void test_bug4172() { MYSQL_STMT *stmt; @@ -11455,6 +11589,7 @@ static struct my_tests_st my_tests[]= { { "test_bug6046", test_bug6046 }, { "test_bug6081", test_bug6081 }, { "test_bug6096", test_bug6096 }, + { "test_datetime_ranges", test_datetime_ranges }, { "test_bug4172", test_bug4172 }, { "test_conversion", test_conversion }, { 0, 0 } |
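The sql_select.cc hunk replaces (*value)->const_item() with value[i]->const_item() so constness is checked for every value in the array instead of testing the first element num_values times. A minimal standalone sketch of the corrected pattern; the integer array and is_constant() predicate are illustrative stand-ins for the server's Item objects, not real server code:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in predicate: in the server this would be Item::const_item(). */
static bool is_constant(int v) { return v >= 0; }

/* True only if every element satisfies the predicate, mirroring the
   fixed loop that tests value[i] rather than *value on each pass. */
static bool all_const(const int *values, unsigned num_values)
{
  bool is_const = true;
  for (unsigned i = 0; i < num_values; i++)
    is_const &= is_constant(values[i]);
  return is_const;
}

int main(void)
{
  int vals[] = {1, -2, 3};
  printf("%d\n", all_const(vals, 3));   /* prints 0: the -2 is "non-const" */
  return 0;
}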
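The sql_table.cc hunk adds a second iterator (cols2) over the key parts so that a definition such as KEY(a, a) is rejected with ER_DUP_FIELDNAME. The same check sketched over a plain array of column names; POSIX strcasecmp stands in for my_strcasecmp with system_charset_info:

#include <stdio.h>
#include <strings.h>   /* strcasecmp (POSIX) */

/* Returns the index of the first key part that repeats an earlier
   column name, or -1 if all key parts are distinct. */
static int find_duplicate_key_part(const char **cols, int ncols)
{
  for (int column_nr = 1; column_nr < ncols; column_nr++)
    for (int dup_nr = 0; dup_nr < column_nr; dup_nr++)
      if (!strcasecmp(cols[column_nr], cols[dup_nr]))
        return column_nr;
  return -1;
}

int main(void)
{
  const char *key_parts[] = {"a", "b", "A"};
  printf("%d\n", find_duplicate_key_part(key_parts, 3));  /* 2: "A" repeats "a" */
  return 0;
}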
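The new pad_max_char() in ctype-mb.c fills the remainder of a LIKE range's max key with the multibyte form of max_sort_char, falling back to spaces once a whole character no longer fits before the end of the buffer. A simplified sketch using a fixed, hypothetical three-byte "maximum character" in place of a real CHARSET_INFO:

#include <stdio.h>
#include <string.h>

/* Pad [str, end) with a multibyte "max" sequence; when the tail of the
   buffer cannot hold a whole sequence, pad the rest with spaces. */
static void pad_max(char *str, char *end)
{
  const char maxch[] = {'\xEF', '\xBF', '\xBF'};   /* U+FFFF in UTF-8 */
  const size_t buflen = sizeof(maxch);

  while (str < end)
  {
    if (str + buflen <= end)        /* a whole character still fits */
    {
      memcpy(str, maxch, buflen);
      str += buflen;
    }
    else
      *str++ = ' ';                 /* partial slot: space-pad */
  }
}

int main(void)
{
  char buf[8];
  pad_max(buf, buf + sizeof(buf));
  printf("%u %u\n", (unsigned char)buf[0], (unsigned char)buf[6]); /* 239 32 */
  return 0;
}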
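Both my_wildcmp_uca and my_wildcmp_unicode gain an escaped flag so that an escaped '_' (w_one) is matched as a literal character rather than as a one-character anchor. The control flow, reduced to a byte-oriented matcher that only handles '_' and a backslash escape (no '%' handling; not the server's full algorithm):

#include <stdbool.h>
#include <stdio.h>

/* Match str against a pattern containing '_' (any one char) and '\\' as
   an escape; an escaped '_' must match a literal '_' only. */
static bool wild_one_match(const char *str, const char *pat)
{
  while (*pat)
  {
    bool escaped = false;
    char p = *pat++;

    if (p == '\\' && *pat)          /* take the next pattern char literally */
    {
      p = *pat++;
      escaped = true;
    }
    if (!*str)
      return false;
    char s = *str++;

    if (!escaped && p == '_')       /* unescaped wildcard: any char is fine */
      continue;
    if (p != s)                     /* escaped or ordinary char: exact match */
      return false;
  }
  return *str == '\0';
}

int main(void)
{
  printf("%d %d\n", wild_one_match("a_c", "a\\_c"),   /* 1: escaped '_' is literal */
                    wild_one_match("abc", "a\\_c"));  /* 0: 'b' is not '_' */
  return 0;
}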
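The ctype-uca.c hunks correct mbmaxlen from 2 to 3 for the utf8 UCA collations, since MySQL's utf8 stores BMP characters in up to three bytes. A one-function sketch of that bound (standard UTF-8 length for code points up to U+FFFF; illustrative only, not server code):

#include <stdio.h>

/* Bytes needed to encode a BMP code point in UTF-8. */
static int utf8_bytes(unsigned int wc)
{
  if (wc < 0x80)  return 1;
  if (wc < 0x800) return 2;
  return 3;            /* 0x800..0xFFFF need three bytes, hence mbmaxlen=3 */
}

int main(void)
{
  printf("%d %d %d\n", utf8_bytes(0x41), utf8_bytes(0x3B1), utf8_bytes(0xFFFF));
  /* prints: 1 2 3 */
  return 0;
}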
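The new test_datetime_ranges case in client_test.c feeds out-of-range MYSQL_TIME components through prepared statements and checks that the server turns them into warnings, zero dates, or clamped TIME values such as 838:59:59. A tiny sketch of the documented TIME limit the test relies on (my own helper and naming, not part of the test or the client library):

/* Clamp a duration given in signed seconds to MySQL's documented TIME
   range of -838:59:59 .. 838:59:59. */
static long clamp_time_seconds(long sec)
{
  const long lim = 838L * 3600 + 59 * 60 + 59;   /* 3020399 seconds */
  if (sec >  lim) return  lim;
  if (sec < -lim) return -lim;
  return sec;
}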