| author | unknown <lars/lthalmann@mysql.com/dl145j.mysql.com> | 2006-09-15 01:26:28 +0200 |
|---|---|---|
| committer | unknown <lars/lthalmann@mysql.com/dl145j.mysql.com> | 2006-09-15 01:26:28 +0200 |
| commit | 2c168ec5684190bddff5a00b59a4ad949b5900b2 (patch) | |
| tree | 53ae82054ec6a32a2109e9877a45c8473dc9bc69 | |
| parent | 900e66f89567d740c4290963825206ac8a672df8 (diff) | |
| parent | 70bb923a37257aa6a33abca49d60ed2dd005c65d (diff) | |
| download | mariadb-git-2c168ec5684190bddff5a00b59a4ad949b5900b2.tar.gz | |

Merge mysql.com:/users/lthalmann/bkroot/mysql-5.0-rpl
into mysql.com:/users/lthalmann/bk/MERGE/mysql-5.0-merge
ndb/src/kernel/blocks/dbdih/DbdihMain.cpp:
  Auto merged
sql/ha_ndbcluster.cc:
  Auto merged
sql/sql_class.h:
  Auto merged
sql/sql_yacc.yy:
  Auto merged
client/mysqldump.c:
  Merge main->rpl
mysql-test/r/mysqldump.result:
  Merge main->rpl
mysql-test/t/mysqldump.test:
  Merge main->rpl
24 files changed, 603 insertions, 149 deletions
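Among the merged mysqldump changes (visible in the client/mysqldump.c hunks below) is the fix for BUG#13926: primary_key_fields() now passes each key column through quote_name() before appending it to the ORDER BY clause, so --order-by-primary keeps working when a primary-key column name contains quote characters. The following is only a minimal C sketch of that quoting idea; quote_ident() and build_order_by() are illustrative stand-ins, not the actual quote_name()/opt_quoted machinery in mysqldump.c, and the sketch hard-codes backtick quoting.

```c
#include <stdio.h>
#include <string.h>

/*
 * Sketch only: wrap an identifier in backticks and double any embedded
 * backtick, so a column named  e`f  is emitted as  `e``f`  inside an
 * ORDER BY clause. Hypothetical helpers, not mysqldump's real code.
 */
static char *quote_ident(const char *name, char *buff)
{
  char *to = buff;
  *to++ = '`';
  for (; *name; name++)
  {
    if (*name == '`')
      *to++ = '`';                      /* double the embedded quote char */
    *to++ = *name;
  }
  *to++ = '`';
  *to = '\0';
  return buff;
}

static void build_order_by(const char **cols, int ncols, char *out, size_t outlen)
{
  char buff[3 + 2 * 64];                /* quoted copy of one column name */
  out[0] = '\0';
  for (int i = 0; i < ncols; i++)
  {
    if (i)
      strncat(out, ",", outlen - strlen(out) - 1);
    strncat(out, quote_ident(cols[i], buff), outlen - strlen(out) - 1);
  }
}

int main(void)
{
  /* column names taken from the new test case in mysqldump.test */
  const char *key_cols[] = { "a b", "c\"d", "e`f" };
  char clause[256];

  build_order_by(key_cols, 3, clause, sizeof(clause));
  printf("ORDER BY %s\n", clause);      /* ORDER BY `a b`,`c"d`,`e``f` */
  return 0;
}
```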
diff --git a/BUILD/compile-ndb-autotest b/BUILD/compile-ndb-autotest new file mode 100755 index 00000000000..be28cc28346 --- /dev/null +++ b/BUILD/compile-ndb-autotest @@ -0,0 +1,19 @@ +#! /bin/sh + +path=`dirname $0` +. "$path/SETUP.sh" + +extra_configs="$max_configs --with-ndb-test --with-ndb-ccflags='-DERROR_INSERT'" +if [ "$full_debug" ] +then + extra_flags="$debug_cflags" + c_warnings="$c_warnings $debug_extra_warnings" + cxx_warnings="$cxx_warnings $debug_extra_warnings" + extra_configs="$debug_configs $extra_configs" +else + extra_flags="$fast_cflags" +fi + +extra_flags="$extra_flags $max_cflags -g" + +. "$path/FINISH.sh" diff --git a/client/mysqldump.c b/client/mysqldump.c index 5ecf334c9f7..c4a16bd26ae 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -650,13 +650,13 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), tty_password=1; break; case 'r': - if (!(md_result_file = my_fopen(argument, O_WRONLY | FILE_BINARY, + if (!(md_result_file= my_fopen(argument, O_WRONLY | FILE_BINARY, MYF(MY_WME)))) exit(1); break; case 'W': #ifdef __WIN__ - opt_protocol = MYSQL_PROTOCOL_PIPE; + opt_protocol= MYSQL_PROTOCOL_PIPE; #endif break; case 'N': @@ -671,7 +671,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), #include <sslopt-case.h> case 'V': print_version(); exit(0); case 'X': - opt_xml = 1; + opt_xml= 1; extended_insert= opt_drop= opt_lock= opt_disable_keys= opt_autocommit= opt_create_db= 0; break; @@ -1432,7 +1432,7 @@ static uint get_table_structure(char *table, char *db, char *table_type, const char *insert_option; char name_buff[NAME_LEN+3],table_buff[NAME_LEN*2+3]; char table_buff2[NAME_LEN*2+3], query_buff[512]; - FILE *sql_file = md_result_file; + FILE *sql_file= md_result_file; int len; MYSQL_RES *result; MYSQL_ROW row; @@ -1476,7 +1476,7 @@ static uint get_table_structure(char *table, char *db, char *table_type, opt_quoted_table= quote_name(table, table_buff2, 0); if (opt_order_by_primary) - order_by = primary_key_fields(result_table); + order_by= primary_key_fields(result_table); if (!opt_xml && !mysql_query_with_error_report(mysql, 0, query_buff)) { @@ -1528,7 +1528,7 @@ static uint get_table_structure(char *table, char *db, char *table_type, field= mysql_fetch_field_direct(result, 0); if (strcmp(field->name, "View") == 0) { - char *scv_buff = NULL; + char *scv_buff= NULL; verbose_msg("-- It's a view, create dummy table for view\n"); @@ -1565,7 +1565,7 @@ static uint get_table_structure(char *table, char *db, char *table_type, my_free(scv_buff, MYF(MY_ALLOW_ZERO_PTR)); safe_exit(EX_MYSQLERR); - DBUG_RETURN(0); + DBUG_RETURN(0); } else my_free(scv_buff, MYF(MY_ALLOW_ZERO_PTR)); @@ -1929,7 +1929,7 @@ static void dump_triggers_for_table (char *table, char *db) char name_buff[NAME_LEN*4+3], table_buff[NAME_LEN*2+3]; char query_buff[512]; uint old_opt_compatible_mode=opt_compatible_mode; - FILE *sql_file = md_result_file; + FILE *sql_file= md_result_file; MYSQL_RES *result; MYSQL_ROW row; @@ -2173,15 +2173,15 @@ static void dump_table(char *table, char *db) end= strmov(end,buff); if (where || order_by) { - query = alloc_query_str((ulong) ((end - query) + 1 + + query= alloc_query_str((ulong) ((end - query) + 1 + (where ? strlen(where) + 7 : 0) + (order_by ? 
strlen(order_by) + 10 : 0))); - end = strmov(query, query_buf); + end= strmov(query, query_buf); if (where) - end = strxmov(end, " WHERE ", where, NullS); + end= strxmov(end, " WHERE ", where, NullS); if (order_by) - end = strxmov(end, " ORDER BY ", order_by, NullS); + end= strxmov(end, " ORDER BY ", order_by, NullS); } if (mysql_real_query(mysql, query, (uint) (end - query))) { @@ -2202,10 +2202,10 @@ static void dump_table(char *table, char *db) result_table); if (where || order_by) { - query = alloc_query_str((ulong) (strlen(query) + 1 + + query= alloc_query_str((ulong) (strlen(query) + 1 + (where ? strlen(where) + 7 : 0) + (order_by ? strlen(order_by) + 10 : 0))); - end = strmov(query, query_buf); + end= strmov(query, query_buf); if (where) { @@ -2214,7 +2214,7 @@ static void dump_table(char *table, char *db) fprintf(md_result_file, "-- WHERE: %s\n", where); check_io(md_result_file); } - end = strxmov(end, " WHERE ", where, NullS); + end= strxmov(end, " WHERE ", where, NullS); } if (order_by) { @@ -2223,7 +2223,7 @@ static void dump_table(char *table, char *db) fprintf(md_result_file, "-- ORDER BY: %s\n", order_by); check_io(md_result_file); } - end = strxmov(end, " ORDER BY ", order_by, NullS); + end= strxmov(end, " ORDER BY ", order_by, NullS); } } if (!opt_xml && !opt_compact) @@ -2296,12 +2296,12 @@ static void dump_table(char *table, char *db) check_io(md_result_file); } - for (i = 0; i < mysql_num_fields(res); i++) + for (i= 0; i < mysql_num_fields(res); i++) { int is_blob; ulong length= lengths[i]; - if (!(field = mysql_fetch_field(res))) + if (!(field= mysql_fetch_field(res))) { my_snprintf(query, QUERY_LENGTH, "%s: Not enough fields from table %s! Aborting.\n", @@ -2373,7 +2373,7 @@ static void dump_table(char *table, char *db) else { /* change any strings ("inf", "-inf", "nan") into NULL */ - char *ptr = row[i]; + char *ptr= row[i]; if (my_isalpha(charset_info, *ptr) || (*ptr == '-' && my_isalpha(charset_info, ptr[1]))) dynstr_append(&extended_row, "NULL"); @@ -2433,7 +2433,7 @@ static void dump_table(char *table, char *db) else { /* change any strings ("inf", "-inf", "nan") into NULL */ - char *ptr = row[i]; + char *ptr= row[i]; if (opt_xml) { print_xml_tag1(md_result_file, "\t\t", "field name=", @@ -2479,10 +2479,10 @@ static void dump_table(char *table, char *db) { ulong row_length; dynstr_append(&extended_row,")"); - row_length = 2 + extended_row.length; + row_length= 2 + extended_row.length; if (total_length + row_length < opt_net_buffer_length) { - total_length += row_length; + total_length+= row_length; fputc(',',md_result_file); /* Always row break */ fputs(extended_row.str,md_result_file); } @@ -2494,7 +2494,7 @@ static void dump_table(char *table, char *db) fputs(insert_pat.str,md_result_file); fputs(extended_row.str,md_result_file); - total_length = row_length+init_length; + total_length= row_length+init_length; } check_io(md_result_file); } @@ -2559,15 +2559,15 @@ err: static char *getTableName(int reset) { - static MYSQL_RES *res = NULL; + static MYSQL_RES *res= NULL; MYSQL_ROW row; if (!res) { - if (!(res = mysql_list_tables(mysql,NullS))) + if (!(res= mysql_list_tables(mysql,NullS))) return(NULL); } - if ((row = mysql_fetch_row(res))) + if ((row= mysql_fetch_row(res))) return((char*) row[0]); if (reset) @@ -2575,7 +2575,7 @@ static char *getTableName(int reset) else { mysql_free_result(res); - res = NULL; + res= NULL; } return(NULL); } /* getTableName */ @@ -2589,7 +2589,7 @@ static int dump_all_databases() if (mysql_query_with_error_report(mysql, &tableres, 
"SHOW DATABASES")) return 1; - while ((row = mysql_fetch_row(tableres))) + while ((row= mysql_fetch_row(tableres))) { if (dump_all_tables_in_db(row[0])) result=1; @@ -2597,13 +2597,13 @@ static int dump_all_databases() if (seen_views) { if (mysql_query(mysql, "SHOW DATABASES") || - !(tableres = mysql_store_result(mysql))) + !(tableres= mysql_store_result(mysql))) { my_printf_error(0, "Error: Couldn't execute 'SHOW DATABASES': %s", MYF(0), mysql_error(mysql)); return 1; } - while ((row = mysql_fetch_row(tableres))) + while ((row= mysql_fetch_row(tableres))) { if (dump_all_views_in_db(row[0])) result=1; @@ -2670,7 +2670,7 @@ static int init_dumping(char *database) "SHOW CREATE DATABASE IF NOT EXISTS %s", qdatabase); - if (mysql_query(mysql, qbuf) || !(dbinfo = mysql_store_result(mysql))) + if (mysql_query(mysql, qbuf) || !(dbinfo= mysql_store_result(mysql))) { /* Old server version, dump generic CREATE DATABASE */ if (opt_drop_database) @@ -2687,7 +2687,7 @@ static int init_dumping(char *database) fprintf(md_result_file, "\n/*!40000 DROP DATABASE IF EXISTS %s*/;\n", qdatabase); - row = mysql_fetch_row(dbinfo); + row= mysql_fetch_row(dbinfo); if (row[1]) { fprintf(md_result_file,"\n%s;\n",row[1]); @@ -3005,7 +3005,7 @@ static int do_show_master_status(MYSQL *mysql_con) } else { - row = mysql_fetch_row(master); + row= mysql_fetch_row(master); if (row && row[0] && row[1]) { /* SHOW MASTER STATUS reports file and position */ @@ -3132,7 +3132,7 @@ static void print_value(FILE *file, MYSQL_RES *result, MYSQL_ROW row, MYSQL_FIELD *field; mysql_field_seek(result, 0); - for ( ; (field = mysql_fetch_field(result)) ; row++) + for ( ; (field= mysql_fetch_field(result)) ; row++) { if (!strcmp(field->name,name)) { @@ -3260,17 +3260,19 @@ char check_if_ignore_table(const char *table_name, char *table_type) static char *primary_key_fields(const char *table_name) { - MYSQL_RES *res = NULL; + MYSQL_RES *res= NULL; MYSQL_ROW row; /* SHOW KEYS FROM + table name * 2 (escaped) + 2 quotes + \0 */ char show_keys_buff[15 + NAME_LEN * 2 + 3]; - uint result_length = 0; - char *result = 0; + uint result_length= 0; + char *result= 0; + char buff[NAME_LEN * 2 + 3]; + char *quoted_field; my_snprintf(show_keys_buff, sizeof(show_keys_buff), "SHOW KEYS FROM %s", table_name); if (mysql_query(mysql, show_keys_buff) || - !(res = mysql_store_result(mysql))) + !(res= mysql_store_result(mysql))) { fprintf(stderr, "Warning: Couldn't read keys from table %s;" " records are NOT sorted (%s)\n", @@ -3285,12 +3287,14 @@ static char *primary_key_fields(const char *table_name) * row, and UNIQUE keys come before others. So we only need to check * the first key, not all keys. 
*/ - if ((row = mysql_fetch_row(res)) && atoi(row[1]) == 0) + if ((row= mysql_fetch_row(res)) && atoi(row[1]) == 0) { /* Key is unique */ do - result_length += strlen(row[4]) + 1; /* + 1 for ',' or \0 */ - while ((row = mysql_fetch_row(res)) && atoi(row[3]) > 1); + { + quoted_field= quote_name(row[4], buff, 0); + result_length+= strlen(quoted_field) + 1; /* + 1 for ',' or \0 */ + } while ((row= mysql_fetch_row(res)) && atoi(row[3]) > 1); } /* Build the ORDER BY clause result */ @@ -3298,17 +3302,21 @@ static char *primary_key_fields(const char *table_name) { char *end; /* result (terminating \0 is already in result_length) */ - result = my_malloc(result_length + 10, MYF(MY_WME)); + result= my_malloc(result_length + 10, MYF(MY_WME)); if (!result) { fprintf(stderr, "Error: Not enough memory to store ORDER BY clause\n"); goto cleanup; } mysql_data_seek(res, 0); - row = mysql_fetch_row(res); - end = strmov(result, row[4]); - while ((row = mysql_fetch_row(res)) && atoi(row[3]) > 1) - end = strxmov(end, ",", row[4], NullS); + row= mysql_fetch_row(res); + quoted_field= quote_name(row[4], buff, 0); + end= strmov(result, quoted_field); + while ((row= mysql_fetch_row(res)) && atoi(row[3]) > 1) + { + quoted_field= quote_name(row[4], buff, 0); + end= strxmov(end, ",", quoted_field, NullS); + } } cleanup: @@ -3376,7 +3384,7 @@ static my_bool get_view_structure(char *table, char* db) char table_buff[NAME_LEN*2+3]; char table_buff2[NAME_LEN*2+3]; char query[QUERY_LENGTH]; - FILE *sql_file = md_result_file; + FILE *sql_file= md_result_file; DBUG_ENTER("get_view_structure"); if (opt_no_create_info) /* Don't write table creation info */ diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result index 6669b33e1bb..1f2ea5b0057 100644 --- a/mysql-test/r/mysqldump.result +++ b/mysql-test/r/mysqldump.result @@ -2892,3 +2892,127 @@ CREATE TABLE `t1` ( ) ENGINE=MyISAM DEFAULT CHARSET=latin1; drop table t1; drop user mysqltest_1@localhost; +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqldump_tables` /*!40100 DEFAULT CHARACTER SET latin1 */; + +USE `mysqldump_tables`; +DROP TABLE IF EXISTS `basetable`; +CREATE TABLE `basetable` ( + `id` bigint(20) unsigned NOT NULL auto_increment, + `tag` varchar(64) default NULL, + UNIQUE KEY `id` (`id`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; + +LOCK TABLES `basetable` WRITE; +/*!40000 ALTER TABLE `basetable` DISABLE KEYS */; +/*!40000 ALTER TABLE `basetable` ENABLE KEYS */; +UNLOCK TABLES; + +CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqldump_views` /*!40100 DEFAULT CHARACTER SET latin1 */; + +USE `mysqldump_views`; +DROP TABLE IF EXISTS `nasishnasifu`; +/*!50001 DROP VIEW IF EXISTS `nasishnasifu`*/; +/*!50001 CREATE TABLE `nasishnasifu` ( + `id` bigint(20) unsigned +) */; +/*!50001 DROP TABLE IF EXISTS `nasishnasifu`*/; +/*!50001 DROP VIEW IF EXISTS `nasishnasifu`*/; +/*!50001 CREATE ALGORITHM=UNDEFINED */ +/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */ 
+/*!50001 VIEW `mysqldump_views`.`nasishnasifu` AS select `mysqldump_tables`.`basetable`.`id` AS `id` from `mysqldump_tables`.`basetable` */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +drop view nasishnasifu; +drop database mysqldump_views; +drop table mysqldump_tables.basetable; +drop database mysqldump_tables; +USE test; +DROP TABLE IF EXISTS `t1`; +CREATE TABLE `t1` ( +`a b` INT, +`c"d` INT, +`e``f` INT, +PRIMARY KEY (`a b`, `c"d`, `e``f`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; +insert into t1 values (0815, 4711, 2006); +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO,ANSI' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +DROP TABLE IF EXISTS "t1"; +CREATE TABLE "t1" ( + "a b" int(11) NOT NULL default '0', + "c""d" int(11) NOT NULL default '0', + "e`f" int(11) NOT NULL default '0', + PRIMARY KEY ("a b","c""d","e`f") +); + +LOCK TABLES "t1" WRITE; +/*!40000 ALTER TABLE "t1" DISABLE KEYS */; +INSERT INTO "t1" VALUES (815,4711,2006); +/*!40000 ALTER TABLE "t1" ENABLE KEYS */; +UNLOCK TABLES; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +DROP TABLE IF EXISTS `t1`; +CREATE TABLE `t1` ( + `a b` int(11) NOT NULL default '0', + `c"d` int(11) NOT NULL default '0', + `e``f` int(11) NOT NULL default '0', + PRIMARY KEY (`a b`,`c"d`,`e``f`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; + +LOCK TABLES `t1` WRITE; +/*!40000 ALTER TABLE `t1` DISABLE KEYS */; +INSERT INTO `t1` VALUES (815,4711,2006); +/*!40000 ALTER TABLE `t1` ENABLE KEYS */; +UNLOCK TABLES; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +DROP TABLE `t1`; +End of 5.0 tests diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test index 848c5360db7..7d9f0529ab2 100644 --- 
a/mysql-test/t/mysqldump.test +++ b/mysql-test/t/mysqldump.test @@ -1309,3 +1309,31 @@ grant REPLICATION CLIENT on *.* to mysqltest_1@localhost; # Clean up drop table t1; drop user mysqltest_1@localhost; + +--exec $MYSQL_DUMP --skip-comments --databases mysqldump_tables mysqldump_views; + +drop view nasishnasifu; +drop database mysqldump_views; +drop table mysqldump_tables.basetable; +drop database mysqldump_tables; +USE test; + +# +# BUG#13926: --order-by-primary fails if PKEY contains quote character +# +--disable_warnings +DROP TABLE IF EXISTS `t1`; +CREATE TABLE `t1` ( + `a b` INT, + `c"d` INT, + `e``f` INT, + PRIMARY KEY (`a b`, `c"d`, `e``f`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; +insert into t1 values (0815, 4711, 2006); + +--exec $MYSQL_DUMP --skip-comments --compatible=ansi --order-by-primary test t1 +--exec $MYSQL_DUMP --skip-comments --order-by-primary test t1 +DROP TABLE `t1`; +--enable_warnings +--echo End of 5.0 tests + diff --git a/ndb/include/ndbapi/NdbOperation.hpp b/ndb/include/ndbapi/NdbOperation.hpp index 4db541f7fe4..dbc343d2238 100644 --- a/ndb/include/ndbapi/NdbOperation.hpp +++ b/ndb/include/ndbapi/NdbOperation.hpp @@ -477,7 +477,7 @@ public: /** * Interpreted program instruction: - * Substract RegSource1 from RegSource2 and put the result in RegDest. + * Substract RegSource2 from RegSource1 and put the result in RegDest. * * @param RegSource1 First register. * @param RegSource2 Second register. diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt index 7fee2e92f2b..c8c9e82efc2 100644 --- a/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/ndb/src/kernel/blocks/ERROR_codes.txt @@ -6,7 +6,7 @@ Next DBTUP 4014 Next DBLQH 5043 Next DBDICT 6007 Next DBDIH 7177 -Next DBTC 8037 +Next DBTC 8038 Next CMVMI 9000 Next BACKUP 10022 Next DBUTIL 11002 @@ -283,6 +283,7 @@ ABORT OF TCKEYREQ 8032: No free TC records any more +8037 : Invalid schema version in TCINDXREQ CMVMI ----- diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 491aa0849b9..1c1fdb41d51 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -8252,11 +8252,21 @@ void Dbdih::openingTableErrorLab(Signal* signal, FileRecordPtr filePtr) /* WE FAILED IN OPENING A FILE. IF THE FIRST FILE THEN TRY WITH THE */ /* DUPLICATE FILE, OTHERWISE WE REPORT AN ERROR IN THE SYSTEM RESTART. 
*/ /* ---------------------------------------------------------------------- */ - ndbrequire(filePtr.i == tabPtr.p->tabFile[0]); - filePtr.i = tabPtr.p->tabFile[1]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - openFileRw(signal, filePtr); - filePtr.p->reqStatus = FileRecord::OPENING_TABLE; + if (filePtr.i == tabPtr.p->tabFile[0]) + { + filePtr.i = tabPtr.p->tabFile[1]; + ptrCheckGuard(filePtr, cfileFileSize, fileRecord); + openFileRw(signal, filePtr); + filePtr.p->reqStatus = FileRecord::OPENING_TABLE; + } + else + { + char buf[256]; + BaseString::snprintf(buf, sizeof(buf), + "Error opening DIH schema files for table: %d", + tabPtr.i); + progError(__LINE__, NDBD_EXIT_AFS_NO_SUCH_FILE, buf); + } }//Dbdih::openingTableErrorLab() void Dbdih::readingTableLab(Signal* signal, FileRecordPtr filePtr) @@ -8422,6 +8432,7 @@ Dbdih::resetReplicaSr(TabRecordPtr tabPtr){ } replicaPtr.i = nextReplicaPtrI; }//while + updateNodeInfo(fragPtr); } } diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 0ea49e47fc7..7286481002f 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -6470,6 +6470,7 @@ void Dblqh::execACC_ABORTCONF(Signal* signal) * A NORMAL EVENT DURING CREATION OF A FRAGMENT. WE NOW NEED TO CONTINUE * WITH NORMAL COMMIT PROCESSING. * ---------------------------------------------------------------------- */ + regTcPtr->totSendlenAi = regTcPtr->totReclenAi; if (regTcPtr->currTupAiLen == regTcPtr->totReclenAi) { jam(); regTcPtr->abortState = TcConnectionrec::ABORT_IDLE; @@ -12579,19 +12580,17 @@ void Dblqh::lastWriteInFileLab(Signal* signal) void Dblqh::writePageZeroLab(Signal* signal) { - if (false && logPartPtr.p->logPartState == LogPartRecord::FILE_CHANGE_PROBLEM) + if (logPartPtr.p->logPartState == LogPartRecord::FILE_CHANGE_PROBLEM) { if (logPartPtr.p->firstLogQueue == RNIL) { jam(); logPartPtr.p->logPartState = LogPartRecord::IDLE; - ndbout_c("resetting logPartState to IDLE"); } else { jam(); logPartPtr.p->logPartState = LogPartRecord::ACTIVE; - ndbout_c("resetting logPartState to ACTIVE"); } } @@ -14623,6 +14622,8 @@ void Dblqh::execSr(Signal* signal) LogFileRecordPtr nextLogFilePtr; LogPageRecordPtr tmpLogPagePtr; Uint32 logWord; + Uint32 line; + const char * crash_msg = 0; jamEntry(); logPartPtr.i = signal->theData[0]; @@ -14833,8 +14834,14 @@ void Dblqh::execSr(Signal* signal) /* PLACE THAN IN THE FIRST PAGE OF A NEW FILE IN THE FIRST POSITION AFTER THE*/ /* HEADER. */ /*---------------------------------------------------------------------------*/ - ndbrequire(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] == - (ZPAGE_HEADER_SIZE + ZPOS_NO_FD)); + if (unlikely(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] != + (ZPAGE_HEADER_SIZE + ZPOS_NO_FD))) + { + line = __LINE__; + logWord = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; + crash_msg = "ZFD_TYPE at incorrect position!"; + goto crash; + } { Uint32 noFdDescriptors = logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD]; @@ -14871,19 +14878,10 @@ void Dblqh::execSr(Signal* signal) /*---------------------------------------------------------------------------*/ /* SEND A SIGNAL TO THE SIGNAL LOG AND THEN CRASH THE SYSTEM. */ /*---------------------------------------------------------------------------*/ - signal->theData[0] = RNIL; - signal->theData[1] = logPartPtr.i; - Uint32 tmp = logFilePtr.p->fileName[3]; - tmp = (tmp >> 8) & 0xff;// To get the Directory, DXX. 
- signal->theData[2] = tmp; - signal->theData[3] = logFilePtr.p->fileNo; - signal->theData[4] = logFilePtr.p->currentFilepage; - signal->theData[5] = logFilePtr.p->currentMbyte; - signal->theData[6] = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - signal->theData[7] = ~0; - signal->theData[8] = __LINE__; - sendSignal(cownref, GSN_DEBUG_SIG, signal, 9, JBA); - return; + line = __LINE__; + logWord = ZNEXT_MBYTE_TYPE; + crash_msg = "end of log wo/ having found last GCI"; + goto crash; }//if }//if /*---------------------------------------------------------------------------*/ @@ -14938,19 +14936,9 @@ void Dblqh::execSr(Signal* signal) /*---------------------------------------------------------------------------*/ /* SEND A SIGNAL TO THE SIGNAL LOG AND THEN CRASH THE SYSTEM. */ /*---------------------------------------------------------------------------*/ - signal->theData[0] = RNIL; - signal->theData[1] = logPartPtr.i; - Uint32 tmp = logFilePtr.p->fileName[3]; - tmp = (tmp >> 8) & 0xff;// To get the Directory, DXX. - signal->theData[2] = tmp; - signal->theData[3] = logFilePtr.p->fileNo; - signal->theData[4] = logFilePtr.p->currentMbyte; - signal->theData[5] = logFilePtr.p->currentFilepage; - signal->theData[6] = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - signal->theData[7] = logWord; - signal->theData[8] = __LINE__; - sendSignal(cownref, GSN_DEBUG_SIG, signal, 9, JBA); - return; + line = __LINE__; + crash_msg = "Invalid logword"; + goto crash; break; }//switch /*---------------------------------------------------------------------------*/ @@ -14958,6 +14946,35 @@ void Dblqh::execSr(Signal* signal) // that we reach a new page. /*---------------------------------------------------------------------------*/ } while (1); + return; + +crash: + signal->theData[0] = RNIL; + signal->theData[1] = logPartPtr.i; + Uint32 tmp = logFilePtr.p->fileName[3]; + tmp = (tmp >> 8) & 0xff;// To get the Directory, DXX. + signal->theData[2] = tmp; + signal->theData[3] = logFilePtr.p->fileNo; + signal->theData[4] = logFilePtr.p->currentMbyte; + signal->theData[5] = logFilePtr.p->currentFilepage; + signal->theData[6] = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; + signal->theData[7] = logWord; + signal->theData[8] = line; + + char buf[255]; + BaseString::snprintf(buf, sizeof(buf), + "Error while reading REDO log. from %d\n" + "D=%d, F=%d Mb=%d FP=%d W1=%d W2=%d : %s", + signal->theData[8], + signal->theData[2], + signal->theData[3], + signal->theData[4], + signal->theData[5], + signal->theData[6], + signal->theData[7], + crash_msg ? 
crash_msg : ""); + + progError(__LINE__, NDBD_EXIT_SR_REDOLOG, buf); }//Dblqh::execSr() /*---------------------------------------------------------------------------*/ @@ -14973,8 +14990,8 @@ void Dblqh::execDEBUG_SIG(Signal* signal) UintR tdebug; jamEntry(); - logPagePtr.i = signal->theData[0]; - tdebug = logPagePtr.p->logPageWord[0]; + //logPagePtr.i = signal->theData[0]; + //tdebug = logPagePtr.p->logPageWord[0]; char buf[100]; BaseString::snprintf(buf, 100, diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp index ac7fca9cf93..bf6ce7129ba 100644 --- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp +++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp @@ -720,7 +720,7 @@ public: // Index data - bool isIndexOp; // Used to mark on-going TcKeyReq as indx table access + Uint8 isIndexOp; // Used to mark on-going TcKeyReq as indx table access bool indexOpReturn; UintR noIndexOp; // No outstanding index ops @@ -808,7 +808,7 @@ public: UintR savedState[LqhKeyConf::SignalLength]; // Index data - bool isIndexOp; // Used to mark on-going TcKeyReq as index table access + Uint8 isIndexOp; // Used to mark on-going TcKeyReq as index table access UintR indexOp; UintR currentIndexId; UintR attrInfoLen; diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 71f3aff05d4..dda743616f4 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -1775,8 +1775,7 @@ void Dbtc::execKEYINFO(Signal* signal) apiConnectptr.i = signal->theData[0]; tmaxData = 20; if (apiConnectptr.i >= capiConnectFilesize) { - jam(); - warningHandlerLab(signal, __LINE__); + TCKEY_abort(signal, 18); return; }//if ptrAss(apiConnectptr, apiConnectRecord); @@ -1785,9 +1784,7 @@ void Dbtc::execKEYINFO(Signal* signal) compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2]; compare_transid1 = compare_transid1 | compare_transid2; if (compare_transid1 != 0) { - jam(); - printState(signal, 10); - sendSignalErrorRefuseLab(signal); + TCKEY_abort(signal, 19); return; }//if switch (apiConnectptr.p->apiConnectstate) { @@ -2531,7 +2528,7 @@ void Dbtc::execTCKEYREQ(Signal* signal) Uint32 TstartFlag = tcKeyReq->getStartFlag(Treqinfo); Uint32 TexecFlag = TcKeyReq::getExecuteFlag(Treqinfo); - bool isIndexOp = regApiPtr->isIndexOp; + Uint8 isIndexOp = regApiPtr->isIndexOp; bool isIndexOpReturn = regApiPtr->indexOpReturn; regApiPtr->isIndexOp = false; // Reset marker regApiPtr->m_exec_flag |= TexecFlag; @@ -3277,7 +3274,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal, sig1 = regCachePtr->fragmentid + (regTcPtr->tcNodedata[1] << 16); sig2 = regApiPtr->transid[0]; sig3 = regApiPtr->transid[1]; - sig4 = regApiPtr->ndbapiBlockref; + sig4 = (regTcPtr->isIndexOp == 2) ? reference() : regApiPtr->ndbapiBlockref; sig5 = regTcPtr->clientData; sig6 = regCachePtr->scanInfo; @@ -8619,6 +8616,7 @@ void Dbtc::execSCAN_TABREQ(Signal* signal) // left over from simple/dirty read } else { jam(); + jamLine(transP->apiConnectstate); errCode = ZSTATE_ERROR; goto SCAN_TAB_error_no_state_change; } @@ -12036,14 +12034,18 @@ void Dbtc::readIndexTable(Signal* signal, opType == ZREAD ? 
ZREAD : ZREAD_EX); TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 1); // Allways send one AttrInfo TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, 0); - BlockReference originalReceiver = regApiPtr->ndbapiBlockref; - regApiPtr->ndbapiBlockref = reference(); // Send result to me tcKeyReq->senderData = indexOp->indexOpId; indexOp->indexOpState = IOS_INDEX_ACCESS; regApiPtr->executingIndexOp = regApiPtr->accumulatingIndexOp; regApiPtr->accumulatingIndexOp = RNIL; - regApiPtr->isIndexOp = true; + regApiPtr->isIndexOp = 2; + if (ERROR_INSERTED(8037)) + { + ndbout_c("shifting index version"); + tcKeyReq->tableSchemaVersion = ~(Uint32)indexOp->tcIndxReq.tableSchemaVersion; + } + Uint32 remainingKey = indexOp->keyInfo.getSize(); bool moreKeyData = indexOp->keyInfo.first(keyIter); // *********** KEYINFO in TCKEYREQ *********** @@ -12062,21 +12064,13 @@ void Dbtc::readIndexTable(Signal* signal, ndbassert(TcKeyReq::getDirtyFlag(tcKeyRequestInfo) == 0); ndbassert(TcKeyReq::getSimpleFlag(tcKeyRequestInfo) == 0); EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength); - - /** - * "Fool" TC not to start commiting transaction since it always will - * have one outstanding lqhkeyreq - * This is later decreased when the index read is complete - */ - regApiPtr->lqhkeyreqrec++; + jamEntry(); - /** - * Remember ptr to index read operation - * (used to set correct save point id on index operation later) - */ - indexOp->indexReadTcConnect = regApiPtr->lastTcConnect; + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + goto err; + } - jamEntry(); // *********** KEYINFO *********** if (moreKeyData) { jam(); @@ -12096,6 +12090,10 @@ void Dbtc::readIndexTable(Signal* signal, EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal, KeyInfo::HeaderLength + KeyInfo::DataLength); jamEntry(); + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + goto err; + } dataPos = 0; dataPtr = (Uint32 *) &keyInfo->keyData; } @@ -12106,10 +12104,32 @@ void Dbtc::readIndexTable(Signal* signal, EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal, KeyInfo::HeaderLength + dataPos); jamEntry(); + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + goto err; + } } } - regApiPtr->ndbapiBlockref = originalReceiver; // reset original receiver + /** + * "Fool" TC not to start commiting transaction since it always will + * have one outstanding lqhkeyreq + * This is later decreased when the index read is complete + */ + regApiPtr->lqhkeyreqrec++; + + /** + * Remember ptr to index read operation + * (used to set correct save point id on index operation later) + */ + indexOp->indexReadTcConnect = regApiPtr->lastTcConnect; + +done: + return; + +err: + jam(); + goto done; } /** @@ -12160,7 +12180,7 @@ void Dbtc::executeIndexOperation(Signal* signal, tcKeyReq->transId2 = regApiPtr->transid[1]; tcKeyReq->senderData = tcIndxReq->senderData; // Needed for TRANSID_AI to API indexOp->indexOpState = IOS_INDEX_OPERATION; - regApiPtr->isIndexOp = true; + regApiPtr->isIndexOp = 1; regApiPtr->executingIndexOp = indexOp->indexOpId;; regApiPtr->noIndexOp++; // Increase count @@ -12233,9 +12253,16 @@ void Dbtc::executeIndexOperation(Signal* signal, const Uint32 currSavePointId = regApiPtr->currSavePointId; regApiPtr->currSavePointId = tmp.p->savePointId; EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength); + jamEntry(); + + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } + regApiPtr->currSavePointId = currSavePointId; - jamEntry(); // *********** KEYINFO *********** if (moreKeyData) { jam(); @@ -12256,6 +12283,13 @@ 
void Dbtc::executeIndexOperation(Signal* signal, EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal, KeyInfo::HeaderLength + KeyInfo::DataLength); jamEntry(); + + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } + dataPos = 0; dataPtr = (Uint32 *) &keyInfo->keyData; } @@ -12266,6 +12300,12 @@ void Dbtc::executeIndexOperation(Signal* signal, EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal, KeyInfo::HeaderLength + dataPos); jamEntry(); + + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } } } @@ -12295,6 +12335,13 @@ void Dbtc::executeIndexOperation(Signal* signal, EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal, AttrInfo::HeaderLength + AttrInfo::DataLength); jamEntry(); + + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } + attrInfoPos = 0; dataPtr = (Uint32 *) &attrInfo->attrData; } @@ -12694,9 +12741,16 @@ void Dbtc::insertIntoIndexTable(Signal* signal, const Uint32 currSavePointId = regApiPtr->currSavePointId; regApiPtr->currSavePointId = opRecord->savePointId; EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength); + jamEntry(); + + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } + regApiPtr->currSavePointId = currSavePointId; tcConnectptr.p->currentIndexId = indexData->indexId; - jamEntry(); // *********** KEYINFO *********** if (moreKeyData) { @@ -12726,6 +12780,12 @@ void Dbtc::insertIntoIndexTable(Signal* signal, KeyInfo::HeaderLength + KeyInfo::DataLength); jamEntry(); #endif + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } + dataPtr = (Uint32 *) &keyInfo->keyData; dataPos = 0; } @@ -12761,6 +12821,13 @@ void Dbtc::insertIntoIndexTable(Signal* signal, KeyInfo::HeaderLength + KeyInfo::DataLength); jamEntry(); #endif + + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } + dataPtr = (Uint32 *) &keyInfo->keyData; dataPos = 0; } @@ -12778,6 +12845,11 @@ void Dbtc::insertIntoIndexTable(Signal* signal, KeyInfo::HeaderLength + dataPos); jamEntry(); #endif + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } } } @@ -12813,6 +12885,12 @@ void Dbtc::insertIntoIndexTable(Signal* signal, AttrInfo::HeaderLength + AttrInfo::DataLength); jamEntry(); #endif + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } + dataPtr = (Uint32 *) &attrInfo->attrData; attrInfoPos = 0; } @@ -12849,6 +12927,12 @@ void Dbtc::insertIntoIndexTable(Signal* signal, AttrInfo::HeaderLength + AttrInfo::DataLength); jamEntry(); #endif + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } + dataPtr = (Uint32 *) &attrInfo->attrData; attrInfoPos = 0; } @@ -12994,9 +13078,16 @@ void Dbtc::deleteFromIndexTable(Signal* signal, const Uint32 currSavePointId = regApiPtr->currSavePointId; regApiPtr->currSavePointId = opRecord->savePointId; EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength); + jamEntry(); + + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } + regApiPtr->currSavePointId = currSavePointId; tcConnectptr.p->currentIndexId = indexData->indexId; - jamEntry(); // *********** KEYINFO *********** if (moreKeyData) { @@ -13027,6 +13118,12 @@ void Dbtc::deleteFromIndexTable(Signal* signal, KeyInfo::HeaderLength + KeyInfo::DataLength); jamEntry(); #endif + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } + dataPtr = (Uint32 *) &keyInfo->keyData; dataPos = 0; } @@ -13063,6 
+13160,12 @@ void Dbtc::deleteFromIndexTable(Signal* signal, KeyInfo::HeaderLength + KeyInfo::DataLength); jamEntry(); #endif + if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) + { + jam(); + return; + } + dataPtr = (Uint32 *) &keyInfo->keyData; dataPos = 0; } diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp index f83f21f14d8..13c0bad9c7a 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp @@ -1113,14 +1113,16 @@ Dbtup::updateStartLab(Signal* signal, regOperPtr->pageOffset, &cinBuffer[0], regOperPtr->attrinbufLen); - if (retValue == -1) { - tupkeyErrorLab(signal); - return -1; - }//if } else { jam(); retValue = interpreterStartLab(signal, pagePtr, regOperPtr->pageOffset); }//if + + if (retValue == -1) { + tupkeyErrorLab(signal); + return -1; + }//if + ndbrequire(regOperPtr->tupVersion != ZNIL); pagePtr->pageWord[regOperPtr->pageOffset + 1] = regOperPtr->tupVersion; if (regTabPtr->checksumIndicator) { diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp index 9722aa437c0..8a18fddae19 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp @@ -184,24 +184,28 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate, /* PROPER AMOUNT OF PAGES WERE NOT FOUND. FIND AS MUCH AS */ /* POSSIBLE. */ /* ---------------------------------------------------------------- */ - for (Uint32 j = firstListToCheck; (Uint32)~j; j--) { + if (firstListToCheck) + { ljam(); - if (cfreepageList[j] != RNIL) { + for (Uint32 j = firstListToCheck - 1; (Uint32)~j; j--) { ljam(); + if (cfreepageList[j] != RNIL) { + ljam(); /* ---------------------------------------------------------------- */ /* SOME AREA WAS FOUND, ALLOCATE ALL OF IT. */ /* ---------------------------------------------------------------- */ - allocPageRef = cfreepageList[j]; - removeCommonArea(allocPageRef, j); - noOfPagesAllocated = 1 << j; - findFreeLeftNeighbours(allocPageRef, noOfPagesAllocated, - noOfPagesToAllocate); - findFreeRightNeighbours(allocPageRef, noOfPagesAllocated, - noOfPagesToAllocate); - - return; - }//if - }//for + allocPageRef = cfreepageList[j]; + removeCommonArea(allocPageRef, j); + noOfPagesAllocated = 1 << j; + findFreeLeftNeighbours(allocPageRef, noOfPagesAllocated, + noOfPagesToAllocate); + findFreeRightNeighbours(allocPageRef, noOfPagesAllocated, + noOfPagesToAllocate); + + return; + }//if + }//for + } /* ---------------------------------------------------------------- */ /* NO FREE AREA AT ALL EXISTED. RETURN ZERO PAGES */ /* ---------------------------------------------------------------- */ diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp index acdb73704cb..0bb7c8a1e41 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp @@ -397,12 +397,12 @@ void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr) Uint32 noAllocPages = regFragPtr->noOfPagesToGrow >> 3; // 12.5% noAllocPages += regFragPtr->noOfPagesToGrow >> 4; // 6.25% noAllocPages += 2; - regFragPtr->noOfPagesToGrow += noAllocPages; /* -----------------------------------------------------------------*/ // We will grow by 18.75% plus two more additional pages to grow // a little bit quicker in the beginning. 
/* -----------------------------------------------------------------*/ - allocFragPages(regFragPtr, noAllocPages); + Uint32 allocated = allocFragPages(regFragPtr, noAllocPages); + regFragPtr->noOfPagesToGrow += allocated; }//Dbtup::allocMoreFragPages() Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr currPageRangePtr) diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index e6bb4d4f14f..fe6caf04d8c 100644 --- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -181,10 +181,9 @@ void Ndbcntr::execSYSTEM_ERROR(Signal* signal) case SystemError::CopyFragRefError: BaseString::snprintf(buf, sizeof(buf), - "Node %d killed this node because " - "it could not copy a fragment during node restart. " - "Copy fragment error code: %u.", - killingNode, data1); + "Killed by node %d as " + "copyfrag failed, error: %u", + killingNode, data1); break; default: @@ -2043,6 +2042,11 @@ void Ndbcntr::execSET_VAR_REQ(Signal* signal) { void Ndbcntr::updateNodeState(Signal* signal, const NodeState& newState) const{ NodeStateRep * const stateRep = (NodeStateRep *)&signal->theData[0]; + if (newState.startLevel == NodeState::SL_STARTED) + { + CRASH_INSERTION(1000); + } + stateRep->nodeState = newState; stateRep->nodeState.masterNodeId = cmasterNodeId; stateRep->nodeState.setNodeGroup(c_nodeGroup); @@ -2843,7 +2847,7 @@ void Ndbcntr::Missra::sendNextSTTOR(Signal* signal){ cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); } } - + signal->theData[0] = NDB_LE_NDBStartCompleted; signal->theData[1] = NDB_VERSION; cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); diff --git a/ndb/src/mgmapi/ndb_logevent.cpp b/ndb/src/mgmapi/ndb_logevent.cpp index a90d5658506..2472a434590 100644 --- a/ndb/src/mgmapi/ndb_logevent.cpp +++ b/ndb/src/mgmapi/ndb_logevent.cpp @@ -68,6 +68,13 @@ ndb_mgm_create_logevent_handle(NdbMgmHandle mh, } extern "C" +int +ndb_logevent_get_fd(const NdbLogEventHandle h) +{ + return h->socket; +} + +extern "C" void ndb_mgm_destroy_logevent_handle(NdbLogEventHandle * h) { if( !h ) diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp index 90e3a63c53d..c5a0ebbaf60 100644 --- a/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/ndb/src/ndbapi/NdbScanOperation.cpp @@ -475,6 +475,8 @@ int NdbScanOperation::nextResultImpl(bool fetchAllowed, bool forceSend) idx = m_current_api_receiver; last = m_api_receivers_count; + + Uint32 timeout = tp->m_waitfor_timeout; do { if(theError.code){ @@ -502,7 +504,7 @@ int NdbScanOperation::nextResultImpl(bool fetchAllowed, bool forceSend) */ theNdb->theImpl->theWaiter.m_node = nodeId; theNdb->theImpl->theWaiter.m_state = WAIT_SCAN; - int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT); + int return_code = theNdb->receiveResponse(3*timeout); if (return_code == 0 && seq == tp->getNodeSequence(nodeId)) { continue; } else { @@ -1365,6 +1367,7 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed, return -1; Uint32 seq = theNdbCon->theNodeSequence; Uint32 nodeId = theNdbCon->theDBnode; + Uint32 timeout = tp->m_waitfor_timeout; if(seq == tp->getNodeSequence(nodeId) && !send_next_scan_ordered(s_idx, forceSend)){ Uint32 tmp = m_sent_receivers_count; @@ -1372,7 +1375,7 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed, while(m_sent_receivers_count > 0 && !theError.code){ theNdb->theImpl->theWaiter.m_node = nodeId; theNdb->theImpl->theWaiter.m_state = WAIT_SCAN; - int return_code = 
theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT); + int return_code = theNdb->receiveResponse(3*timeout); if (return_code == 0 && seq == tp->getNodeSequence(nodeId)) { continue; } @@ -1513,6 +1516,8 @@ NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend){ return -1; } + Uint32 timeout = tp->m_waitfor_timeout; + /** * Wait for outstanding */ @@ -1520,7 +1525,7 @@ NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend){ { theNdb->theImpl->theWaiter.m_node = nodeId; theNdb->theImpl->theWaiter.m_state = WAIT_SCAN; - int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT); + int return_code = theNdb->receiveResponse(3*timeout); switch(return_code){ case 0: break; @@ -1590,7 +1595,7 @@ NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend){ { theNdb->theImpl->theWaiter.m_node = nodeId; theNdb->theImpl->theWaiter.m_state = WAIT_SCAN; - int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT); + int return_code = theNdb->receiveResponse(3*timeout); switch(return_code){ case 0: break; diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp index 5785db232c4..c25aae55897 100644 --- a/ndb/test/ndbapi/testIndex.cpp +++ b/ndb/test/ndbapi/testIndex.cpp @@ -1199,6 +1199,48 @@ int runLQHKEYREF(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +int +runBug21384(NDBT_Context* ctx, NDBT_Step* step) +{ + Ndb* pNdb = GETNDB(step); + HugoTransactions hugoTrans(*ctx->getTab()); + NdbRestarter restarter; + + int loops = ctx->getNumLoops(); + const int rows = ctx->getNumRecords(); + const int batchsize = ctx->getProperty("BatchSize", 50); + + while (loops--) + { + if(restarter.insertErrorInAllNodes(8037) != 0) + { + g_err << "Failed to error insert(8037)" << endl; + return NDBT_FAILED; + } + + if (hugoTrans.indexReadRecords(pNdb, pkIdxName, rows, batchsize) == 0) + { + g_err << "Index succeded (it should have failed" << endl; + return NDBT_FAILED; + } + + if(restarter.insertErrorInAllNodes(0) != 0) + { + g_err << "Failed to error insert(0)" << endl; + return NDBT_FAILED; + } + + if (hugoTrans.indexReadRecords(pNdb, pkIdxName, rows, batchsize) != 0){ + g_err << "Index read failed" << endl; + return NDBT_FAILED; + } + } + + return NDBT_OK; +} + + + NDBT_TESTSUITE(testIndex); TESTCASE("CreateAll", "Test that we can create all various indexes on each table\n" @@ -1512,6 +1554,16 @@ TESTCASE("UniqueNull", FINALIZER(createPkIndex_Drop); FINALIZER(runClearTable); } +TESTCASE("Bug21384", + "Test that unique indexes and nulls"){ + TC_PROPERTY("LoggedIndexes", (unsigned)0); + INITIALIZER(runClearTable); + INITIALIZER(createPkIndex); + INITIALIZER(runLoadTable); + STEP(runBug21384); + FINALIZER(createPkIndex_Drop); + FINALIZER(runClearTable); +} NDBT_TESTSUITE_END(testIndex); int main(int argc, const char** argv){ diff --git a/ndb/test/ndbapi/testSystemRestart.cpp b/ndb/test/ndbapi/testSystemRestart.cpp index 30f7aca9b06..8a0100ff3e4 100644 --- a/ndb/test/ndbapi/testSystemRestart.cpp +++ b/ndb/test/ndbapi/testSystemRestart.cpp @@ -1121,6 +1121,46 @@ int runClearTable(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +int +runBug21536(NDBT_Context* ctx, NDBT_Step* step) +{ + NdbRestarter restarter; + const Uint32 nodeCount = restarter.getNumDbNodes(); + if(nodeCount != 2){ + g_info << "Bug21536 - 2 nodes to test" << endl; + return NDBT_OK; + } + + int node1 = restarter.getDbNodeId(rand() % nodeCount); + int node2 = restarter.getRandomNodeSameNodeGroup(node1, rand()); + + if (node1 == -1 || node2 == -1) + return NDBT_OK; + + int result = NDBT_OK; 
+ do { + CHECK(restarter.restartOneDbNode(node1, false, true, true) == 0); + CHECK(restarter.waitNodesNoStart(&node1, 1) == 0); + CHECK(restarter.insertErrorInNode(node1, 1000) == 0); + int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 }; + CHECK(restarter.dumpStateOneNode(node1, val2, 2) == 0); + CHECK(restarter.startNodes(&node1, 1) == 0); + restarter.waitNodesStartPhase(&node1, 1, 3, 120); + CHECK(restarter.waitNodesNoStart(&node1, 1) == 0); + + CHECK(restarter.restartOneDbNode(node2, true, true, true) == 0); + CHECK(restarter.waitNodesNoStart(&node2, 1) == 0); + CHECK(restarter.startNodes(&node1, 1) == 0); + CHECK(restarter.waitNodesStarted(&node1, 1) == 0); + CHECK(restarter.startNodes(&node2, 1) == 0); + CHECK(restarter.waitClusterStarted() == 0); + + } while(0); + + g_info << "Bug21536 finished" << endl; + + return result; +} NDBT_TESTSUITE(testSystemRestart); TESTCASE("SR1", @@ -1287,6 +1327,13 @@ TESTCASE("Bug18385", STEP(runBug18385); FINALIZER(runClearTable); } +TESTCASE("Bug21536", + "Perform partition system restart with other nodes with higher GCI"){ + INITIALIZER(runWaitStarted); + INITIALIZER(runClearTable); + STEP(runBug21536); + FINALIZER(runClearTable); +} NDBT_TESTSUITE_END(testSystemRestart); int main(int argc, const char** argv){ diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index cbb8a9a2574..a2edc568426 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -449,6 +449,10 @@ max-time: 1000 cmd: testNodeRestart args: -n Bug20185 T1 +max-time: 1000 +cmd: testIndex +args: -n Bug21384 + # OLD FLEX max-time: 500 cmd: flexBench diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 5d6fe5f984f..81b1fdcad33 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -35,6 +35,7 @@ // options from from mysqld.cc extern my_bool opt_ndb_optimized_node_selection; extern const char *opt_ndbcluster_connectstring; +extern ulong opt_ndb_cache_check_time; // Default value for parallelism static const int parallelism= 0; @@ -5238,6 +5239,7 @@ bool ndbcluster_init() pthread_cond_init(&COND_ndb_util_thread, NULL); + ndb_cache_check_time = opt_ndb_cache_check_time; // Create utility thread pthread_t tmp; if (pthread_create(&tmp, &connection_attrib, ndb_util_thread_func, 0)) diff --git a/sql/log.cc b/sql/log.cc index ebd1d10d8b7..2a546e47ded 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -723,13 +723,18 @@ shutdown the MySQL server and restart it.", name, errno); int MYSQL_LOG::get_current_log(LOG_INFO* linfo) { pthread_mutex_lock(&LOCK_log); + int ret = raw_get_current_log(linfo); + pthread_mutex_unlock(&LOCK_log); + return ret; +} + +int MYSQL_LOG::raw_get_current_log(LOG_INFO* linfo) +{ strmake(linfo->log_file_name, log_file_name, sizeof(linfo->log_file_name)-1); linfo->pos = my_b_tell(&log_file); - pthread_mutex_unlock(&LOCK_log); return 0; } - /* Move all data up in a file in an filename index file diff --git a/sql/sql_class.h b/sql/sql_class.h index 53a95a89b51..039c133e885 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -342,6 +342,7 @@ public: bool need_mutex); int find_next_log(LOG_INFO* linfo, bool need_mutex); int get_current_log(LOG_INFO* linfo); + int raw_get_current_log(LOG_INFO* linfo); uint next_file_id(); inline bool is_open() { return log_type != LOG_CLOSED; } inline char* get_index_fname() { return index_file_name;} diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index ccda69522c7..e1933d42f9e 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ 
-1494,10 +1494,14 @@ bool show_binlogs(THD* thd) if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(TRUE); + + pthread_mutex_lock(mysql_bin_log.get_log_lock()); mysql_bin_log.lock_index(); index_file=mysql_bin_log.get_index_file(); - - mysql_bin_log.get_current_log(&cur); + + mysql_bin_log.raw_get_current_log(&cur); // dont take mutex + pthread_mutex_unlock(mysql_bin_log.get_log_lock()); // lockdep, OK + cur_dir_len= dirname_length(cur.log_file_name); reinit_io_cache(index_file, READ_CACHE, (my_off_t) 0, 0, 0); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index d2aca27c836..97db65fb917 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -6892,6 +6892,9 @@ load: LOAD DATA_SYM YYABORT; } lex->sql_command = SQLCOM_LOAD_MASTER_TABLE; + WARN_DEPRECATED("LOAD TABLE FROM MASTER", + "mysqldump or future " + "BACKUP/RESTORE DATABASE facility"); if (!Select->add_table_to_list(YYTHD, $3, NULL, TL_OPTION_UPDATING)) YYABORT; }; @@ -6930,6 +6933,9 @@ load_data: FROM MASTER_SYM { Lex->sql_command = SQLCOM_LOAD_MASTER_DATA; + WARN_DEPRECATED("LOAD DATA FROM MASTER", + "mysqldump or future " + "BACKUP/RESTORE DATABASE facility"); }; opt_local: |