From 81b5dbe25bb2ec4c89348539323fc6a9ba844a14 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 26 Oct 2007 08:42:33 +0200 Subject: ndb - bug#31635 (5.0) 0 pad varsize keys in ndbapi mysql-test/r/ndb_basic.result: new testcase mysql-test/t/ndb_basic.test: new testcase ndb/src/ndbapi/NdbOperationDefine.cpp: pad varsize keys ndb/src/ndbapi/NdbOperationSearch.cpp: pad varsize keys --- mysql-test/r/ndb_basic.result | 24 ++++++++++++++++++++++++ mysql-test/t/ndb_basic.test | 22 ++++++++++++++++++++-- ndb/src/ndbapi/NdbOperationDefine.cpp | 33 +++++++++++++++++++++++++-------- ndb/src/ndbapi/NdbOperationSearch.cpp | 30 +++++++++++++++++++++++------- 4 files changed, 92 insertions(+), 17 deletions(-) diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index 346b1d5741b..08bc6c9a3aa 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -841,4 +841,28 @@ a b 3 30 4 1 drop table t1,t2; +create table t1 (a varchar(100) primary key, b varchar(100)) engine = NDB; +insert into t1 values +('a', 'a'),('b','b'),('c', 'c'),('aa', 'aa'),('bb', 'bb'),('cc', 'cc'); +replace into t1 values ('a', '-a'); +replace into t1 values ('b', '-b'); +replace into t1 values ('c', '-c'); +replace into t1 values ('aa', '-aa'); +replace into t1 values ('bb', '-bb'); +replace into t1 values ('cc', '-cc'); +replace into t1 values ('aaa', '-aaa'); +replace into t1 values ('bbb', '-bbb'); +replace into t1 values ('ccc', '-ccc'); +select * from t1 order by 1,2; +a b +a -a +aa -aa +aaa -aaa +b -b +bb -bb +bbb -bbb +c -c +cc -cc +ccc -ccc +drop table t1; End of 5.0 tests diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index 80c8942348c..e9401ff3894 100644 --- a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -780,7 +780,25 @@ update ignore t1,t2 set a = 1, c = 1 where a = 3 and c = 3; select * from t1 order by a; drop table t1,t2; -# End of 5.0 tests ---echo End of 5.0 tests +# +# Bug#31635 +# +create table t1 (a varchar(100) primary key, b varchar(100)) engine = NDB; +insert into t1 values + ('a', 'a'),('b','b'),('c', 'c'),('aa', 'aa'),('bb', 'bb'),('cc', 'cc'); +replace into t1 values ('a', '-a'); +replace into t1 values ('b', '-b'); +replace into t1 values ('c', '-c'); +replace into t1 values ('aa', '-aa'); +replace into t1 values ('bb', '-bb'); +replace into t1 values ('cc', '-cc'); +replace into t1 values ('aaa', '-aaa'); +replace into t1 values ('bbb', '-bbb'); +replace into t1 values ('ccc', '-ccc'); +select * from t1 order by 1,2; +drop table t1; + +# End of 5.0 tests +--echo End of 5.0 tests diff --git a/ndb/src/ndbapi/NdbOperationDefine.cpp b/ndb/src/ndbapi/NdbOperationDefine.cpp index 95e90609f9b..50f64d4dda4 100644 --- a/ndb/src/ndbapi/NdbOperationDefine.cpp +++ b/ndb/src/ndbapi/NdbOperationDefine.cpp @@ -572,15 +572,32 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, * If it is not aligned then we start by copying the value to tempData and * use this as aValue instead. *************************************************************************/ - const int attributeSize = sizeInBytes; - const int slack = sizeInBytes & 3; + int attributeSize = sizeInBytes; + int slack = (sizeInBytes & 3) ? 
4 - (sizeInBytes & 3) : 0; + switch(tAttrInfo->m_type){ + case NdbDictionary::Column::Varchar: + case NdbDictionary::Column::Varbinary: + attributeSize = 1 + *(Uint8*)aValue; + slack = 4 * totalSizeInWords - attributeSize; + break; + case NdbDictionary::Column::Longvarchar: + case NdbDictionary::Column::Longvarbinary: + { + const Uint8* ptr = (const Uint8*)aValue; + attributeSize = 2 + ptr[0] + 256 * ptr[1]; + slack = 4 * totalSizeInWords - attributeSize; + break; + } + } - if (((UintPtr)aValue & 3) != 0 || (slack != 0)){ - memcpy(&tempData[0], aValue, attributeSize); - aValue = (char*)&tempData[0]; - if(slack != 0) { - char * tmp = (char*)&tempData[0]; - memset(&tmp[attributeSize], 0, (4 - slack)); + if (((UintPtr)aValue & 3) != 0 || (slack != 0)) + { + char * tmp = (char*)tempData; + memcpy(tmp, aValue, attributeSize); + aValue = tmp; + if(slack != 0) + { + bzero(tmp + attributeSize, slack); }//if }//if diff --git a/ndb/src/ndbapi/NdbOperationSearch.cpp b/ndb/src/ndbapi/NdbOperationSearch.cpp index a3e3f7a7a91..5639c5dfc09 100644 --- a/ndb/src/ndbapi/NdbOperationSearch.cpp +++ b/ndb/src/ndbapi/NdbOperationSearch.cpp @@ -129,6 +129,7 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, OperationType tOpType = theOperationType; Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize; + const Uint32 totalSizeInWords = (sizeInBytes + 3) / 4; Uint32 real_len; if (! tAttrInfo->get_var_length(aValue, real_len)) { @@ -150,20 +151,35 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, * aValue. If it is not aligned then we start by copying the value to * tempData and use this as aValue instead. ***********************************************************************/ - const int attributeSize = sizeInBytes; - const int slack = sizeInBytes & 3; + int attributeSize = sizeInBytes; + int slack = (sizeInBytes & 3) ? 4 - (sizeInBytes & 3) : 0; const int align = UintPtr(aValue) & 7; + switch(tAttrInfo->m_type){ + case NdbDictionary::Column::Varchar: + case NdbDictionary::Column::Varbinary: + attributeSize = 1 + *(Uint8*)aValue; + slack = 4 * totalSizeInWords - attributeSize; + break; + case NdbDictionary::Column::Longvarchar: + case NdbDictionary::Column::Longvarbinary: + { + const Uint8* ptr = (const Uint8*)aValue; + attributeSize = 2 + ptr[0] + 256 * ptr[1]; + slack = 4*totalSizeInWords - attributeSize; + break; + } + } + if (((align & 3) != 0) || (slack != 0) || (tDistrKey && (align != 0))) { - ((Uint32*)tempData)[attributeSize >> 2] = 0; - memcpy(&tempData[0], aValue, attributeSize); - aValue = (char*)&tempData[0]; + char * tmp = (char*)tempData; + memcpy(tmp, aValue, attributeSize); + aValue = tmp; + bzero(tmp + attributeSize, slack); }//if } - Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Inc. 
bits in last word - if (true){ //tArraySize != 0) { Uint32 tTupKeyLen = theTupKeyLen; -- cgit v1.2.1 From d54f77eb628b23575a5e1fdbd8c7d77dad3bc4ab Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 26 Oct 2007 09:06:18 +0200 Subject: post merge weirdness --- mysql-test/suite/ndb/r/ndb_basic.result | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mysql-test/suite/ndb/r/ndb_basic.result b/mysql-test/suite/ndb/r/ndb_basic.result index 38273230f19..9f4f8c0755c 100644 --- a/mysql-test/suite/ndb/r/ndb_basic.result +++ b/mysql-test/suite/ndb/r/ndb_basic.result @@ -882,16 +882,16 @@ replace into t1 values ('aaa', '-aaa'); replace into t1 values ('bbb', '-bbb'); replace into t1 values ('ccc', '-ccc'); select * from t1 order by 1,2; -a b -a -a -aa -aa -aaa -aaa -b -b -bb -bb -bbb -bbb -c -c -cc -cc -ccc -ccc +a b +a -a +aa -aa +aaa -aaa +b -b +bb -bb +bbb -bbb +c -c +cc -cc +ccc -ccc drop table t1; End of 5.0 tests CREATE TABLE t1 (a VARCHAR(255) NOT NULL, -- cgit v1.2.1 From de0780e00a184620f1831a5035fded5f314312d4 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 1 Nov 2007 15:08:00 +0100 Subject: Bug #31484 Cluster LOST_EVENTS entry not added to binlog on mysqld restart. --- mysql-test/include/have_multi_ndb.inc | 12 ++-------- mysql-test/suite/ndb/r/ndb_multi_row.result | 1 + mysql-test/suite/ndb/t/ndb_multi_row.test | 3 ++- sql/ha_ndbcluster.cc | 2 +- sql/ha_ndbcluster_binlog.cc | 35 +++++++++++++++++++++++++---- 5 files changed, 37 insertions(+), 16 deletions(-) diff --git a/mysql-test/include/have_multi_ndb.inc b/mysql-test/include/have_multi_ndb.inc index deda22b64c0..dd6e609f130 100644 --- a/mysql-test/include/have_multi_ndb.inc +++ b/mysql-test/include/have_multi_ndb.inc @@ -5,10 +5,6 @@ connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,); # Check that server1 has NDB support connection server1; disable_query_log; ---disable_warnings -drop table if exists t1, t2; ---enable_warnings -flush tables; --require r/true.require select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster'; --source include/ndb_not_readonly.inc @@ -17,14 +13,10 @@ enable_query_log; # Check that server2 has NDB support connection server2; disable_query_log; ---disable_warnings -drop table if exists t1, t2; ---enable_warnings -flush tables; --require r/true.require select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster'; --source include/ndb_not_readonly.inc enable_query_log; -# Set the default connection to 'server1' -connection server1; +# Set the default connection +connection default; diff --git a/mysql-test/suite/ndb/r/ndb_multi_row.result b/mysql-test/suite/ndb/r/ndb_multi_row.result index cf5a76d6f01..3d34b16a1a8 100644 --- a/mysql-test/suite/ndb/r/ndb_multi_row.result +++ b/mysql-test/suite/ndb/r/ndb_multi_row.result @@ -1,4 +1,5 @@ drop table if exists t1, t2, t3, t4; +flush status; drop table if exists t1, t2, t3, t4; flush status; create table t1 (a int) engine=ndbcluster; diff --git a/mysql-test/suite/ndb/t/ndb_multi_row.test b/mysql-test/suite/ndb/t/ndb_multi_row.test index c82307839f4..26953093ed0 100644 --- a/mysql-test/suite/ndb/t/ndb_multi_row.test +++ b/mysql-test/suite/ndb/t/ndb_multi_row.test @@ -6,11 +6,12 @@ --disable_warnings connection server2; drop table if exists t1, t2, t3, t4; +flush status; connection server1; drop table if exists t1, t2, t3, t4; +flush status; --enable_warnings -flush status; # Create test tables on server1 
create table t1 (a int) engine=ndbcluster; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index a799dd4841b..2294d836854 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -618,7 +618,7 @@ bool ha_ndbcluster::get_error_message(int error, DBUG_ENTER("ha_ndbcluster::get_error_message"); DBUG_PRINT("enter", ("error: %d", error)); - Ndb *ndb= get_ndb(); + Ndb *ndb= check_ndb_in_thd(current_thd); if (!ndb) DBUG_RETURN(FALSE); diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 5d5c8a26447..75e9937ecd6 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -3587,6 +3587,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg) Thd_ndb *thd_ndb=0; int ndb_update_ndb_binlog_index= 1; injector *inj= injector::instance(); + uint incident_id= 0; #ifdef RUN_NDB_BINLOG_TIMER Timer main_timer; @@ -3692,17 +3693,43 @@ pthread_handler_t ndb_binlog_thread_func(void *arg) pthread_mutex_unlock(&injector_mutex); pthread_cond_signal(&injector_cond); + /* + wait for mysql server to start (so that the binlog is started + and thus can receive the first GAP event) + */ + pthread_mutex_lock(&LOCK_server_started); + while (!mysqld_server_started) + { + struct timespec abstime; + set_timespec(abstime, 1); + pthread_cond_timedwait(&COND_server_started, &LOCK_server_started, + &abstime); + if (ndbcluster_terminating) + { + pthread_mutex_unlock(&LOCK_server_started); + pthread_mutex_lock(&LOCK_ndb_util_thread); + goto err; + } + } + pthread_mutex_unlock(&LOCK_server_started); restart: /* Main NDB Injector loop */ { /* - Always insert a GAP event as we cannot know what has happened in the cluster - while not being connected. + Always insert a GAP event as we cannot know what has happened + in the cluster while not being connected. 
*/ - LEX_STRING const msg= { C_STRING_WITH_LEN("Cluster connect") }; - inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg); + LEX_STRING const msg[2]= + { + { C_STRING_WITH_LEN("mysqld startup") }, + { C_STRING_WITH_LEN("cluster disconnect")} + }; + IF_DBUG(int error=) + inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg[incident_id]); + DBUG_ASSERT(!error); + incident_id= 1; } { thd->proc_info= "Waiting for ndbcluster to start"; -- cgit v1.2.1 From de3ff607e9dc624eb72d34bcfaf4bbb6765b6e49 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 1 Nov 2007 20:00:44 +0100 Subject: Bug #31484 Cluster LOST_EVENTS entry not added to binlog on mysqld restart - correction --- mysql-test/suite/ndb/r/ndb_multi.result | 5 +++-- mysql-test/suite/ndb/t/ndb_multi.test | 6 ++++-- sql/ha_ndbcluster_binlog.cc | 1 + 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/mysql-test/suite/ndb/r/ndb_multi.result b/mysql-test/suite/ndb/r/ndb_multi.result index 98c4265b833..40483887919 100644 --- a/mysql-test/suite/ndb/r/ndb_multi.result +++ b/mysql-test/suite/ndb/r/ndb_multi.result @@ -1,4 +1,5 @@ drop table if exists t1, t2, t3, t4; +flush status; drop table if exists t1, t2, t3, t4; flush status; create table t1 (a int) engine=ndbcluster; @@ -132,11 +133,11 @@ master_epoch, count)) engine ndb; show tables like '%$%'; Tables_in_test (%$%) -t1$EX +t1$ex use test; show tables like '%$%'; Tables_in_test (%$%) -t1$EX +t1$ex drop table `test`.`t1$EX`; show tables like '%$%'; Tables_in_test (%$%) diff --git a/mysql-test/suite/ndb/t/ndb_multi.test b/mysql-test/suite/ndb/t/ndb_multi.test index ce7e22b3b7f..e033ad1e479 100644 --- a/mysql-test/suite/ndb/t/ndb_multi.test +++ b/mysql-test/suite/ndb/t/ndb_multi.test @@ -4,11 +4,11 @@ --disable_warnings connection server2; drop table if exists t1, t2, t3, t4; +flush status; connection server1; drop table if exists t1, t2, t3, t4; ---enable_warnings - flush status; +--enable_warnings # Create test tables on server1 create table t1 (a int) engine=ndbcluster; @@ -139,9 +139,11 @@ create table `test`.`t1$EX` # check that table shows up ok on both servers # before bugfix table would not show up on server2 +--replace_regex /EX/ex/ show tables like '%$%'; connection server2; use test; +--replace_regex /EX/ex/ show tables like '%$%'; # check cleanup diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 75e9937ecd6..48973f3e27e 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -3716,6 +3716,7 @@ restart: /* Main NDB Injector loop */ + if (ndb_binlog_running) { /* Always insert a GAP event as we cannot know what has happened -- cgit v1.2.1 From e93efd3d94df55ff552b397a74f10c3b9962fc08 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 2 Nov 2007 07:14:56 +0100 Subject: make sure everything is clean before starting the test --- mysql-test/include/have_multi_ndb.inc | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/mysql-test/include/have_multi_ndb.inc b/mysql-test/include/have_multi_ndb.inc index dd6e609f130..c1965203492 100644 --- a/mysql-test/include/have_multi_ndb.inc +++ b/mysql-test/include/have_multi_ndb.inc @@ -18,5 +18,25 @@ select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schem --source include/ndb_not_readonly.inc enable_query_log; +# cleanup + +connection server1; +disable_query_log; +disable_warnings; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +flush tables; +flush status; +enable_warnings; +enable_query_log; + +connection server2; +disable_query_log; 
+disable_warnings; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +flush tables; +flush status; +enable_warnings; +enable_query_log; + # Set the default connection connection default; -- cgit v1.2.1 From 11212214c1e6d3d9e7eeb4e34d3491f7afc4d751 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 2 Nov 2007 17:34:06 +0100 Subject: workaround for some bug to investigate --- mysql-test/include/have_multi_ndb.inc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mysql-test/include/have_multi_ndb.inc b/mysql-test/include/have_multi_ndb.inc index c1965203492..e8f66afe5bf 100644 --- a/mysql-test/include/have_multi_ndb.inc +++ b/mysql-test/include/have_multi_ndb.inc @@ -23,6 +23,7 @@ enable_query_log; connection server1; disable_query_log; disable_warnings; +--error 0,1051 drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; flush tables; flush status; @@ -32,6 +33,7 @@ enable_query_log; connection server2; disable_query_log; disable_warnings; +--error 0,1051 drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; flush tables; flush status; -- cgit v1.2.1 From f47cd7a0e687a5f647df3ae271515c315bf45d22 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 2 Nov 2007 23:28:29 +0100 Subject: reverting to old setting --- mysql-test/include/have_multi_ndb.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/include/have_multi_ndb.inc b/mysql-test/include/have_multi_ndb.inc index e8f66afe5bf..9779f181191 100644 --- a/mysql-test/include/have_multi_ndb.inc +++ b/mysql-test/include/have_multi_ndb.inc @@ -41,4 +41,4 @@ enable_warnings; enable_query_log; # Set the default connection -connection default; +connection server1; -- cgit v1.2.1 From 3df880241cd49b062930f800c6fcaeae6f5ffee8 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 2 Nov 2007 23:44:17 +0100 Subject: this error should not be printed --- sql/ha_ndbcluster_binlog.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 48973f3e27e..5fc8781edfe 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -779,6 +779,7 @@ static int ndbcluster_create_ndb_apply_status_table(THD *thd) const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR, 701, + 702, 4009, 0}; // do not print error 701 etc run_query(thd, buf, end, no_print_error, TRUE); @@ -839,6 +840,7 @@ static int ndbcluster_create_schema_table(THD *thd) const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR, 701, + 702, 4009, 0}; // do not print error 701 etc run_query(thd, buf, end, no_print_error, TRUE); -- cgit v1.2.1 From 17a8a0b0db188b5a506ecb63916411923df9492e Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 3 Nov 2007 13:46:49 +0100 Subject: compile error --- sql/ha_ndbcluster_binlog.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 5fc8781edfe..9465c5dbb3c 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -777,7 +777,7 @@ static int ndbcluster_create_ndb_apply_status_table(THD *thd) " end_pos BIGINT UNSIGNED NOT NULL, " " PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB"); - const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR, + const int no_print_error[5]= {ER_TABLE_EXISTS_ERROR, 701, 702, 4009, @@ -838,7 +838,7 @@ static int ndbcluster_create_schema_table(THD *thd) " type INT UNSIGNED NOT NULL," " PRIMARY KEY USING HASH (db,name) ) ENGINE=NDB"); - const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR, + const int no_print_error[5]= {ER_TABLE_EXISTS_ERROR, 701, 702, 4009, 
-- cgit v1.2.1 From f7ee88253a7d4620de3651c3c4d7cb7a3ab3d8ac Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 5 Nov 2007 20:18:22 +0100 Subject: ha_ndbcluster.h: Bug #31956 auto increment bugs in MySQL Cluster: Added utility method and constant for internal prefetch default ndb_auto_increment.result: BitKeeper file /home/marty/MySQL/mysql-5.0-ndb/mysql-test/r/ndb_auto_increment.result mysqld.cc: Bug #25176 Trying to set ndb_autoincrement_prefetch_sz always fails: Changed pointer to max value Bug #31956 auto increment bugs in MySQL Cluster: Changed meaning of ndb_autoincrement_prefetch_sz to specify prefetch between statements, changed default to 1 (with internal prefetch to at least 32 inside a statement) ndb_insert.test, ndb_insert.result: Moved auto_increment tests to ndb_auto_increment.test ndb_auto_increment.test: BitKeeper file /home/marty/MySQL/mysql-5.0-ndb/mysql-test/t/ndb_auto_increment.test ha_ndbcluster.cc: Bug #31956 auto increment bugs in MySQL Cluster: Changed meaning of ndb_autoincrement_prefetch_sz to specify prefetch between statements, changed default to 1 (with internal prefetch to at least 32 inside a statement), added handling of updates of pk/unique key with auto_increment Bug #32055 Cluster does not handle auto inc correctly with insert ignore statement sql/mysqld.cc: Bug #25176 Trying to set ndb_autoincrement_prefetch_sz always fails: Changed pointer to max value Bug #31956 auto increment bugs in MySQL Cluster: Changed meaning of ndb_autoincrement_prefetch_sz to specify prefetch between statements, changed default to 1 (with internal prefetch to at least 32 inside a statement) sql/ha_ndbcluster.h: Bug #31956 auto increment bugs in MySQL Cluster: Added utility method and constant for internal prefetch default sql/ha_ndbcluster.cc: Bug #31956 auto increment bugs in MySQL Cluster: Changed meaning of ndb_autoincrement_prefetch_sz to specify prefetch between statements, changed default to 1 (with internal prefetch to at least 32 inside a statement), added handling of updates of pk/unique key with auto_increment Bug #32055 Cluster does not handle auto inc correctly with insert ignore statement mysql-test/r/ndb_auto_increment.result: BitKeeper file /home/marty/MySQL/mysql-5.0-ndb/mysql-test/r/ndb_auto_increment.result mysql-test/t/ndb_auto_increment.test: BitKeeper file /home/marty/MySQL/mysql-5.0-ndb/mysql-test/t/ndb_auto_increment.test mysql-test/t/ndb_insert.test: Moved auto_increment tests to ndb_auto_increment.test mysql-test/r/ndb_insert.result: Moved auto_increment tests to ndb_auto_increment.test --- mysql-test/r/ndb_auto_increment.result | 445 +++++++++++++++++++++++++++++++++ mysql-test/r/ndb_insert.result | 169 ------------- mysql-test/t/ndb_auto_increment.test | 294 ++++++++++++++++++++++ mysql-test/t/ndb_insert.test | 138 ---------- sql/ha_ndbcluster.cc | 84 +++++-- sql/ha_ndbcluster.h | 3 + sql/mysqld.cc | 4 +- 7 files changed, 806 insertions(+), 331 deletions(-) create mode 100644 mysql-test/r/ndb_auto_increment.result create mode 100644 mysql-test/t/ndb_auto_increment.test diff --git a/mysql-test/r/ndb_auto_increment.result b/mysql-test/r/ndb_auto_increment.result new file mode 100644 index 00000000000..b7c9fa8e2b5 --- /dev/null +++ b/mysql-test/r/ndb_auto_increment.result @@ -0,0 +1,445 @@ +DROP TABLE IF EXISTS t1,t2; +DROP TABLE IF EXISTS t1; +set @old_auto_increment_offset = @@session.auto_increment_offset; +set @old_auto_increment_increment = @@session.auto_increment_increment; +set @old_ndb_autoincrement_prefetch_sz = 
@@session.ndb_autoincrement_prefetch_sz; +flush status; +create table t1 (a int not null auto_increment primary key) engine ndb; +insert into t1 values (NULL); +select * from t1 order by a; +a +1 +update t1 set a = 5 where a = 1; +insert into t1 values (NULL); +select * from t1 order by a; +a +5 +6 +insert into t1 values (7); +insert into t1 values (NULL); +select * from t1 order by a; +a +5 +6 +7 +8 +insert into t1 values (2); +insert into t1 values (NULL); +select * from t1 order by a; +a +2 +5 +6 +7 +8 +9 +update t1 set a = 4 where a = 2; +insert into t1 values (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +10 +delete from t1 where a = 10; +insert into t1 values (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +replace t1 values (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +replace t1 values (15); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +15 +replace into t1 values (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +15 +16 +replace t1 values (15); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +15 +16 +insert ignore into t1 values (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +15 +16 +17 +insert ignore into t1 values (15), (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +15 +16 +17 +18 +insert into t1 values (15) +on duplicate key update a = 20; +insert into t1 values (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +16 +17 +18 +20 +21 +insert into t1 values (NULL) on duplicate key update a = 30; +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +16 +17 +18 +20 +21 +22 +insert into t1 values (30) on duplicate key update a = 40; +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +16 +17 +18 +20 +21 +22 +30 +insert ignore into t1 values(600),(NULL),(NULL),(610),(NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +16 +17 +18 +20 +21 +22 +30 +600 +601 +602 +610 +611 +drop table t1; +create table t1 (a int not null primary key, +b int not null unique auto_increment) engine ndb; +insert into t1 values (1, NULL); +insert into t1 values (3, NULL); +update t1 set b = 3 where a = 3; +insert into t1 values (4, NULL); +select * from t1 order by a; +a b +1 1 +3 3 +4 4 +drop table t1; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=MYISAM; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +1 1 0 +11 2 1 +21 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +TRUNCATE t1; +TRUNCATE t2; +SET @@session.auto_increment_offset=5; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); +SELECT * FROM t1 ORDER BY pk; +pk b c +5 1 0 +15 2 1 +25 3 2 +27 4 3 +35 5 4 +99 6 5 +105 7 6 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +7 +TRUNCATE t1; +TRUNCATE t2; +SET @@session.auto_increment_increment=2; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; 
+pk b c +1 1 0 +3 2 1 +5 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +DROP TABLE t1, t2; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 7; +SET @@session.auto_increment_offset=1; +SET @@session.auto_increment_increment=1; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +7 1 0 +8 2 1 +9 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +DROP TABLE t1, t2; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 3; +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +5 1 0 +15 2 1 +25 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +DROP TABLE t1, t2; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 7; +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +15 1 0 +25 2 1 +35 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +DROP TABLE t1, t2; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 5; +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +5 1 0 +15 2 1 +25 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +DROP TABLE t1, t2; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 100; +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +105 1 0 +115 2 1 +125 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +DROP TABLE t1, t2; +SET @@session.auto_increment_offset=1; +SET @@session.auto_increment_increment=1; +set ndb_autoincrement_prefetch_sz = 32; +drop table if 
exists t1; +SET @@session.auto_increment_offset=1; +SET @@session.auto_increment_increment=1; +set ndb_autoincrement_prefetch_sz = 32; +create table t1 (a int not null auto_increment primary key) engine ndb; +insert into t1 values (NULL); +insert into t1 values (NULL); +select * from t1 order by a; +a +1 +33 +insert into t1 values (20); +insert into t1 values (NULL); +select * from t1 order by a; +a +1 +20 +33 +34 +insert into t1 values (35); +insert into t1 values (NULL); +insert into t1 values (NULL); +ERROR 23000: Duplicate entry '35' for key 1 +select * from t1 order by a; +a +1 +20 +21 +33 +34 +35 +insert into t1 values (100); +insert into t1 values (NULL); +insert into t1 values (NULL); +select * from t1 order by a; +a +1 +20 +21 +22 +33 +34 +35 +100 +101 +set auto_increment_offset = @old_auto_increment_offset; +set auto_increment_increment = @old_auto_increment_increment; +set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz; +drop table t1; diff --git a/mysql-test/r/ndb_insert.result b/mysql-test/r/ndb_insert.result index b5349ecb59c..e7275bde2b8 100644 --- a/mysql-test/r/ndb_insert.result +++ b/mysql-test/r/ndb_insert.result @@ -657,172 +657,3 @@ a b 2 NULL 3 NULL drop table t1; -CREATE TABLE t1 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER; -CREATE TABLE t2 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=MYISAM; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -1 1 0 -11 2 1 -21 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -TRUNCATE t1; -TRUNCATE t2; -SET @@session.auto_increment_offset=5; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); -SELECT * FROM t1 ORDER BY pk; -pk b c -5 1 0 -15 2 1 -25 3 2 -27 4 3 -35 5 4 -99 6 5 -105 7 6 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -7 -TRUNCATE t1; -TRUNCATE t2; -SET @@session.auto_increment_increment=2; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -1 1 0 -3 2 1 -5 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -DROP TABLE t1, t2; -CREATE TABLE t1 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; -CREATE TABLE t2 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 7; -SET @@session.auto_increment_offset=1; -SET @@session.auto_increment_increment=1; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -7 1 0 -8 2 1 -9 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -DROP TABLE t1, t2; -CREATE TABLE t1 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3; -CREATE TABLE t2 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=MYISAM 
AUTO_INCREMENT = 3; -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -5 1 0 -15 2 1 -25 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -DROP TABLE t1, t2; -CREATE TABLE t1 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; -CREATE TABLE t2 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 7; -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -15 1 0 -25 2 1 -35 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -DROP TABLE t1, t2; -CREATE TABLE t1 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5; -CREATE TABLE t2 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 5; -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -5 1 0 -15 2 1 -25 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -DROP TABLE t1, t2; -CREATE TABLE t1 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100; -CREATE TABLE t2 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 100; -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -105 1 0 -115 2 1 -125 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -DROP TABLE t1, t2; diff --git a/mysql-test/t/ndb_auto_increment.test b/mysql-test/t/ndb_auto_increment.test new file mode 100644 index 00000000000..9801f2de564 --- /dev/null +++ b/mysql-test/t/ndb_auto_increment.test @@ -0,0 +1,294 @@ +-- source include/have_ndb.inc +-- source include/have_multi_ndb.inc +-- source include/not_embedded.inc + +--disable_warnings +connection server1; +DROP TABLE IF EXISTS t1,t2; +connection server2; +DROP TABLE IF EXISTS t1; +connection server1; +--enable_warnings + +set @old_auto_increment_offset = @@session.auto_increment_offset; +set @old_auto_increment_increment = @@session.auto_increment_increment; +set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz; + +flush status; + +create table t1 (a int not null auto_increment primary key) engine ndb; + +# Step 1: Verify simple insert +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 2: Verify simple update with higher than highest value causes +# next insert to use updated_value + 1 +update t1 set a = 5 where a = 1; +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 3: Verify insert that inserts higher than highest value causes +# next insert to use 
inserted_value + 1 +insert into t1 values (7); +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 4: Verify that insert into hole, lower than highest value doesn't +# affect next insert +insert into t1 values (2); +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 5: Verify that update into hole, lower than highest value doesn't +# affect next insert +update t1 set a = 4 where a = 2; +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 6: Verify that delete of highest value doesn't cause the next +# insert to reuse this value +delete from t1 where a = 10; +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 7: Verify that REPLACE has the same effect as INSERT +replace t1 values (NULL); +select * from t1 order by a; +replace t1 values (15); +select * from t1 order by a; +replace into t1 values (NULL); +select * from t1 order by a; + +# Step 8: Verify that REPLACE has the same effect as UPDATE +replace t1 values (15); +select * from t1 order by a; + +# Step 9: Verify that IGNORE doesn't affect auto_increment +insert ignore into t1 values (NULL); +select * from t1 order by a; +insert ignore into t1 values (15), (NULL); +select * from t1 order by a; + +# Step 10: Verify that on duplicate key as UPDATE behaves as an +# UPDATE +insert into t1 values (15) +on duplicate key update a = 20; +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 11: Verify that on duplicate key as INSERT behaves as INSERT +insert into t1 values (NULL) on duplicate key update a = 30; +select * from t1 order by a; +insert into t1 values (30) on duplicate key update a = 40; +select * from t1 order by a; + +#Step 12: Vefify INSERT IGNORE (bug#32055) +insert ignore into t1 values(600),(NULL),(NULL),(610),(NULL); +select * from t1 order by a; +drop table t1; + +#Step 13: Verify auto_increment of unique key +create table t1 (a int not null primary key, + b int not null unique auto_increment) engine ndb; +insert into t1 values (1, NULL); +insert into t1 values (3, NULL); +update t1 set b = 3 where a = 3; +insert into t1 values (4, NULL); +select * from t1 order by a; +drop table t1; + +#Step 14: Verify that auto_increment_increment and auto_increment_offset +# work as expected + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=MYISAM; + +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +TRUNCATE t1; +TRUNCATE t2; +SET @@session.auto_increment_offset=5; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +TRUNCATE t1; +TRUNCATE t2; +SET @@session.auto_increment_increment=2; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +DROP TABLE t1, t2; + +CREATE TABLE t1 ( + pk INT NOT 
NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 7; + +SET @@session.auto_increment_offset=1; +SET @@session.auto_increment_increment=1; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +DROP TABLE t1, t2; + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 3; + +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +DROP TABLE t1, t2; + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 7; + +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +DROP TABLE t1, t2; + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 5; + +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +DROP TABLE t1, t2; + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 100; + +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +DROP TABLE t1, t2; + +#Step 15: Now verify that behaviour on multiple MySQL Servers behave +# properly. Start by dropping table and recreating it to start +# counters and id caches from zero again. 
+--disable_warnings +connection server2; +SET @@session.auto_increment_offset=1; +SET @@session.auto_increment_increment=1; +set ndb_autoincrement_prefetch_sz = 32; +drop table if exists t1; +connection server1; +SET @@session.auto_increment_offset=1; +SET @@session.auto_increment_increment=1; +set ndb_autoincrement_prefetch_sz = 32; +--enable_warnings + + +create table t1 (a int not null auto_increment primary key) engine ndb; +# Basic test, ensure that the second server gets a new range. +#Generate record with key = 1 +insert into t1 values (NULL); +connection server2; +#Generate record with key = 33 +insert into t1 values (NULL); +connection server1; +select * from t1 order by a; + +#This insert should not affect the range of the second server +insert into t1 values (20); +connection server2; +insert into t1 values (NULL); +select * from t1 order by a; + +connection server1; +#This insert should remove cached values but also skip values already +#taken by server2, given that there is no method of communicating with +#the other server it should also cause a conflict +connection server1; + +insert into t1 values (35); +insert into t1 values (NULL); +connection server2; +--error ER_DUP_ENTRY +insert into t1 values (NULL); +select * from t1 order by a; + +insert into t1 values (100); +insert into t1 values (NULL); +connection server1; +insert into t1 values (NULL); +select * from t1 order by a; + +set auto_increment_offset = @old_auto_increment_offset; +set auto_increment_increment = @old_auto_increment_increment; +set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz; + +drop table t1; diff --git a/mysql-test/t/ndb_insert.test b/mysql-test/t/ndb_insert.test index b8f00d6f6aa..f85d193f0d4 100644 --- a/mysql-test/t/ndb_insert.test +++ b/mysql-test/t/ndb_insert.test @@ -638,142 +638,4 @@ create table t1(a int primary key, b int, unique key(b)) engine=ndb; insert ignore into t1 values (1,0), (2,0), (2,null), (3,null); select * from t1 order by a; drop table t1; - -# Bug#26342 auto_increment_increment AND auto_increment_offset REALLY REALLY anger NDB cluster - -CREATE TABLE t1 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER; - -CREATE TABLE t2 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=MYISAM; - -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -TRUNCATE t1; -TRUNCATE t2; -SET @@session.auto_increment_offset=5; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -TRUNCATE t1; -TRUNCATE t2; -SET @@session.auto_increment_increment=2; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -DROP TABLE t1, t2; - -CREATE TABLE t1 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; - -CREATE TABLE t2 ( - pk INT NOT NULL PRIMARY 
KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 7; - -SET @@session.auto_increment_offset=1; -SET @@session.auto_increment_increment=1; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -DROP TABLE t1, t2; - -CREATE TABLE t1 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3; - -CREATE TABLE t2 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 3; - -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -DROP TABLE t1, t2; - -CREATE TABLE t1 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; - -CREATE TABLE t2 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 7; - -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -DROP TABLE t1, t2; - -CREATE TABLE t1 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5; - -CREATE TABLE t2 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 5; - -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -DROP TABLE t1, t2; - -CREATE TABLE t1 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100; - -CREATE TABLE t2 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 100; - -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -DROP TABLE t1, t2; - # End of 4.1 tests diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 00e4621ec1a..de6187da70f 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -2259,6 +2259,25 @@ int ha_ndbcluster::full_table_scan(byte *buf) DBUG_RETURN(next_result(buf)); } +int +ha_ndbcluster::set_auto_inc(Field *field) +{ + Ndb *ndb= get_ndb(); + Uint64 next_val= (Uint64) field->val_int() + 1; + DBUG_ENTER("ha_ndbcluster::set_auto_inc"); +#ifndef DBUG_OFF + char buff[22]; + DBUG_PRINT("info", + ("Trying to set next auto increment value to %s", + llstr(next_val, buff))); +#endif + if (ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE) + == -1) + 
ERR_RETURN(ndb->getNdbError()); + DBUG_RETURN(0); +} + + /* Insert one record into NDB */ @@ -2413,17 +2432,11 @@ int ha_ndbcluster::write_row(byte *record) } if ((has_auto_increment) && (m_skip_auto_increment)) { - Ndb *ndb= get_ndb(); - Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1; -#ifndef DBUG_OFF - char buff[22]; - DBUG_PRINT("info", - ("Trying to set next auto increment value to %s", - llstr(next_val, buff))); -#endif - if (ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE) - == -1) - ERR_RETURN(ndb->getNdbError()); + int ret_val; + if ((ret_val= set_auto_inc(table->next_number_field))) + { + DBUG_RETURN(ret_val); + } } m_skip_auto_increment= TRUE; @@ -2476,6 +2489,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) NdbScanOperation* cursor= m_active_cursor; NdbOperation *op; uint i; + int auto_res; bool pk_update= (table->s->primary_key != MAX_KEY && key_cmp(table->s->primary_key, old_data, new_data)); DBUG_ENTER("update_row"); @@ -2531,6 +2545,16 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) // Insert new row DBUG_PRINT("info", ("delete succeded")); m_primary_key_update= TRUE; + /* + If we are updating a primary key with auto_increment + then we need to update the auto_increment counter + */ + if (table->found_next_number_field && + table->found_next_number_field->query_id == thd->query_id && + (auto_res= set_auto_inc(table->found_next_number_field))) + { + DBUG_RETURN(auto_res); + } insert_res= write_row(new_data); m_primary_key_update= FALSE; if (insert_res) @@ -2553,7 +2577,16 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) DBUG_PRINT("info", ("delete+insert succeeded")); DBUG_RETURN(0); } - + /* + If we are updating a unique key with auto_increment + then we need to update the auto_increment counter + */ + if (table->found_next_number_field && + table->found_next_number_field->query_id == thd->query_id && + (auto_res= set_auto_inc(table->found_next_number_field))) + { + DBUG_RETURN(auto_res); + } if (cursor) { /* @@ -3841,9 +3874,11 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) // store thread specific data first to set the right context m_force_send= thd->variables.ndb_force_send; m_ha_not_exact_count= !thd->variables.ndb_use_exact_count; - m_autoincrement_prefetch= - (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz; - + m_autoincrement_prefetch= + (thd->variables.ndb_autoincrement_prefetch_sz > + NDB_DEFAULT_AUTO_PREFETCH) ? + (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz + : (ha_rows) NDB_DEFAULT_AUTO_PREFETCH; m_active_trans= thd_ndb->all ? thd_ndb->all : thd_ndb->stmt; DBUG_ASSERT(m_active_trans); // Start of transaction @@ -4868,8 +4903,9 @@ ulonglong ha_ndbcluster::get_auto_increment() { int cache_size; Uint64 auto_value; - Uint64 step= current_thd->variables.auto_increment_increment; - Uint64 start= current_thd->variables.auto_increment_offset; + THD *thd= current_thd; + Uint64 step= thd->variables.auto_increment_increment; + Uint64 start= thd->variables.auto_increment_offset; DBUG_ENTER("get_auto_increment"); DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); Ndb *ndb= get_ndb(); @@ -4879,11 +4915,15 @@ ulonglong ha_ndbcluster::get_auto_increment() /* We guessed too low */ m_rows_to_insert+= m_autoincrement_prefetch; } + int remaining= m_rows_to_insert - m_rows_inserted; + int min_prefetch= + (remaining < thd->variables.ndb_autoincrement_prefetch_sz) ? 
+ thd->variables.ndb_autoincrement_prefetch_sz + : remaining; cache_size= - (int) ((m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ? - m_rows_to_insert - m_rows_inserted : - ((m_rows_to_insert > m_autoincrement_prefetch) ? - m_rows_to_insert : m_autoincrement_prefetch)); + (int) ((remaining < m_autoincrement_prefetch) ? + min_prefetch + : remaining); uint retries= NDB_AUTO_INCREMENT_RETRIES; int retry_sleep= 30; /* 30 milliseconds, transaction */ for (;;) @@ -4953,7 +4993,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_dupkey((uint) -1), m_ha_not_exact_count(FALSE), m_force_send(TRUE), - m_autoincrement_prefetch((ha_rows) 32), + m_autoincrement_prefetch((ha_rows) NDB_DEFAULT_AUTO_PREFETCH), m_transaction_on(TRUE), m_cond(NULL), m_multi_cursor(NULL) diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 324969ad374..0694de55c53 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -27,6 +27,8 @@ #include #define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8 +#define NDB_DEFAULT_AUTO_PREFETCH 32 + /* Forward declarations */ class Ndb; class NdbOperation; @@ -271,6 +273,7 @@ private: int full_table_scan(byte * buf); int fetch_next(NdbScanOperation* op); int next_result(byte *buf); + int set_auto_inc(Field *field); int define_read_attrs(byte* buf, NdbOperation* op); int filtered_scan(const byte *key, uint key_len, byte *buf, diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 051bad5b310..d7cb4414d2b 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5282,8 +5282,8 @@ Disable with --skip-ndbcluster (will save memory).", {"ndb-autoincrement-prefetch-sz", OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, "Specify number of autoincrement values that are prefetched.", (gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz, - (gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz, - 0, GET_ULONG, REQUIRED_ARG, 32, 1, 256, 0, 0, 0}, + (gptr*) &max_system_variables.ndb_autoincrement_prefetch_sz, + 0, GET_ULONG, REQUIRED_ARG, 1, 1, 256, 0, 0, 0}, {"ndb-force-send", OPT_NDB_FORCE_SEND, "Force send of buffers to ndb immediately without waiting for " "other threads.", -- cgit v1.2.1 From ac7e0c857f7d44cb3d7b8d7696dc5c4d044167f6 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 5 Nov 2007 23:23:26 +0100 Subject: preserve more of the state --- sql/ha_ndbcluster_binlog.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 9465c5dbb3c..93f7027d788 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -243,7 +243,8 @@ static void run_query(THD *thd, char *buf, char *end, { ulong save_query_length= thd->query_length; char *save_query= thd->query; - ulong save_thread_id= thd->variables.pseudo_thread_id; + struct system_variables save_variables= thd->variables; + struct system_status_var save_status_var= thd->status_var; ulonglong save_thd_options= thd->options; DBUG_ASSERT(sizeof(save_thd_options) == sizeof(thd->options)); NET save_net= thd->net; @@ -277,7 +278,8 @@ static void run_query(THD *thd, char *buf, char *end, thd->options= save_thd_options; thd->query_length= save_query_length; thd->query= save_query; - thd->variables.pseudo_thread_id= save_thread_id; + thd->variables= save_variables; + thd->status_var= save_status_var; thd->net= save_net; if (thd == injector_thd) -- cgit v1.2.1 From ea9b647d6922f3bfccdfc6966783b76ba0edc9fb Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 6 Nov 2007 10:27:56 +0100 Subject: break out tuple data read sql/ha_ndbcluster_binlog.cc: more state 
saving storage/ndb/tools/restore/Restore.cpp: ndb: break out tuple data read for ndb_restore more save state in run query --- sql/ha_ndbcluster_binlog.cc | 25 +++--- storage/ndb/tools/restore/Restore.cpp | 156 ++++++++++++++++++---------------- storage/ndb/tools/restore/Restore.hpp | 4 + 3 files changed, 103 insertions(+), 82 deletions(-) diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 93f7027d788..4bb17dffa4a 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -241,19 +241,22 @@ static void dbug_print_table(const char *info, TABLE *table) static void run_query(THD *thd, char *buf, char *end, const int *no_print_error, my_bool disable_binlog) { - ulong save_query_length= thd->query_length; - char *save_query= thd->query; - struct system_variables save_variables= thd->variables; - struct system_status_var save_status_var= thd->status_var; + ulong save_thd_query_length= thd->query_length; + char *save_thd_query= thd->query; + struct system_variables save_thd_variables= thd->variables; + struct system_status_var save_thd_status_var= thd->status_var; + THD_TRANS save_thd_transaction_all= thd->transaction.all; + THD_TRANS save_thd_transaction_stmt= thd->transaction.stmt; ulonglong save_thd_options= thd->options; DBUG_ASSERT(sizeof(save_thd_options) == sizeof(thd->options)); - NET save_net= thd->net; + NET save_thd_net= thd->net; const char* found_semicolon= NULL; bzero((char*) &thd->net, sizeof(NET)); thd->query_length= end - buf; thd->query= buf; thd->variables.pseudo_thread_id= thread_id; + thd->transaction.stmt.modified_non_trans_table= FALSE; if (disable_binlog) thd->options&= ~OPTION_BIN_LOG; @@ -276,11 +279,13 @@ static void run_query(THD *thd, char *buf, char *end, } thd->options= save_thd_options; - thd->query_length= save_query_length; - thd->query= save_query; - thd->variables= save_variables; - thd->status_var= save_status_var; - thd->net= save_net; + thd->query_length= save_thd_query_length; + thd->query= save_thd_query; + thd->variables= save_thd_variables; + thd->status_var= save_thd_status_var; + thd->transaction.all= save_thd_transaction_all; + thd->transaction.stmt= save_thd_transaction_stmt; + thd->net= save_thd_net; if (thd == injector_thd) { diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp index a7d8a9d10d9..f599bb21978 100644 --- a/storage/ndb/tools/restore/Restore.cpp +++ b/storage/ndb/tools/restore/Restore.cpp @@ -534,6 +534,88 @@ TupleS::prepareRecord(TableS & tab){ return true; } +int +RestoreDataIterator::readTupleData(Uint32 *buf_ptr, Uint32 *ptr, + Uint32 dataLength) +{ + while (ptr + 2 < buf_ptr + dataLength) + { + typedef BackupFormat::DataFile::VariableData VarData; + VarData * data = (VarData *)ptr; + Uint32 sz = ntohl(data->Sz); + Uint32 attrId = ntohl(data->Id); // column_no + + AttributeData * attr_data = m_tuple.getData(attrId); + const AttributeDesc * attr_desc = m_tuple.getDesc(attrId); + + // just a reminder - remove when backwards compat implemented + if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3) && + attr_desc->m_column->getNullable()) + { + const Uint32 ind = attr_desc->m_nullBitIndex; + if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize, + buf_ptr,ind)) + { + attr_data->null = true; + attr_data->void_value = NULL; + continue; + } + } + + if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3)) + { + sz *= 4; + } + + attr_data->null = false; + attr_data->void_value = &data->Data[0]; + attr_data->size = sz; + + //if (m_currentTable->getTableId() 
>= 2) { ndbout << "var off=" << ptr-buf_ptr << " attrId=" << attrId << endl; } + + /** + * Compute array size + */ + const Uint32 arraySize = sz / (attr_desc->size / 8); + assert(arraySize <= attr_desc->arraySize); + + //convert the length of blob(v1) and text(v1) + if(!m_hostByteOrder + && (attr_desc->m_column->getType() == NdbDictionary::Column::Blob + || attr_desc->m_column->getType() == NdbDictionary::Column::Text) + && attr_desc->m_column->getArrayType() == NdbDictionary::Column::ArrayTypeFixed) + { + char* p = (char*)&attr_data->u_int64_value[0]; + Uint64 x; + memcpy(&x, p, sizeof(Uint64)); + x = Twiddle64(x); + memcpy(p, &x, sizeof(Uint64)); + } + + //convert datetime type + if(!m_hostByteOrder + && attr_desc->m_column->getType() == NdbDictionary::Column::Datetime) + { + char* p = (char*)&attr_data->u_int64_value[0]; + Uint64 x; + memcpy(&x, p, sizeof(Uint64)); + x = Twiddle64(x); + memcpy(p, &x, sizeof(Uint64)); + } + + if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize)) + { + return -1; + } + + ptr += ((sz + 3) >> 2) + 2; + } + + assert(ptr == buf_ptr + dataLength); + + return 0; +} + const TupleS * RestoreDataIterator::getNextTuple(int & res) { @@ -630,78 +712,8 @@ RestoreDataIterator::getNextTuple(int & res) attr_data->void_value = NULL; } - while (ptr + 2 < buf_ptr + dataLength) { - typedef BackupFormat::DataFile::VariableData VarData; - VarData * data = (VarData *)ptr; - Uint32 sz = ntohl(data->Sz); - Uint32 attrId = ntohl(data->Id); // column_no - - AttributeData * attr_data = m_tuple.getData(attrId); - const AttributeDesc * attr_desc = m_tuple.getDesc(attrId); - - // just a reminder - remove when backwards compat implemented - if(m_currentTable->backupVersion < MAKE_VERSION(5,1,3) && - attr_desc->m_column->getNullable()){ - const Uint32 ind = attr_desc->m_nullBitIndex; - if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize, - buf_ptr,ind)){ - attr_data->null = true; - attr_data->void_value = NULL; - continue; - } - } - - if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3)) - { - sz *= 4; - } - - attr_data->null = false; - attr_data->void_value = &data->Data[0]; - attr_data->size = sz; - - //if (m_currentTable->getTableId() >= 2) { ndbout << "var off=" << ptr-buf_ptr << " attrId=" << attrId << endl; } - - /** - * Compute array size - */ - const Uint32 arraySize = sz / (attr_desc->size / 8); - assert(arraySize <= attr_desc->arraySize); - - //convert the length of blob(v1) and text(v1) - if(!m_hostByteOrder - && (attr_desc->m_column->getType() == NdbDictionary::Column::Blob - || attr_desc->m_column->getType() == NdbDictionary::Column::Text) - && attr_desc->m_column->getArrayType() == NdbDictionary::Column::ArrayTypeFixed) - { - char* p = (char*)&attr_data->u_int64_value[0]; - Uint64 x; - memcpy(&x, p, sizeof(Uint64)); - x = Twiddle64(x); - memcpy(p, &x, sizeof(Uint64)); - } - - //convert datetime type - if(!m_hostByteOrder - && attr_desc->m_column->getType() == NdbDictionary::Column::Datetime) - { - char* p = (char*)&attr_data->u_int64_value[0]; - Uint64 x; - memcpy(&x, p, sizeof(Uint64)); - x = Twiddle64(x); - memcpy(p, &x, sizeof(Uint64)); - } - - if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize)) - { - res = -1; - return NULL; - } - - ptr += ((sz + 3) >> 2) + 2; - } - - assert(ptr == buf_ptr + dataLength); + if ((res = readTupleData(buf_ptr, ptr, dataLength))) + return NULL; m_count ++; res = 0; diff --git a/storage/ndb/tools/restore/Restore.hpp b/storage/ndb/tools/restore/Restore.hpp index 5455fa17aa0..f6de9245509 100644 --- 
a/storage/ndb/tools/restore/Restore.hpp +++ b/storage/ndb/tools/restore/Restore.hpp @@ -355,6 +355,10 @@ public: bool validateFragmentFooter(); const TupleS *getNextTuple(int & res); + +private: + + int readTupleData(Uint32 *buf_ptr, Uint32 *ptr, Uint32 dataLength); }; class LogEntry { -- cgit v1.2.1 From 404d52fc97c9021fdab6b9ada2e91e48f0caf9dd Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 6 Nov 2007 10:28:11 +0100 Subject: Removed compiler warnings --- sql/ha_ndbcluster.cc | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index de6187da70f..d29e9345c11 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4901,7 +4901,7 @@ int ha_ndbcluster::drop_table() ulonglong ha_ndbcluster::get_auto_increment() { - int cache_size; + uint cache_size; Uint64 auto_value; THD *thd= current_thd; Uint64 step= thd->variables.auto_increment_increment; @@ -4915,15 +4915,14 @@ ulonglong ha_ndbcluster::get_auto_increment() /* We guessed too low */ m_rows_to_insert+= m_autoincrement_prefetch; } - int remaining= m_rows_to_insert - m_rows_inserted; - int min_prefetch= + uint remaining= m_rows_to_insert - m_rows_inserted; + uint min_prefetch= (remaining < thd->variables.ndb_autoincrement_prefetch_sz) ? thd->variables.ndb_autoincrement_prefetch_sz : remaining; - cache_size= - (int) ((remaining < m_autoincrement_prefetch) ? - min_prefetch - : remaining); + cache_size= ((remaining < m_autoincrement_prefetch) ? + min_prefetch + : remaining); uint retries= NDB_AUTO_INCREMENT_RETRIES; int retry_sleep= 30; /* 30 milliseconds, transaction */ for (;;) -- cgit v1.2.1 From 539682ceba64841bcaf12b0db668ed3e309058a1 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 6 Nov 2007 10:57:49 +0100 Subject: ndb_auto_increment.result: Rename: mysql-test/r/ndb_auto_increment.result -> mysql-test/suite/ndb/r/ndb_auto_increment.result ndb_auto_increment.test, ndb_auto_increment.result: Bug #31956 auto increment bugs in MySQL Cluster: Adapted test cases ha_ndbcluster.cc: Bug #31956 auto increment bugs in MySQL Cluster: Merging from 5.0 ndb_auto_increment.test: Rename: mysql-test/t/ndb_auto_increment.test -> mysql-test/suite/ndb/t/ndb_auto_increment.test sql/ha_ndbcluster.cc: Bug #31956 auto increment bugs in MySQL Cluster: Merging from 5.0 mysql-test/suite/ndb/t/ndb_auto_increment.test: Bug #31956 auto increment bugs in MySQL Cluster: Adapted test cases mysql-test/suite/ndb/r/ndb_auto_increment.result: Bug #31956 auto increment bugs in MySQL Cluster: Adapted test cases --- mysql-test/r/ndb_auto_increment.result | 445 ----------------------- mysql-test/suite/ndb/r/ndb_auto_increment.result | 445 +++++++++++++++++++++++ mysql-test/suite/ndb/t/ndb_auto_increment.test | 293 +++++++++++++++ mysql-test/t/ndb_auto_increment.test | 294 --------------- sql/ha_ndbcluster.cc | 89 +++-- 5 files changed, 804 insertions(+), 762 deletions(-) delete mode 100644 mysql-test/r/ndb_auto_increment.result create mode 100644 mysql-test/suite/ndb/r/ndb_auto_increment.result create mode 100644 mysql-test/suite/ndb/t/ndb_auto_increment.test delete mode 100644 mysql-test/t/ndb_auto_increment.test diff --git a/mysql-test/r/ndb_auto_increment.result b/mysql-test/r/ndb_auto_increment.result deleted file mode 100644 index b7c9fa8e2b5..00000000000 --- a/mysql-test/r/ndb_auto_increment.result +++ /dev/null @@ -1,445 +0,0 @@ -DROP TABLE IF EXISTS t1,t2; -DROP TABLE IF EXISTS t1; -set @old_auto_increment_offset = @@session.auto_increment_offset; -set @old_auto_increment_increment = 
@@session.auto_increment_increment; -set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz; -flush status; -create table t1 (a int not null auto_increment primary key) engine ndb; -insert into t1 values (NULL); -select * from t1 order by a; -a -1 -update t1 set a = 5 where a = 1; -insert into t1 values (NULL); -select * from t1 order by a; -a -5 -6 -insert into t1 values (7); -insert into t1 values (NULL); -select * from t1 order by a; -a -5 -6 -7 -8 -insert into t1 values (2); -insert into t1 values (NULL); -select * from t1 order by a; -a -2 -5 -6 -7 -8 -9 -update t1 set a = 4 where a = 2; -insert into t1 values (NULL); -select * from t1 order by a; -a -4 -5 -6 -7 -8 -9 -10 -delete from t1 where a = 10; -insert into t1 values (NULL); -select * from t1 order by a; -a -4 -5 -6 -7 -8 -9 -11 -replace t1 values (NULL); -select * from t1 order by a; -a -4 -5 -6 -7 -8 -9 -11 -12 -replace t1 values (15); -select * from t1 order by a; -a -4 -5 -6 -7 -8 -9 -11 -12 -15 -replace into t1 values (NULL); -select * from t1 order by a; -a -4 -5 -6 -7 -8 -9 -11 -12 -15 -16 -replace t1 values (15); -select * from t1 order by a; -a -4 -5 -6 -7 -8 -9 -11 -12 -15 -16 -insert ignore into t1 values (NULL); -select * from t1 order by a; -a -4 -5 -6 -7 -8 -9 -11 -12 -15 -16 -17 -insert ignore into t1 values (15), (NULL); -select * from t1 order by a; -a -4 -5 -6 -7 -8 -9 -11 -12 -15 -16 -17 -18 -insert into t1 values (15) -on duplicate key update a = 20; -insert into t1 values (NULL); -select * from t1 order by a; -a -4 -5 -6 -7 -8 -9 -11 -12 -16 -17 -18 -20 -21 -insert into t1 values (NULL) on duplicate key update a = 30; -select * from t1 order by a; -a -4 -5 -6 -7 -8 -9 -11 -12 -16 -17 -18 -20 -21 -22 -insert into t1 values (30) on duplicate key update a = 40; -select * from t1 order by a; -a -4 -5 -6 -7 -8 -9 -11 -12 -16 -17 -18 -20 -21 -22 -30 -insert ignore into t1 values(600),(NULL),(NULL),(610),(NULL); -select * from t1 order by a; -a -4 -5 -6 -7 -8 -9 -11 -12 -16 -17 -18 -20 -21 -22 -30 -600 -601 -602 -610 -611 -drop table t1; -create table t1 (a int not null primary key, -b int not null unique auto_increment) engine ndb; -insert into t1 values (1, NULL); -insert into t1 values (3, NULL); -update t1 set b = 3 where a = 3; -insert into t1 values (4, NULL); -select * from t1 order by a; -a b -1 1 -3 3 -4 4 -drop table t1; -CREATE TABLE t1 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER; -CREATE TABLE t2 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=MYISAM; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -1 1 0 -11 2 1 -21 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -TRUNCATE t1; -TRUNCATE t2; -SET @@session.auto_increment_offset=5; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); -SELECT * FROM t1 ORDER BY pk; -pk b c -5 1 0 -15 2 1 -25 3 2 -27 4 3 -35 5 4 -99 6 5 -105 7 6 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -7 -TRUNCATE t1; -TRUNCATE t2; -SET @@session.auto_increment_increment=2; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); 
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -1 1 0 -3 2 1 -5 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -DROP TABLE t1, t2; -CREATE TABLE t1 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; -CREATE TABLE t2 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 7; -SET @@session.auto_increment_offset=1; -SET @@session.auto_increment_increment=1; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -7 1 0 -8 2 1 -9 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -DROP TABLE t1, t2; -CREATE TABLE t1 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3; -CREATE TABLE t2 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 3; -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -5 1 0 -15 2 1 -25 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -DROP TABLE t1, t2; -CREATE TABLE t1 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; -CREATE TABLE t2 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 7; -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -15 1 0 -25 2 1 -35 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -DROP TABLE t1, t2; -CREATE TABLE t1 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5; -CREATE TABLE t2 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 5; -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -5 1 0 -15 2 1 -25 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -DROP TABLE t1, t2; -CREATE TABLE t1 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100; -CREATE TABLE t2 ( -pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, -b INT NOT NULL, -c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 100; -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -pk b c -105 1 0 -115 2 1 -125 3 2 -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -COUNT(t1.pk) -3 -DROP TABLE t1, t2; -SET @@session.auto_increment_offset=1; -SET 
@@session.auto_increment_increment=1; -set ndb_autoincrement_prefetch_sz = 32; -drop table if exists t1; -SET @@session.auto_increment_offset=1; -SET @@session.auto_increment_increment=1; -set ndb_autoincrement_prefetch_sz = 32; -create table t1 (a int not null auto_increment primary key) engine ndb; -insert into t1 values (NULL); -insert into t1 values (NULL); -select * from t1 order by a; -a -1 -33 -insert into t1 values (20); -insert into t1 values (NULL); -select * from t1 order by a; -a -1 -20 -33 -34 -insert into t1 values (35); -insert into t1 values (NULL); -insert into t1 values (NULL); -ERROR 23000: Duplicate entry '35' for key 1 -select * from t1 order by a; -a -1 -20 -21 -33 -34 -35 -insert into t1 values (100); -insert into t1 values (NULL); -insert into t1 values (NULL); -select * from t1 order by a; -a -1 -20 -21 -22 -33 -34 -35 -100 -101 -set auto_increment_offset = @old_auto_increment_offset; -set auto_increment_increment = @old_auto_increment_increment; -set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz; -drop table t1; diff --git a/mysql-test/suite/ndb/r/ndb_auto_increment.result b/mysql-test/suite/ndb/r/ndb_auto_increment.result new file mode 100644 index 00000000000..5740ed38242 --- /dev/null +++ b/mysql-test/suite/ndb/r/ndb_auto_increment.result @@ -0,0 +1,445 @@ +DROP TABLE IF EXISTS t1,t2; +DROP TABLE IF EXISTS t1; +set @old_auto_increment_offset = @@session.auto_increment_offset; +set @old_auto_increment_increment = @@session.auto_increment_increment; +set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz; +flush status; +create table t1 (a int not null auto_increment primary key) engine ndb; +insert into t1 values (NULL); +select * from t1 order by a; +a +1 +update t1 set a = 5 where a = 1; +insert into t1 values (NULL); +select * from t1 order by a; +a +5 +6 +insert into t1 values (7); +insert into t1 values (NULL); +select * from t1 order by a; +a +5 +6 +7 +8 +insert into t1 values (2); +insert into t1 values (NULL); +select * from t1 order by a; +a +2 +5 +6 +7 +8 +9 +update t1 set a = 4 where a = 2; +insert into t1 values (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +10 +delete from t1 where a = 10; +insert into t1 values (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +replace t1 values (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +replace t1 values (15); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +15 +replace into t1 values (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +15 +16 +replace t1 values (15); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +15 +16 +insert ignore into t1 values (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +15 +16 +17 +insert ignore into t1 values (15), (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +15 +16 +17 +18 +insert into t1 values (15) +on duplicate key update a = 20; +insert into t1 values (NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +16 +17 +18 +20 +21 +insert into t1 values (NULL) on duplicate key update a = 30; +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +16 +17 +18 +20 +21 +22 +insert into t1 values (30) on duplicate key update a = 40; +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +16 +17 +18 +20 +21 +22 +30 +insert ignore into t1 values(600),(NULL),(NULL),(610),(NULL); +select * from t1 order by a; +a +4 +5 +6 +7 +8 +9 +11 +12 +16 +17 +18 +20 +21 +22 +30 +600 +601 +602 +610 +611 +drop 
table t1; +create table t1 (a int not null primary key, +b int not null unique auto_increment) engine ndb; +insert into t1 values (1, NULL); +insert into t1 values (3, NULL); +update t1 set b = 3 where a = 3; +insert into t1 values (4, NULL); +select * from t1 order by a; +a b +1 1 +3 3 +4 4 +drop table t1; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=MYISAM; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +1 1 0 +11 2 1 +21 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +TRUNCATE t1; +TRUNCATE t2; +SET @@session.auto_increment_offset=5; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); +SELECT * FROM t1 ORDER BY pk; +pk b c +5 1 0 +15 2 1 +25 3 2 +27 4 3 +35 5 4 +99 6 5 +105 7 6 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +7 +TRUNCATE t1; +TRUNCATE t2; +SET @@session.auto_increment_increment=2; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +1 1 0 +3 2 1 +5 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +DROP TABLE t1, t2; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 7; +SET @@session.auto_increment_offset=1; +SET @@session.auto_increment_increment=1; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +7 1 0 +8 2 1 +9 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +DROP TABLE t1, t2; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 3; +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +5 1 0 +15 2 1 +25 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +DROP TABLE t1, t2; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 7; +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +15 1 0 +25 2 1 +35 3 2 
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +DROP TABLE t1, t2; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 5; +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +5 1 0 +15 2 1 +25 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +DROP TABLE t1, t2; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, +b INT NOT NULL, +c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 100; +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +pk b c +105 1 0 +115 2 1 +125 3 2 +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +COUNT(t1.pk) +3 +DROP TABLE t1, t2; +SET @@session.auto_increment_offset=1; +SET @@session.auto_increment_increment=1; +set ndb_autoincrement_prefetch_sz = 32; +drop table if exists t1; +SET @@session.auto_increment_offset=1; +SET @@session.auto_increment_increment=1; +set ndb_autoincrement_prefetch_sz = 32; +create table t1 (a int not null auto_increment primary key) engine ndb; +insert into t1 values (NULL); +insert into t1 values (NULL); +select * from t1 order by a; +a +1 +33 +insert into t1 values (20); +insert into t1 values (NULL); +select * from t1 order by a; +a +1 +20 +33 +34 +insert into t1 values (35); +insert into t1 values (NULL); +insert into t1 values (NULL); +ERROR 23000: Duplicate entry '35' for key 'PRIMARY' +select * from t1 order by a; +a +1 +20 +21 +33 +34 +35 +insert into t1 values (100); +insert into t1 values (NULL); +insert into t1 values (NULL); +select * from t1 order by a; +a +1 +20 +21 +22 +33 +34 +35 +100 +101 +set auto_increment_offset = @old_auto_increment_offset; +set auto_increment_increment = @old_auto_increment_increment; +set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz; +drop table t1; diff --git a/mysql-test/suite/ndb/t/ndb_auto_increment.test b/mysql-test/suite/ndb/t/ndb_auto_increment.test new file mode 100644 index 00000000000..14e7ae7ca7b --- /dev/null +++ b/mysql-test/suite/ndb/t/ndb_auto_increment.test @@ -0,0 +1,293 @@ +-- source include/have_multi_ndb.inc +-- source include/not_embedded.inc + +--disable_warnings +connection server1; +DROP TABLE IF EXISTS t1,t2; +connection server2; +DROP TABLE IF EXISTS t1; +connection server1; +--enable_warnings + +set @old_auto_increment_offset = @@session.auto_increment_offset; +set @old_auto_increment_increment = @@session.auto_increment_increment; +set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz; + +flush status; + +create table t1 (a int not null auto_increment primary key) engine ndb; + +# Step 1: Verify simple insert +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 2: Verify simple update with higher than highest value causes +# next insert to use 
updated_value + 1 +update t1 set a = 5 where a = 1; +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 3: Verify insert that inserts higher than highest value causes +# next insert to use inserted_value + 1 +insert into t1 values (7); +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 4: Verify that insert into hole, lower than highest value doesn't +# affect next insert +insert into t1 values (2); +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 5: Verify that update into hole, lower than highest value doesn't +# affect next insert +update t1 set a = 4 where a = 2; +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 6: Verify that delete of highest value doesn't cause the next +# insert to reuse this value +delete from t1 where a = 10; +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 7: Verify that REPLACE has the same effect as INSERT +replace t1 values (NULL); +select * from t1 order by a; +replace t1 values (15); +select * from t1 order by a; +replace into t1 values (NULL); +select * from t1 order by a; + +# Step 8: Verify that REPLACE has the same effect as UPDATE +replace t1 values (15); +select * from t1 order by a; + +# Step 9: Verify that IGNORE doesn't affect auto_increment +insert ignore into t1 values (NULL); +select * from t1 order by a; +insert ignore into t1 values (15), (NULL); +select * from t1 order by a; + +# Step 10: Verify that on duplicate key as UPDATE behaves as an +# UPDATE +insert into t1 values (15) +on duplicate key update a = 20; +insert into t1 values (NULL); +select * from t1 order by a; + +# Step 11: Verify that on duplicate key as INSERT behaves as INSERT +insert into t1 values (NULL) on duplicate key update a = 30; +select * from t1 order by a; +insert into t1 values (30) on duplicate key update a = 40; +select * from t1 order by a; + +#Step 12: Vefify INSERT IGNORE (bug#32055) +insert ignore into t1 values(600),(NULL),(NULL),(610),(NULL); +select * from t1 order by a; +drop table t1; + +#Step 13: Verify auto_increment of unique key +create table t1 (a int not null primary key, + b int not null unique auto_increment) engine ndb; +insert into t1 values (1, NULL); +insert into t1 values (3, NULL); +update t1 set b = 3 where a = 3; +insert into t1 values (4, NULL); +select * from t1 order by a; +drop table t1; + +#Step 14: Verify that auto_increment_increment and auto_increment_offset +# work as expected + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=MYISAM; + +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +TRUNCATE t1; +TRUNCATE t2; +SET @@session.auto_increment_offset=5; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +TRUNCATE t1; +TRUNCATE t2; +SET @@session.auto_increment_increment=2; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT 
INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +DROP TABLE t1, t2; + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 7; + +SET @@session.auto_increment_offset=1; +SET @@session.auto_increment_increment=1; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +DROP TABLE t1, t2; + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 3; + +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +DROP TABLE t1, t2; + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 7; + +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +DROP TABLE t1, t2; + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 5; + +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +DROP TABLE t1, t2; + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, + b INT NOT NULL, + c INT NOT NULL UNIQUE +) ENGINE=MYISAM AUTO_INCREMENT = 100; + +SET @@session.auto_increment_offset=5; +SET @@session.auto_increment_increment=10; +INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); +INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); +SELECT * FROM t1 ORDER BY pk; +SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; +DROP TABLE t1, t2; + +#Step 15: Now verify that behaviour on multiple MySQL Servers behave +# properly. Start by dropping table and recreating it to start +# counters and id caches from zero again. 
+--disable_warnings +connection server2; +SET @@session.auto_increment_offset=1; +SET @@session.auto_increment_increment=1; +set ndb_autoincrement_prefetch_sz = 32; +drop table if exists t1; +connection server1; +SET @@session.auto_increment_offset=1; +SET @@session.auto_increment_increment=1; +set ndb_autoincrement_prefetch_sz = 32; +--enable_warnings + + +create table t1 (a int not null auto_increment primary key) engine ndb; +# Basic test, ensure that the second server gets a new range. +#Generate record with key = 1 +insert into t1 values (NULL); +connection server2; +#Generate record with key = 33 +insert into t1 values (NULL); +connection server1; +select * from t1 order by a; + +#This insert should not affect the range of the second server +insert into t1 values (20); +connection server2; +insert into t1 values (NULL); +select * from t1 order by a; + +connection server1; +#This insert should remove cached values but also skip values already +#taken by server2, given that there is no method of communicating with +#the other server it should also cause a conflict +connection server1; + +insert into t1 values (35); +insert into t1 values (NULL); +connection server2; +--error ER_DUP_ENTRY +insert into t1 values (NULL); +select * from t1 order by a; + +insert into t1 values (100); +insert into t1 values (NULL); +connection server1; +insert into t1 values (NULL); +select * from t1 order by a; + +set auto_increment_offset = @old_auto_increment_offset; +set auto_increment_increment = @old_auto_increment_increment; +set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz; + +drop table t1; diff --git a/mysql-test/t/ndb_auto_increment.test b/mysql-test/t/ndb_auto_increment.test deleted file mode 100644 index 9801f2de564..00000000000 --- a/mysql-test/t/ndb_auto_increment.test +++ /dev/null @@ -1,294 +0,0 @@ --- source include/have_ndb.inc --- source include/have_multi_ndb.inc --- source include/not_embedded.inc - ---disable_warnings -connection server1; -DROP TABLE IF EXISTS t1,t2; -connection server2; -DROP TABLE IF EXISTS t1; -connection server1; ---enable_warnings - -set @old_auto_increment_offset = @@session.auto_increment_offset; -set @old_auto_increment_increment = @@session.auto_increment_increment; -set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz; - -flush status; - -create table t1 (a int not null auto_increment primary key) engine ndb; - -# Step 1: Verify simple insert -insert into t1 values (NULL); -select * from t1 order by a; - -# Step 2: Verify simple update with higher than highest value causes -# next insert to use updated_value + 1 -update t1 set a = 5 where a = 1; -insert into t1 values (NULL); -select * from t1 order by a; - -# Step 3: Verify insert that inserts higher than highest value causes -# next insert to use inserted_value + 1 -insert into t1 values (7); -insert into t1 values (NULL); -select * from t1 order by a; - -# Step 4: Verify that insert into hole, lower than highest value doesn't -# affect next insert -insert into t1 values (2); -insert into t1 values (NULL); -select * from t1 order by a; - -# Step 5: Verify that update into hole, lower than highest value doesn't -# affect next insert -update t1 set a = 4 where a = 2; -insert into t1 values (NULL); -select * from t1 order by a; - -# Step 6: Verify that delete of highest value doesn't cause the next -# insert to reuse this value -delete from t1 where a = 10; -insert into t1 values (NULL); -select * from t1 order by a; - -# Step 7: Verify that REPLACE has the 
same effect as INSERT -replace t1 values (NULL); -select * from t1 order by a; -replace t1 values (15); -select * from t1 order by a; -replace into t1 values (NULL); -select * from t1 order by a; - -# Step 8: Verify that REPLACE has the same effect as UPDATE -replace t1 values (15); -select * from t1 order by a; - -# Step 9: Verify that IGNORE doesn't affect auto_increment -insert ignore into t1 values (NULL); -select * from t1 order by a; -insert ignore into t1 values (15), (NULL); -select * from t1 order by a; - -# Step 10: Verify that on duplicate key as UPDATE behaves as an -# UPDATE -insert into t1 values (15) -on duplicate key update a = 20; -insert into t1 values (NULL); -select * from t1 order by a; - -# Step 11: Verify that on duplicate key as INSERT behaves as INSERT -insert into t1 values (NULL) on duplicate key update a = 30; -select * from t1 order by a; -insert into t1 values (30) on duplicate key update a = 40; -select * from t1 order by a; - -#Step 12: Vefify INSERT IGNORE (bug#32055) -insert ignore into t1 values(600),(NULL),(NULL),(610),(NULL); -select * from t1 order by a; -drop table t1; - -#Step 13: Verify auto_increment of unique key -create table t1 (a int not null primary key, - b int not null unique auto_increment) engine ndb; -insert into t1 values (1, NULL); -insert into t1 values (3, NULL); -update t1 set b = 3 where a = 3; -insert into t1 values (4, NULL); -select * from t1 order by a; -drop table t1; - -#Step 14: Verify that auto_increment_increment and auto_increment_offset -# work as expected - -CREATE TABLE t1 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER; - -CREATE TABLE t2 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=MYISAM; - -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -TRUNCATE t1; -TRUNCATE t2; -SET @@session.auto_increment_offset=5; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -TRUNCATE t1; -TRUNCATE t2; -SET @@session.auto_increment_increment=2; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -DROP TABLE t1, t2; - -CREATE TABLE t1 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; - -CREATE TABLE t2 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 7; - -SET @@session.auto_increment_offset=1; -SET @@session.auto_increment_increment=1; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -DROP TABLE t1, t2; - -CREATE TABLE t1 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER 
AUTO_INCREMENT = 3; - -CREATE TABLE t2 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 3; - -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -DROP TABLE t1, t2; - -CREATE TABLE t1 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7; - -CREATE TABLE t2 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 7; - -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -DROP TABLE t1, t2; - -CREATE TABLE t1 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5; - -CREATE TABLE t2 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 5; - -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -DROP TABLE t1, t2; - -CREATE TABLE t1 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100; - -CREATE TABLE t2 ( - pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT, - b INT NOT NULL, - c INT NOT NULL UNIQUE -) ENGINE=MYISAM AUTO_INCREMENT = 100; - -SET @@session.auto_increment_offset=5; -SET @@session.auto_increment_increment=10; -INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2); -INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2); -SELECT * FROM t1 ORDER BY pk; -SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c; -DROP TABLE t1, t2; - -#Step 15: Now verify that behaviour on multiple MySQL Servers behave -# properly. Start by dropping table and recreating it to start -# counters and id caches from zero again. ---disable_warnings -connection server2; -SET @@session.auto_increment_offset=1; -SET @@session.auto_increment_increment=1; -set ndb_autoincrement_prefetch_sz = 32; -drop table if exists t1; -connection server1; -SET @@session.auto_increment_offset=1; -SET @@session.auto_increment_increment=1; -set ndb_autoincrement_prefetch_sz = 32; ---enable_warnings - - -create table t1 (a int not null auto_increment primary key) engine ndb; -# Basic test, ensure that the second server gets a new range. 
-#Generate record with key = 1 -insert into t1 values (NULL); -connection server2; -#Generate record with key = 33 -insert into t1 values (NULL); -connection server1; -select * from t1 order by a; - -#This insert should not affect the range of the second server -insert into t1 values (20); -connection server2; -insert into t1 values (NULL); -select * from t1 order by a; - -connection server1; -#This insert should remove cached values but also skip values already -#taken by server2, given that there is no method of communicating with -#the other server it should also cause a conflict -connection server1; - -insert into t1 values (35); -insert into t1 values (NULL); -connection server2; ---error ER_DUP_ENTRY -insert into t1 values (NULL); -select * from t1 order by a; - -insert into t1 values (100); -insert into t1 values (NULL); -connection server1; -insert into t1 values (NULL); -select * from t1 order by a; - -set auto_increment_offset = @old_auto_increment_offset; -set auto_increment_increment = @old_auto_increment_increment; -set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz; - -drop table t1; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 2294d836854..93722da574c 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -2704,6 +2704,29 @@ int ha_ndbcluster::full_table_scan(uchar *buf) DBUG_RETURN(next_result(buf)); } +int +ha_ndbcluster::set_auto_inc(Field *field) +{ + DBUG_ENTER("ha_ndbcluster::set_auto_inc"); + Ndb *ndb= get_ndb(); + bool read_bit= bitmap_is_set(table->read_set, field->field_index); + bitmap_set_bit(table->read_set, field->field_index); + Uint64 next_val= (Uint64) field->val_int() + 1; + if (!read_bit) + bitmap_clear_bit(table->read_set, field->field_index); +#ifndef DBUG_OFF + char buff[22]; + DBUG_PRINT("info", + ("Trying to set next auto increment value to %s", + llstr(next_val, buff))); +#endif + Ndb_tuple_id_range_guard g(m_share); + if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE) + == -1) + ERR_RETURN(ndb->getNdbError()); + DBUG_RETURN(0); +} + /* Insert one record into NDB */ @@ -2910,18 +2933,11 @@ int ha_ndbcluster::write_row(uchar *record) } if ((has_auto_increment) && (m_skip_auto_increment)) { - Ndb *ndb= get_ndb(); - Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1; -#ifndef DBUG_OFF - char buff[22]; - DBUG_PRINT("info", - ("Trying to set next auto increment value to %s", - llstr(next_val, buff))); -#endif - Ndb_tuple_id_range_guard g(m_share); - if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE) - == -1) - ERR_RETURN(ndb->getNdbError()); + int ret_val; + if ((ret_val= set_auto_inc(table->next_number_field))) + { + DBUG_RETURN(ret_val); + } } m_skip_auto_increment= TRUE; @@ -3046,6 +3062,17 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data) // Insert new row DBUG_PRINT("info", ("delete succeded")); m_primary_key_update= TRUE; + /* + If we are updating a primary key with auto_increment + then we need to update the auto_increment counter + */ + if (table->found_next_number_field && + bitmap_is_set(table->write_set, + table->found_next_number_field->field_index) && + (error= set_auto_inc(table->found_next_number_field))) + { + DBUG_RETURN(error); + } insert_res= write_row(new_data); m_primary_key_update= FALSE; if (insert_res) @@ -3068,7 +3095,17 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data) DBUG_PRINT("info", ("delete+insert succeeded")); DBUG_RETURN(0); } - + /* + If we are updating a unique key with auto_increment + 
then we need to update the auto_increment counter + */ + if (table->found_next_number_field && + bitmap_is_set(table->write_set, + table->found_next_number_field->field_index) && + (error= set_auto_inc(table->found_next_number_field))) + { + DBUG_RETURN(error); + } if (cursor) { /* @@ -4478,9 +4515,11 @@ int ha_ndbcluster::init_handler_for_statement(THD *thd, Thd_ndb *thd_ndb) // store thread specific data first to set the right context m_force_send= thd->variables.ndb_force_send; m_ha_not_exact_count= !thd->variables.ndb_use_exact_count; - m_autoincrement_prefetch= - (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz; - + m_autoincrement_prefetch= + (thd->variables.ndb_autoincrement_prefetch_sz > + NDB_DEFAULT_AUTO_PREFETCH) ? + (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz + : (ha_rows) NDB_DEFAULT_AUTO_PREFETCH; m_active_trans= thd_ndb->trans; DBUG_ASSERT(m_active_trans); // Start of transaction @@ -6163,8 +6202,9 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment, ulonglong *first_value, ulonglong *nb_reserved_values) { - int cache_size; + uint cache_size; Uint64 auto_value; + THD *thd= current_thd; DBUG_ENTER("get_auto_increment"); DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); Ndb *ndb= get_ndb(); @@ -6174,11 +6214,14 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment, /* We guessed too low */ m_rows_to_insert+= m_autoincrement_prefetch; } - cache_size= - (int) ((m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ? - m_rows_to_insert - m_rows_inserted : - ((m_rows_to_insert > m_autoincrement_prefetch) ? - m_rows_to_insert : m_autoincrement_prefetch)); + uint remaining= m_rows_to_insert - m_rows_inserted; + uint min_prefetch= + (remaining < thd->variables.ndb_autoincrement_prefetch_sz) ? + thd->variables.ndb_autoincrement_prefetch_sz + : remaining; + cache_size= ((remaining < m_autoincrement_prefetch) ? 
+ min_prefetch + : remaining); uint retries= NDB_AUTO_INCREMENT_RETRIES; int retry_sleep= 30; /* 30 milliseconds, transaction */ for (;;) @@ -6265,7 +6308,7 @@ ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg): m_dupkey((uint) -1), m_ha_not_exact_count(FALSE), m_force_send(TRUE), - m_autoincrement_prefetch((ha_rows) 32), + m_autoincrement_prefetch((ha_rows) NDB_DEFAULT_AUTO_PREFETCH), m_transaction_on(TRUE), m_cond(NULL), m_multi_cursor(NULL) -- cgit v1.2.1 From 6b018a16e270b30296b19ba0f1eea7ced157c936 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 6 Nov 2007 15:12:27 +0100 Subject: Bug #31484 Cluster LOST_EVENTS entry not added to binlog on mysqld restart - correction, do not insert GAP on first startup --- sql/ha_ndbcluster_binlog.cc | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 4bb17dffa4a..baa8b1ada66 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -3725,8 +3725,25 @@ restart: /* Main NDB Injector loop */ - if (ndb_binlog_running) + while (ndb_binlog_running) { + /* + check if it is the first log, if so we do not insert a GAP event + as there is really no log to have a GAP in + */ + { + LOG_INFO log_info; + mysql_bin_log.get_current_log(&log_info); + int len= strlen(log_info.log_file_name); + uint no= 0; + if ((sscanf(log_info.log_file_name + len - 6, "%u", &no) == 1) && + no == 1) + { + /* this is the fist log, so skip GAP event */ + break; + } + } + /* Always insert a GAP event as we cannot know what has happened in the cluster while not being connected. @@ -3739,8 +3756,9 @@ restart: IF_DBUG(int error=) inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg[incident_id]); DBUG_ASSERT(!error); - incident_id= 1; + break; } + incident_id= 1; { thd->proc_info= "Waiting for ndbcluster to start"; -- cgit v1.2.1 From 7d26353f0cb8d9d630ae6f5f6eaea8b7bd9b61ff Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 6 Nov 2007 22:28:44 +0100 Subject: only potentially skip GAP event on startup, not on cluster restart --- sql/ha_ndbcluster_binlog.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index baa8b1ada66..d9380b50f1e 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -3731,6 +3731,7 @@ restart: check if it is the first log, if so we do not insert a GAP event as there is really no log to have a GAP in */ + if (incident_id == 0) { LOG_INFO log_info; mysql_bin_log.get_current_log(&log_info); -- cgit v1.2.1 From 36d1659a80ce09f480e2af9efa9c6eb32157ec02 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 7 Nov 2007 18:02:12 +0200 Subject: Bug #31928: Search fails on '1000-00-00' date after sql_mode change When constructing a key image stricter date checking (from sql_mode) should not be enabled, because it will reject invalid dates that the server would otherwise accept for searching when there's no index. Fixed by disabling strict date checking when constructing a key image. 
mysql-test/r/type_date.result: Bug #31928: test case mysql-test/t/type_date.test: Bug #31928: test case sql/sql_select.h: Bug #31928: Disable strict date checking when consructing a key image --- mysql-test/r/type_date.result | 39 ++++++++++++++++++++++++++++++++++++++- mysql-test/t/type_date.test | 20 ++++++++++++++++++++ sql/sql_select.h | 11 +++++++---- 3 files changed, 65 insertions(+), 5 deletions(-) diff --git a/mysql-test/r/type_date.result b/mysql-test/r/type_date.result index bd2a43569dd..3cf1a166e7f 100644 --- a/mysql-test/r/type_date.result +++ b/mysql-test/r/type_date.result @@ -202,7 +202,6 @@ a Warnings: Warning 1292 Incorrect date value: '0000-00-00' for column 'a' at row 1 Warning 1292 Incorrect date value: '0000-00-00' for column 'a' at row 1 -Warning 1292 Incorrect date value: '0000-00-00' for column 'a' at row 1 SELECT * FROM t2 WHERE a = '0000-00-00'; a 0000-00-00 @@ -214,4 +213,42 @@ INSERT INTO t1 VALUES ('0000-00-00'); ERROR 22007: Incorrect date value: '0000-00-00' for column 'a' at row 1 SET SQL_MODE=DEFAULT; DROP TABLE t1,t2; +CREATE TABLE t1 (a DATE); +CREATE TABLE t2 (a DATE); +CREATE INDEX i ON t1 (a); +INSERT INTO t1 VALUES ('1000-00-00'),('1000-00-00'); +INSERT INTO t2 VALUES ('1000-00-00'),('1000-00-00'); +SELECT * FROM t1 WHERE a = '1000-00-00'; +a +1000-00-00 +1000-00-00 +SELECT * FROM t2 WHERE a = '1000-00-00'; +a +1000-00-00 +1000-00-00 +SET SQL_MODE=TRADITIONAL; +EXPLAIN SELECT * FROM t1 WHERE a = '1000-00-00'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref i i 4 const 1 Using where; Using index +Warnings: +Warning 1292 Incorrect date value: '1000-00-00' for column 'a' at row 1 +Warning 1292 Incorrect date value: '1000-00-00' for column 'a' at row 1 +SELECT * FROM t1 WHERE a = '1000-00-00'; +a +1000-00-00 +1000-00-00 +Warnings: +Warning 1292 Incorrect date value: '1000-00-00' for column 'a' at row 1 +Warning 1292 Incorrect date value: '1000-00-00' for column 'a' at row 1 +SELECT * FROM t2 WHERE a = '1000-00-00'; +a +1000-00-00 +1000-00-00 +Warnings: +Warning 1292 Incorrect date value: '1000-00-00' for column 'a' at row 1 +Warning 1292 Incorrect date value: '1000-00-00' for column 'a' at row 1 +INSERT INTO t1 VALUES ('1000-00-00'); +ERROR 22007: Incorrect date value: '1000-00-00' for column 'a' at row 1 +SET SQL_MODE=DEFAULT; +DROP TABLE t1,t2; End of 5.0 tests diff --git a/mysql-test/t/type_date.test b/mysql-test/t/type_date.test index 507537457d3..885fad3b623 100644 --- a/mysql-test/t/type_date.test +++ b/mysql-test/t/type_date.test @@ -190,4 +190,24 @@ INSERT INTO t1 VALUES ('0000-00-00'); SET SQL_MODE=DEFAULT; DROP TABLE t1,t2; +# +# Bug #31928: Search fails on '1000-00-00' date after sql_mode change +# + +CREATE TABLE t1 (a DATE); +CREATE TABLE t2 (a DATE); +CREATE INDEX i ON t1 (a); +INSERT INTO t1 VALUES ('1000-00-00'),('1000-00-00'); +INSERT INTO t2 VALUES ('1000-00-00'),('1000-00-00'); +SELECT * FROM t1 WHERE a = '1000-00-00'; +SELECT * FROM t2 WHERE a = '1000-00-00'; +SET SQL_MODE=TRADITIONAL; +EXPLAIN SELECT * FROM t1 WHERE a = '1000-00-00'; +SELECT * FROM t1 WHERE a = '1000-00-00'; +SELECT * FROM t2 WHERE a = '1000-00-00'; +--error ER_TRUNCATED_WRONG_VALUE +INSERT INTO t1 VALUES ('1000-00-00'); +SET SQL_MODE=DEFAULT; +DROP TABLE t1,t2; + --echo End of 5.0 tests diff --git a/sql/sql_select.h b/sql/sql_select.h index 4fc32e7fdb3..42be8d3ec68 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -548,14 +548,17 @@ public: enum store_key_result copy() { enum store_key_result result; - enum_check_fields 
saved_count_cuted_fields= - to_field->table->in_use->count_cuted_fields; + THD *thd= to_field->table->in_use; + enum_check_fields saved_count_cuted_fields= thd->count_cuted_fields; + ulong sql_mode= thd->variables.sql_mode; + thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE); - to_field->table->in_use->count_cuted_fields= CHECK_FIELD_IGNORE; + thd->count_cuted_fields= CHECK_FIELD_IGNORE; result= copy_inner(); - to_field->table->in_use->count_cuted_fields= saved_count_cuted_fields; + thd->count_cuted_fields= saved_count_cuted_fields; + thd->variables.sql_mode= sql_mode; return result; } -- cgit v1.2.1 From 327cd42a7f0cf18b8a6b13ac7cf4b04733df07e3 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 7 Nov 2007 20:57:21 +0100 Subject: ndb - bug#32160 (recommit to 5.0) fix lcp master take over bug ndb/src/kernel/blocks/ERROR_codes.txt: new error codes ndb/src/kernel/blocks/dbdih/Dbdih.hpp: add debug code ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: fix master lcp bug add 2 new error codes ndb/test/ndbapi/testNodeRestart.cpp: testcase ndb/test/run-test/daily-basic-tests.txt: testcase --- ndb/src/kernel/blocks/ERROR_codes.txt | 7 +++- ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 10 ++++++ ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 60 +++++++++++++++++++++++++++---- ndb/test/ndbapi/testNodeRestart.cpp | 48 +++++++++++++++++++++++++ ndb/test/run-test/daily-basic-tests.txt | 4 +++ 5 files changed, 122 insertions(+), 7 deletions(-) diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt index e45c608b601..2599bf40988 100644 --- a/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/ndb/src/kernel/blocks/ERROR_codes.txt @@ -5,7 +5,7 @@ Next DBACC 3002 Next DBTUP 4014 Next DBLQH 5043 Next DBDICT 6007 -Next DBDIH 7183 +Next DBDIH 7195 Next DBTC 8052 Next CMVMI 9000 Next BACKUP 10022 @@ -73,6 +73,11 @@ Delay GCP_SAVEREQ by 10 secs 7180: Crash master during master-take-over in execMASTER_LCPCONF +7193: Dont send LCP_FRAG_ORD to self, and crash when sending first + LCP_FRAG_ORD(last) + +7194: Force removeNodeFromStored to complete in the middle of MASTER_LCPCONF + ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING: ----------------------------------------------------------------- diff --git a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp index ca91f56909d..e471a953391 100644 --- a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp @@ -1291,7 +1291,17 @@ private: LcpStatus lcpStatus; Uint32 lcpStatusUpdatedPlace; + struct Save { + LcpStatus m_status; + Uint32 m_place; + } m_saveState[10]; + void setLcpStatus(LcpStatus status, Uint32 line){ + for (Uint32 i = 9; i > 0; i--) + m_saveState[i] = m_saveState[i-1]; + m_saveState[0].m_status = lcpStatus; + m_saveState[0].m_place = lcpStatusUpdatedPlace; + lcpStatus = status; lcpStatusUpdatedPlace = line; } diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 9191bb3fb9b..88d167f0985 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -4764,11 +4764,19 @@ void Dbdih::startRemoveFailedNode(Signal* signal, NodeRecordPtr failedNodePtr) } jam(); - signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE; - signal->theData[1] = failedNodePtr.i; - signal->theData[2] = 0; // Tab id - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - + + if (!ERROR_INSERTED(7194)) + { + signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE; + 
signal->theData[1] = failedNodePtr.i; + signal->theData[2] = 0; // Tab id + sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); + } + else + { + ndbout_c("7194 Not starting ZREMOVE_NODE_FROM_TABLE"); + } + setLocalNodefailHandling(signal, failedNodePtr.i, NF_REMOVE_NODE_FROM_TABLE); }//Dbdih::startRemoveFailedNode() @@ -5676,12 +5684,22 @@ Dbdih::checkEmptyLcpComplete(Signal *signal){ signal->theData[0] = 7012; execDUMP_STATE_ORD(signal); + + if (ERROR_INSERTED(7194)) + { + ndbout_c("7194 starting ZREMOVE_NODE_FROM_TABLE"); + signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE; + signal->theData[1] = c_lcpMasterTakeOverState.failedNodeId; + signal->theData[2] = 0; // Tab id + sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); + } c_lcpMasterTakeOverState.set(LMTOS_INITIAL, __LINE__); MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0]; req->masterRef = reference(); req->failedNodeId = c_lcpMasterTakeOverState.failedNodeId; sendLoopMacro(MASTER_LCPREQ, sendMASTER_LCPREQ); + } else { sendMASTER_LCPCONF(signal); } @@ -5998,6 +6016,15 @@ void Dbdih::execMASTER_LCPCONF(Signal* signal) { const MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0]; jamEntry(); + + if (ERROR_INSERTED(7194)) + { + ndbout_c("delaying MASTER_LCPCONF due to error 7194"); + sendSignalWithDelay(reference(), GSN_MASTER_LCPCONF, signal, + 300, signal->getLength()); + return; + } + Uint32 senderNodeId = conf->senderNodeId; MasterLCPConf::State lcpState = (MasterLCPConf::State)conf->lcpState; const Uint32 failedNodeId = conf->failedNodeId; @@ -6132,7 +6159,6 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId) #endif c_lcpState.keepGci = SYSFILE->keepGCI; - c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__); startLcpRoundLoopLab(signal, 0, 0); break; } @@ -9924,6 +9950,8 @@ void Dbdih::sendLastLCP_FRAG_ORD(Signal* signal) if(ERROR_INSERTED(7075)){ continue; } + + CRASH_INSERTION(7193); BlockReference ref = calcLqhBlockRef(nodePtr.i); sendSignal(ref, GSN_LCP_FRAG_ORD, signal,LcpFragOrd::SignalLength, JBB); } @@ -10121,6 +10149,13 @@ Dbdih::checkLcpAllTablesDoneInLqh(){ CRASH_INSERTION2(7017, !isMaster()); c_lcpState.setLcpStatus(LCP_TAB_COMPLETED, __LINE__); + + if (ERROR_INSERTED(7194)) + { + ndbout_c("CLEARING 7194"); + CLEAR_ERROR_INSERT_VALUE; + } + return true; } @@ -10276,6 +10311,11 @@ Dbdih::sendLCP_FRAG_ORD(Signal* signal, BlockReference ref = calcLqhBlockRef(replicaPtr.p->procNode); + if (ERROR_INSERTED(7193) && replicaPtr.p->procNode == getOwnNodeId()) + { + return; + } + LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0]; lcpFragOrd->tableId = info.tableId; lcpFragOrd->fragmentId = info.fragId; @@ -13686,6 +13726,14 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal) ("immediateLcpStart = %d masterLcpNodeId = %d", c_lcpState.immediateLcpStart, refToNode(c_lcpState.m_masterLcpDihRef)); + + for (Uint32 i = 0; i<10; i++) + { + infoEvent("%u : status: %u place: %u", i, + c_lcpState.m_saveState[i].m_status, + c_lcpState.m_saveState[i].m_place); + } + infoEvent("-- Node %d LCP STATE --", getOwnNodeId()); } diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp index 03a60b1b525..12b0187b71f 100644 --- a/ndb/test/ndbapi/testNodeRestart.cpp +++ b/ndb/test/ndbapi/testNodeRestart.cpp @@ -1347,6 +1347,51 @@ runBug28717(NDBT_Context* ctx, NDBT_Step* step) return NDBT_OK; } +int +runBug32160(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + 
Ndb* pNdb = GETNDB(step); + NdbRestarter res; + + if (res.getNumDbNodes() < 2) + { + return NDBT_OK; + } + + int master = res.getMasterNodeId(); + int next = res.getNextMasterNodeId(master); + + if (res.insertErrorInNode(next, 7194)) + { + return NDBT_FAILED; + } + + int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 }; + if (res.dumpStateOneNode(master, val2, 2)) + return NDBT_FAILED; + + if (res.insertErrorInNode(master, 7193)) + return NDBT_FAILED; + + int val3[] = { 7099 }; + if (res.dumpStateOneNode(master, val3, 1)) + return NDBT_FAILED; + + if (res.waitNodesNoStart(&master, 1)) + return NDBT_FAILED; + + if (res.startNodes(&master, 1)) + return NDBT_FAILED; + + if (res.waitClusterStarted()) + return NDBT_FAILED; + + return NDBT_OK; +} + NDBT_TESTSUITE(testNodeRestart); TESTCASE("NoLoad", "Test that one node at a time can be stopped and then restarted "\ @@ -1686,6 +1731,9 @@ TESTCASE("Bug28717", ""){ TESTCASE("Bug29364", ""){ INITIALIZER(runBug29364); } +TESTCASE("Bug32160", ""){ + INITIALIZER(runBug32160); +} NDBT_TESTSUITE_END(testNodeRestart); int main(int argc, const char** argv){ diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 4f7ba26bf27..7b4a4ca0e2d 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -497,6 +497,10 @@ max-time: 1000 cmd: testNodeRestart args: -n Bug26481 T1 +max-time: 300 +cmd: testNodeRestart +args: -n Bug32160 T1 + # OLD FLEX max-time: 500 cmd: flexBench -- cgit v1.2.1 From 4d0c67c6d3b24fb1d3ed76f6393674597bf6121b Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 8 Nov 2007 15:05:38 +0100 Subject: testDict.cpp: Updated DropDDObjects in testDict to ignore the user tables created in the MySQL database storage/ndb/test/ndbapi/testDict.cpp: Updated DropDDObjects in testDict to ignore the user tables created in the MySQL database --- storage/ndb/test/ndbapi/testDict.cpp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/storage/ndb/test/ndbapi/testDict.cpp b/storage/ndb/test/ndbapi/testDict.cpp index 16b6e129605..e1b8f2b3c7f 100644 --- a/storage/ndb/test/ndbapi/testDict.cpp +++ b/storage/ndb/test/ndbapi/testDict.cpp @@ -2776,9 +2776,13 @@ runDropDDObjects(NDBT_Context* ctx, NDBT_Step* step){ case NdbDictionary::Object::UserTable: tableFound = list.elements[i].name; if(tableFound != 0){ - if(pDict->dropTable(tableFound) != 0){ - g_err << "Failed to drop table: " << pDict->getNdbError() << endl; - return NDBT_FAILED; + if(strcmp(tableFound, "ndb_apply_status") != 0 && + strcmp(tableFound, "NDB$BLOB_2_3") != 0 && + strcmp(tableFound, "ndb_schema") != 0){ + if(pDict->dropTable(tableFound) != 0){ + g_err << "Failed to drop table: " << tableFound << pDict->getNdbError() << endl; + return NDBT_FAILED; + } } } tableFound = 0; -- cgit v1.2.1 From 06323c2b742a16ed54069251321b4b57085930ca Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 10 Nov 2007 16:54:25 +0400 Subject: Bug #32063 "create table like" works case-significant only in "embedded" server (libmysqld) in mysql_creata_like_table() we 'downcase' the complete path to the .frm file. It works fine in standalone case as there usually we only have './' as a path to the datahome, but doesn't work in the embedded server where we add the real path there, so if a directory has uppercase letters in it's name, it won't be found. Fixed by 'downcasing' only database/table pair. 
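Reduced to a standalone sketch, the idea is to lower-case only the database/table tail and never the data-directory prefix; frm_path and its arguments are hypothetical helpers, not the server's strxmov/my_casedn_str calls.

// Build the path to a table's .frm file, normalising case only for the
// part that names the database and table.
#include <algorithm>
#include <cctype>
#include <cstdio>
#include <string>

std::string frm_path(const std::string &datadir,
                     const std::string &db, const std::string &table,
                     bool lower_case_table_names)
{
  std::string tail = db + "/" + table + ".frm";
  if (lower_case_table_names)
    std::transform(tail.begin(), tail.end(), tail.begin(),
                   [](unsigned char c) { return std::tolower(c); });
  return datadir + "/" + tail;                  // datadir keeps its case
}

int main()
{
  // Lower-casing the whole string, as the old code did, would have turned
  // "/Data/MySQL" into "/data/mysql", which does not exist on a
  // case-sensitive filesystem.
  std::printf("%s\n", frm_path("/Data/MySQL", "Test", "T1", true).c_str());
  return 0;
}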
sql/sql_table.cc: Bug #32063 "create table like" works case-significant only in "embedded" server (libmysqld) do not lowercase the database directory --- sql/sql_table.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 0316d6a3c10..a1df7e21b73 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2429,12 +2429,12 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, strxmov(src_path, (*tmp_table)->path, reg_ext, NullS); else { - strxmov(src_path, mysql_data_home, "/", src_db, "/", src_table, - reg_ext, NullS); + char *tablename_pos= strxmov(src_path, mysql_data_home, "/", NullS); + strxmov(tablename_pos, src_db, "/", src_table, reg_ext, NullS); + if (lower_case_table_names) + my_casedn_str(files_charset_info, tablename_pos); /* Resolve symlinks (for windows) */ fn_format(src_path, src_path, "", "", MYF(MY_UNPACK_FILENAME)); - if (lower_case_table_names) - my_casedn_str(files_charset_info, src_path); if (access(src_path, F_OK)) { my_error(ER_BAD_TABLE_ERROR, MYF(0), src_table); -- cgit v1.2.1 From eb347921b736c06651189623b1dcd92a6d6d1039 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 13 Nov 2007 10:51:47 +0400 Subject: Bug #31158 Spatial, Union, LONGBLOB vs BLOB bug (crops data) max_length parameter for BLOB-returning functions must be big enough for any possible content. Otherwise the field created for a table will be too small. mysql-test/r/gis.result: Bug #31158 Spatial, Union, LONGBLOB vs BLOB bug (crops data) test result mysql-test/t/gis.test: Bug #31158 Spatial, Union, LONGBLOB vs BLOB bug (crops data) test case sql/field.cc: Bug #31158 Spatial, Union, LONGBLOB vs BLOB bug (crops data) max_field_size used instead of numeric value sql/field.h: Bug #31158 Spatial, Union, LONGBLOB vs BLOB bug (crops data) max_field_size constant defined sql/item_geofunc.cc: Bug #31158 Spatial, Union, LONGBLOB vs BLOB bug (crops data) max_length parameter fixed --- mysql-test/r/gis.result | 26 +++++++++++++++++++++++++- mysql-test/t/gis.test | 31 +++++++++++++++++++++++++++++++ sql/field.cc | 2 +- sql/field.h | 1 + sql/item_geofunc.cc | 2 +- 5 files changed, 59 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 55f70e59fcf..ff4673d4c0f 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -668,7 +668,7 @@ def test t1 t1 g g 255 4294967295 0 Y 144 0 63 g select asbinary(g) from t1; Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def asbinary(g) 252 8192 0 Y 128 0 63 +def asbinary(g) 252 4294967295 0 Y 128 0 63 asbinary(g) drop table t1; create table t1 (a TEXT, b GEOMETRY NOT NULL, SPATIAL KEY(b)); @@ -730,4 +730,28 @@ select geomfromtext(col9,col89) as a from t1; a NULL DROP TABLE t1; +CREATE TABLE t1 ( +geomdata polygon NOT NULL, +SPATIAL KEY index_geom (geomdata) +) ENGINE=MyISAM DEFAULT CHARSET=latin2 DELAY_KEY_WRITE=1 ROW_FORMAT=FIXED; +CREATE TABLE t2 ( +geomdata polygon NOT NULL, +SPATIAL KEY index_geom (geomdata) +) ENGINE=MyISAM DEFAULT CHARSET=latin2 DELAY_KEY_WRITE=1 ROW_FORMAT=FIXED; +CREATE TABLE t3 +select +aswkb(ws.geomdata) AS geomdatawkb +from +t1 ws +union +select +aswkb(ws.geomdata) AS geomdatawkb +from +t2 ws; +describe t3; +Field Type Null Key Default Extra +geomdatawkb longblob YES NULL +drop table t1; +drop table t2; +drop table t3; End of 4.1 tests diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index cf5c3b31bc1..3b9514bebbd 100644 --- a/mysql-test/t/gis.test +++ 
b/mysql-test/t/gis.test @@ -427,4 +427,35 @@ INSERT INTO `t1` VALUES ('','0000-00-00'); select geomfromtext(col9,col89) as a from t1; DROP TABLE t1; +# +# Bug #31158 Spatial, Union, LONGBLOB vs BLOB bug (crops data) +# + +CREATE TABLE t1 ( + geomdata polygon NOT NULL, + SPATIAL KEY index_geom (geomdata) +) ENGINE=MyISAM DEFAULT CHARSET=latin2 DELAY_KEY_WRITE=1 ROW_FORMAT=FIXED; + +CREATE TABLE t2 ( + geomdata polygon NOT NULL, + SPATIAL KEY index_geom (geomdata) +) ENGINE=MyISAM DEFAULT CHARSET=latin2 DELAY_KEY_WRITE=1 ROW_FORMAT=FIXED; + +CREATE TABLE t3 +select + aswkb(ws.geomdata) AS geomdatawkb + from + t1 ws +union + select + aswkb(ws.geomdata) AS geomdatawkb + from + t2 ws; + +describe t3; + +drop table t1; +drop table t2; +drop table t3; + --echo End of 4.1 tests diff --git a/sql/field.cc b/sql/field.cc index 5b43b9b1955..ed6523e9299 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -6975,7 +6975,7 @@ uint32 Field_blob::max_length() case 3: return 16777215 * field_charset->mbmaxlen; case 4: - return (uint32) 4294967295U; + return max_field_size; default: DBUG_ASSERT(0); // we should never go here return 0; diff --git a/sql/field.h b/sql/field.h index 20f1209a439..29be9ef38d4 100644 --- a/sql/field.h +++ b/sql/field.h @@ -26,6 +26,7 @@ #define NOT_FIXED_DEC 31 #define DATETIME_DEC 6 +const uint32 max_field_size= (uint32) 4294967295U; class Send_field; class Protocol; diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 6cb8c790319..7900396e5d8 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -29,7 +29,7 @@ void Item_geometry_func::fix_length_and_dec() { collation.set(&my_charset_bin); decimals=0; - max_length=MAX_BLOB_WIDTH; + max_length= max_field_size; maybe_null= 1; } -- cgit v1.2.1 From ee21ace74dcea0638835b46af44311e286d000f4 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 13 Nov 2007 12:10:22 +0400 Subject: Bug #31960 All embedded test crash Crash happens as a result of NO_EMBEDDED_ACCESS_CHECKS option (which is default for embedded server). check_table_access failed on using unintialized structure. Better solutions here is to disable that code completely in this case. Though the crash happens only in 6.0 i belive it's good to do it in 5.1 sql/mysql_priv.h: Bug #31960 All embedded test crash. Access check functions from sql_parse.cc defined as FALSE if NO_EMBEDDED_ACCESS_CHECKS sql/sql_parse.cc: Bug #31960 All embedded test crash. 
Implementation of access checkings functions #ifdefed out for NO_EMBEDDED_ACCESS_CHECKS --- sql/mysql_priv.h | 36 ++++++++++++++++++++++++++++++++++++ sql/sql_parse.cc | 20 ++------------------ 2 files changed, 38 insertions(+), 18 deletions(-) diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 3b88fe0fca8..32e8574b979 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -680,6 +680,8 @@ void free_items(Item *item); void cleanup_items(Item *item); class THD; void close_thread_tables(THD *thd, bool locked=0, bool skip_derived=0); + +#ifndef NO_EMBEDDED_ACCESS_CHECKS bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables); bool check_single_table_access(THD *thd, ulong privilege, TABLE_LIST *tables, bool no_errors); @@ -688,6 +690,24 @@ bool check_routine_access(THD *thd,ulong want_access,char *db,char *name, bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table); bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *table_list); bool check_some_routine_access(THD *thd, const char *db, const char *name, bool is_proc); +#else +inline bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables) +{ return false; } +inline bool check_single_table_access(THD *thd, ulong privilege, + TABLE_LIST *tables, bool no_errors) +{ return false; } +inline bool check_routine_access(THD *thd,ulong want_access,char *db, + char *name, bool is_proc, bool no_errors) +{ return false; } +inline bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table) +{ return false; } +inline bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *table_list) +{ return false; } +inline bool check_some_routine_access(THD *thd, const char *db, + const char *name, bool is_proc) +{ return false; } +#endif /*NO_EMBEDDED_ACCESS_CHECKS*/ + bool multi_update_precheck(THD *thd, TABLE_LIST *tables); bool multi_delete_precheck(THD *thd, TABLE_LIST *tables); bool mysql_multi_update_prepare(THD *thd); @@ -992,11 +1012,27 @@ void kill_mysql(void); void close_connection(THD *thd, uint errcode, bool lock); bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, bool *write_to_binlog); +#ifndef NO_EMBEDDED_ACCESS_CHECKS bool check_access(THD *thd, ulong access, const char *db, ulong *save_priv, bool no_grant, bool no_errors, bool schema_db); bool check_table_access(THD *thd, ulong want_access, TABLE_LIST *tables, bool no_errors); bool check_global_access(THD *thd, ulong want_access); +#else +inline bool check_access(THD *thd, ulong access, const char *db, + ulong *save_priv, bool no_grant, bool no_errors, + bool schema_db) +{ + if (save_priv) + *save_priv= GLOBAL_ACLS; + return false; +} +inline bool check_table_access(THD *thd, ulong want_access, TABLE_LIST *tables, + bool no_errors) +{ return false; } +inline bool check_global_access(THD *thd, ulong want_access) +{ return false; } +#endif /*NO_EMBEDDED_ACCESS_CHECKS*/ /* Support routine for SQL parser on partitioning syntax diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index e6d953bcbe1..7334f989905 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -3893,14 +3893,12 @@ create_sp_error: thd->server_status|= SERVER_MORE_RESULTS_EXISTS; } -#ifndef NO_EMBEDDED_ACCESS_CHECKS if (check_routine_access(thd, EXECUTE_ACL, sp->m_db.str, sp->m_name.str, TRUE, FALSE)) { thd->net.no_send_ok= save_no_send_ok; goto error; } -#endif select_limit= thd->variables.select_limit; thd->variables.select_limit= HA_POS_ERROR; @@ -4580,6 +4578,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) 
} +#ifndef NO_EMBEDDED_ACCESS_CHECKS /* Check grants for commands which work only with one table. @@ -4697,7 +4696,6 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, bool dont_check_global_grants, bool no_errors, bool schema_db) { Security_context *sctx= thd->security_ctx; -#ifndef NO_EMBEDDED_ACCESS_CHECKS ulong db_access; /* GRANT command: @@ -4710,7 +4708,6 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, */ bool db_is_pattern= (test(want_access & GRANT_ACL) && dont_check_global_grants); -#endif ulong dummy; DBUG_ENTER("check_access"); DBUG_PRINT("enter",("db: %s want_access: %lu master_access: %lu", @@ -4749,9 +4746,6 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, } } -#ifdef NO_EMBEDDED_ACCESS_CHECKS - DBUG_RETURN(0); -#else if ((sctx->master_access & want_access) == want_access) { /* @@ -4809,7 +4803,6 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, thd->db : "unknown"))); /* purecov: tested */ DBUG_RETURN(TRUE); /* purecov: tested */ -#endif /* NO_EMBEDDED_ACCESS_CHECKS */ } @@ -4834,16 +4827,12 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, bool check_global_access(THD *thd, ulong want_access) { -#ifdef NO_EMBEDDED_ACCESS_CHECKS - return 0; -#else char command[128]; if ((thd->security_ctx->master_access & want_access)) return 0; get_privilege_desc(command, sizeof(command), want_access); my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), command); return 1; -#endif /* NO_EMBEDDED_ACCESS_CHECKS */ } @@ -4933,9 +4922,7 @@ bool check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, bool no_errors) { -#ifndef NO_EMBEDDED_ACCESS_CHECKS TABLE_LIST *org_tables= tables; -#endif TABLE_LIST *first_not_own_table= thd->lex->first_not_own_table(); Security_context *sctx= thd->security_ctx, *backup_ctx= thd->security_ctx; /* @@ -5022,11 +5009,7 @@ check_routine_access(THD *thd, ulong want_access,char *db, char *name, 0, no_errors, 0)) return TRUE; -#ifndef NO_EMBEDDED_ACCESS_CHECKS return check_grant_routine(thd, want_access, tables, is_proc, no_errors); -#else - return FALSE; -#endif } @@ -5116,6 +5099,7 @@ bool check_merge_table_access(THD *thd, char *db, return error; } +#endif /*NO_EMBEDDED_ACCESS_CHECKS*/ /**************************************************************************** Check stack size; Send error if there isn't enough stack to continue -- cgit v1.2.1 From b88cfbad684ed57b058a3e6b63d458516b41db85 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 13 Nov 2007 13:09:59 +0400 Subject: Bug #31868 mysql_server_init crash when language path is not correctly set. When mysql_server_init() interrupts on some error (wrong errmsg file for example) in the middle of it's execution, it doesn't call execute_ddl_log_recovery() so LOCK_gdl mutex isn't init-ed. In this case we shouldn't execute release_ddl_log during cleanup as it uses that mutex inside. BitKeeper/etc/ignore: Added libmysqld/scheduler.cc libmysqld/sql_connect.cc libmysqld/sql_tablespace.cc libmysql_r/client_settings.h to the ignore list libmysqld/lib_sql.cc: Bug #31868 mysql_server_init crash when language path is not correctly set. line moved to clean_up() sql/mysqld.cc: Bug #31868 mysql_server_init crash when language path is not correctly set. release_ddl_log() now can be called from common 'clean_up()' sql/sql_table.cc: Bug #31868 mysql_server_init crash when language path is not correctly set. 
do_release flag added to the global_ddl_log and the construcntor to set it's initial value. Also now release_ddl_log() checks for that flag. --- .bzrignore | 4 ++++ libmysqld/lib_sql.cc | 1 - sql/mysqld.cc | 3 ++- sql/sql_table.cc | 13 ++++++++++--- 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/.bzrignore b/.bzrignore index e1ad5a89015..b3cdfeefb4f 100644 --- a/.bzrignore +++ b/.bzrignore @@ -3004,3 +3004,7 @@ win/vs71cache.txt win/vs8cache.txt zlib/*.ds? zlib/*.vcproj +libmysqld/scheduler.cc +libmysqld/sql_connect.cc +libmysqld/sql_tablespace.cc +libmysql_r/client_settings.h diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 4e525f8447f..e9c54a3bd5c 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -551,7 +551,6 @@ void end_embedded_server() { my_free((char*) copy_arguments_ptr, MYF(MY_ALLOW_ZERO_PTR)); copy_arguments_ptr=0; - release_ddl_log(); clean_up(0); } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index a355c560996..09fe7fc1f2e 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1164,6 +1164,8 @@ void clean_up(bool print_message) if (cleanup_done++) return; /* purecov: inspected */ + release_ddl_log(); + /* make sure that handlers finish up what they have that is dependent on the binlog @@ -3995,7 +3997,6 @@ we force server id to 2, but this MySQL server will not act as a slave."); pthread_cond_wait(&COND_thread_count,&LOCK_thread_count); (void) pthread_mutex_unlock(&LOCK_thread_count); - release_ddl_log(); #if defined(__WIN__) && !defined(EMBEDDED_LIBRARY) if (Service.IsNT() && start_mode) Service.Stop(); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 86d1fe79a00..a8efdac57dc 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -279,7 +279,7 @@ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen) */ -typedef struct st_global_ddl_log +struct st_global_ddl_log { /* We need to adjust buffer size to be able to handle downgrades/upgrades @@ -297,10 +297,12 @@ typedef struct st_global_ddl_log uint name_len; uint io_size; bool inited; + bool do_release; bool recovery_phase; -} GLOBAL_DDL_LOG; + st_global_ddl_log() : inited(false), do_release(false) {} +}; -GLOBAL_DDL_LOG global_ddl_log; +st_global_ddl_log global_ddl_log; pthread_mutex_t LOCK_gdl; @@ -460,6 +462,7 @@ static uint read_ddl_log_header() global_ddl_log.first_used= NULL; global_ddl_log.num_entries= 0; VOID(pthread_mutex_init(&LOCK_gdl, MY_MUTEX_INIT_FAST)); + global_ddl_log.do_release= true; DBUG_RETURN(entry_no); } @@ -1150,6 +1153,9 @@ void release_ddl_log() DDL_LOG_MEMORY_ENTRY *used_list= global_ddl_log.first_used; DBUG_ENTER("release_ddl_log"); + if (!global_ddl_log.do_release) + DBUG_VOID_RETURN; + pthread_mutex_lock(&LOCK_gdl); while (used_list) { @@ -1167,6 +1173,7 @@ void release_ddl_log() global_ddl_log.inited= 0; pthread_mutex_unlock(&LOCK_gdl); VOID(pthread_mutex_destroy(&LOCK_gdl)); + global_ddl_log.do_release= false; DBUG_VOID_RETURN; } -- cgit v1.2.1 From 7e7668e4b5c60e0f1824a85ea5de0d40d7373cc1 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 14 Nov 2007 10:46:34 +0100 Subject: ndb - bug#32359 Increase connection negotiation timeout Improve some weirdness storage/ndb/src/common/transporter/Transporter.cpp: - Increase connection negotiation timeout - return false if trying to connect when already connected (should still be assert) storage/ndb/src/common/transporter/TransporterRegistry.cpp: Use correct "is_connected" checks (although this is kind of bogus and should be cleaned-up) --- storage/ndb/src/common/transporter/Transporter.cpp | 4 ++-- 
storage/ndb/src/common/transporter/TransporterRegistry.cpp | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/storage/ndb/src/common/transporter/Transporter.cpp b/storage/ndb/src/common/transporter/Transporter.cpp index cec018575e0..269a5fba4e9 100644 --- a/storage/ndb/src/common/transporter/Transporter.cpp +++ b/storage/ndb/src/common/transporter/Transporter.cpp @@ -70,7 +70,7 @@ Transporter::Transporter(TransporterRegistry &t_reg, signalIdUsed = _signalId; m_connected = false; - m_timeOutMillis = 1000; + m_timeOutMillis = 30000; m_connect_address.s_addr= 0; if(s_port<0) @@ -101,7 +101,7 @@ Transporter::connect_server(NDB_SOCKET_TYPE sockfd) { if(m_connected) { - DBUG_RETURN(true); // TODO assert(0); + DBUG_RETURN(false); // TODO assert(0); } { diff --git a/storage/ndb/src/common/transporter/TransporterRegistry.cpp b/storage/ndb/src/common/transporter/TransporterRegistry.cpp index 5f5f3c17b2d..848738b2983 100644 --- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp @@ -758,7 +758,8 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis) TCP_Transporter * t = theTCPTransporters[i]; // If the transporter is connected - if (t->isConnected()) { + NodeId nodeId = t->getRemoteNodeId(); + if (is_connected(nodeId) && t->isConnected()) { const NDB_SOCKET_TYPE socket = t->getSocket(); // Find the highest socket value. It will be used by select -- cgit v1.2.1 From 90c05bd8e44c9680041017efd108c0c909927c5b Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 14 Nov 2007 11:52:40 +0100 Subject: ndb - Add new dump 8011 which dumps all subscribers storage/ndb/src/kernel/blocks/suma/Suma.cpp: Add new dump 8011 which dumps all subscribers --- storage/ndb/src/kernel/blocks/suma/Suma.cpp | 48 +++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index 7845b83693c..a4e886b14b6 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -974,6 +974,54 @@ Suma::execDUMP_STATE_ORD(Signal* signal){ } return; } + + if (tCase == 8011) + { + jam(); + Uint32 bucket = signal->theData[1]; + KeyTable::Iterator it; + if (signal->getLength() == 1) + { + jam(); + bucket = 0; + infoEvent("-- Starting dump of subscribers --"); + } + + c_tables.next(bucket, it); + const Uint32 RT_BREAK = 16; + for(Uint32 i = 0; im_tableId, + it.curr.p->m_schemaVersion, + it.curr.p->n_subscribers); + + Ptr ptr; + LocalDLList list(c_subscriberPool, it.curr.p->c_subscribers); + for (list.first(ptr); !ptr.isNull(); list.next(ptr), i++) + { + jam(); + infoEvent(" [ %x %u %u ]", + ptr.p->m_senderRef, + ptr.p->m_senderData, + ptr.p->m_subPtrI); + } + c_tables.next(it); + } + + signal->theData[0] = tCase; + signal->theData[1] = it.bucket; + sendSignalWithDelay(reference(), GSN_DUMP_STATE_ORD, signal, 100, 2); + return; + } } /************************************************************* -- cgit v1.2.1 From 79f8eeed86e81245a6c0b9059e3dc0363895085c Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 14 Nov 2007 13:28:45 +0100 Subject: ndb - add new testtool that does connect/disconnect in loop storage/ndb/test/tools/Makefile.am: add new test tool storage/ndb/test/tools/connect.cpp: New BitKeeper file ``storage/ndb/test/tools/connect.cpp'' --- storage/ndb/test/tools/Makefile.am | 3 +- storage/ndb/test/tools/connect.cpp | 152 +++++++++++++++++++++++++++++++++++++ 2 files changed, 154 insertions(+), 1 
deletion(-) create mode 100644 storage/ndb/test/tools/connect.cpp diff --git a/storage/ndb/test/tools/Makefile.am b/storage/ndb/test/tools/Makefile.am index 1683d4d84ae..da715caa1cb 100644 --- a/storage/ndb/test/tools/Makefile.am +++ b/storage/ndb/test/tools/Makefile.am @@ -13,7 +13,7 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc listen_event eventlog rep_latency +ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc listen_event eventlog rep_latency ndb_connect # transproxy @@ -35,6 +35,7 @@ ndb_cpcc_SOURCES = cpcc.cpp listen_event_SOURCES = listen.cpp eventlog_SOURCES = log_listner.cpp rep_latency_SOURCES = rep_latency.cpp +ndb_connect_SOURCES = connect.cpp include $(top_srcdir)/storage/ndb/config/common.mk.am include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am diff --git a/storage/ndb/test/tools/connect.cpp b/storage/ndb/test/tools/connect.cpp new file mode 100644 index 00000000000..2d3ac34d3e8 --- /dev/null +++ b/storage/ndb/test/tools/connect.cpp @@ -0,0 +1,152 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include +#include +#include +#include +#include + +NDB_STD_OPTS_VARS; + +static int _loop = 25; +static int _sleep = 25; +static int _drop = 1; + +typedef uchar* gptr; + +static struct my_option my_long_options[] = +{ + NDB_STD_OPTS("ndb_desc"), + { "loop", 'l', "loops", + (gptr*) &_loop, (gptr*) &_loop, 0, + GET_INT, REQUIRED_ARG, _loop, 0, 0, 0, 0, 0 }, + { "sleep", 's', "Sleep (ms) between connection attempt", + (gptr*) &_sleep, (gptr*) &_sleep, 0, + GET_INT, REQUIRED_ARG, _sleep, 0, 0, 0, 0, 0 }, + { "drop", 'd', + "Drop event operations before disconnect (0 = no, 1 = yes, else rand", + (gptr*) &_drop, (gptr*) &_drop, 0, + GET_INT, REQUIRED_ARG, _drop, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} +}; + +static void usage() +{ + char desc[] = "This program connects to ndbd, and then disconnects\n"; + ndb_std_print_version(); + my_print_help(my_long_options); + my_print_variables(my_long_options); +} + +int main(int argc, char** argv){ + NDB_INIT(argv[0]); + + const char *load_default_groups[]= { "mysql_cluster",0 }; + load_defaults("my",load_default_groups,&argc,&argv); + int ho_error; +#ifndef DBUG_OFF + opt_debug= "d:t:O,/tmp/ndb_desc.trace"; +#endif + if ((ho_error=handle_options(&argc, &argv, my_long_options, + ndb_std_get_one_option))) + return NDBT_ProgramExit(NDBT_WRONGARGS); + + for (int i = 0; i<_loop; i++) + { + Ndb_cluster_connection con(opt_connect_str); + if(con.connect(12, 5, 1) != 0) + { + ndbout << "Unable to connect to management server." << endl; + return NDBT_ProgramExit(NDBT_FAILED); + } + if (con.wait_until_ready(30,30) != 0) + { + ndbout << "Cluster nodes not ready in 30 seconds." << endl; + return NDBT_ProgramExit(NDBT_FAILED); + } + + Ndb MyNdb(&con, "TEST_DB"); + if(MyNdb.init() != 0){ + ERR(MyNdb.getNdbError()); + return NDBT_ProgramExit(NDBT_FAILED); + } + + Vector ops; + const NdbDictionary::Dictionary * dict= MyNdb.getDictionary(); + for (int j = 0; j < argc; j++) + { + const NdbDictionary::Table * pTab = dict->getTable(argv[j]); + if (pTab == 0) + { + ndbout_c("Failed to retreive table: \"%s\"", argv[j]); + } + + BaseString tmp; + tmp.appfmt("EV-%s", argv[j]); + NdbEventOperation* pOp = MyNdb.createEventOperation(tmp.c_str()); + if ( pOp == NULL ) + { + ndbout << "Event operation creation failed: " << + MyNdb.getNdbError() << endl; + return NDBT_ProgramExit(NDBT_FAILED); + } + + for (int a = 0; a < pTab->getNoOfColumns(); a++) + { + pOp->getValue(pTab->getColumn(a)->getName()); + pOp->getPreValue(pTab->getColumn(a)->getName()); + } + + if (pOp->execute()) + { + ndbout << "operation execution failed: " << pOp->getNdbError() + << endl; + return NDBT_ProgramExit(NDBT_FAILED); + } + ops.push_back(pOp); + } + + if (_sleep) + { + NdbSleep_MilliSleep(10 + rand() % _sleep); + } + + for (Uint32 i = 0; i 50) + goto do_drop; + } + } + } + + return NDBT_ProgramExit(NDBT_OK); +} + +template class Vector; -- cgit v1.2.1 From c33c92d0f2d94e796aa403b80b29477e3400659b Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 14 Nov 2007 22:20:31 +0400 Subject: Bug #31890 Partitions: ORDER BY DESC in InnoDB not working. It's not InnoDB specific bug. Error is in QUEUE code, about the way we handle queue->max_at_top. It's either '0' or '-2' and we do '^' operation to get the proper direction. 
Though queue->compare() function can return '-2' as a result of comparison sometimes. So we'll get queue->compare() ^ queue->max_at_top == 0 (when max_at_top is -2) and _downheap() function code will go wrong way here: ... if (next_index < elements && (queue->compare(queue->first_cmp_arg, queue->root[next_index]+offset_to_key, queue->root[next_index+1]+offset_to_key) ^ queue->max_at_top) > 0) next_index++; ... Fixed by changing max_at_top to be either 1 or -1, doing '* max_at_top' to get proper direction. include/queues.h: Bug #31890 Partitions: ORDER BY DESC in InnoDB not working max_at_top policy changed mysql-test/r/partition.result: Bug #31890 Partitions: ORDER BY DESC in InnoDB not working test result mysql-test/t/partition.test: Bug #31890 Partitions: ORDER BY DESC in InnoDB not working test case mysys/queues.c: Bug #31890 Partitions: ORDER BY DESC in InnoDB not working. queue->max_at_top policy changed - now it can either be '1' or '-1'. We multiply comparison result on max_at_top to get the proper direction. --- include/queues.h | 6 +++--- mysql-test/r/partition.result | 35 +++++++++++++++++++++++++++++++++++ mysql-test/t/partition.test | 27 +++++++++++++++++++++++++++ mysys/queues.c | 36 +++++++++++++++--------------------- 4 files changed, 80 insertions(+), 24 deletions(-) diff --git a/include/queues.h b/include/queues.h index 4fd0f72484e..d01b73ba999 100644 --- a/include/queues.h +++ b/include/queues.h @@ -31,8 +31,8 @@ typedef struct st_queue { void *first_cmp_arg; uint elements; uint max_elements; - uint offset_to_key; /* compare is done on element+offset */ - int max_at_top; /* Set if queue_top gives max */ + uint offset_to_key; /* compare is done on element+offset */ + int max_at_top; /* Normally 1, set to -1 if queue_top gives max */ int (*compare)(void *, uchar *,uchar *); uint auto_extent; } QUEUE; @@ -43,7 +43,7 @@ typedef struct st_queue { #define queue_replaced(queue) _downheap(queue,1) #define queue_set_cmp_arg(queue, set_arg) (queue)->first_cmp_arg= set_arg #define queue_set_max_at_top(queue, set_arg) \ - (queue)->max_at_top= set_arg ? (-1 ^ 1) : 0 + (queue)->max_at_top= set_arg ? 
-1 : 1 typedef int (*queue_compare)(void *,uchar *, uchar *); int init_queue(QUEUE *queue,uint max_elements,uint offset_to_key, diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index b3a498ff0ab..589058c04a6 100644 --- a/mysql-test/r/partition.result +++ b/mysql-test/r/partition.result @@ -1296,4 +1296,39 @@ create table t1 partition by key(s1) partitions 3; insert into t1 values (null,null); drop table t1; +CREATE TABLE t1 +(int_column INT, char_column CHAR(5), +PRIMARY KEY(char_column,int_column)) +PARTITION BY KEY(char_column,int_column) +PARTITIONS 101; +INSERT INTO t1 (int_column, char_column) VALUES +( 39868 ,'zZZRW'), +( 545592 ,'zZzSD'), +( 4936 ,'zzzsT'), +( 9274 ,'ZzZSX'), +( 970185 ,'ZZzTN'), +( 786036 ,'zZzTO'), +( 37240 ,'zZzTv'), +( 313801 ,'zzzUM'), +( 782427 ,'ZZZva'), +( 907955 ,'zZZvP'), +( 453491 ,'zzZWV'), +( 756594 ,'ZZZXU'), +( 718061 ,'ZZzZH'); +SELECT * FROM t1 ORDER BY char_column DESC; +int_column char_column +718061 ZZzZH +756594 ZZZXU +453491 zzZWV +907955 zZZvP +782427 ZZZva +313801 zzzUM +37240 zZzTv +786036 zZzTO +970185 ZZzTN +9274 ZzZSX +4936 zzzsT +545592 zZzSD +39868 zZZRW +DROP TABLE t1; End of 5.1 tests diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test index 524862af135..358ca5eb2cf 100644 --- a/mysql-test/t/partition.test +++ b/mysql-test/t/partition.test @@ -1549,4 +1549,31 @@ while ($cnt) --enable_query_log drop table t1; + +# +# Bug #31890 Partitions: ORDER BY DESC in InnoDB not working +# + +CREATE TABLE t1 +(int_column INT, char_column CHAR(5), +PRIMARY KEY(char_column,int_column)) +PARTITION BY KEY(char_column,int_column) +PARTITIONS 101; +INSERT INTO t1 (int_column, char_column) VALUES +( 39868 ,'zZZRW'), +( 545592 ,'zZzSD'), +( 4936 ,'zzzsT'), +( 9274 ,'ZzZSX'), +( 970185 ,'ZZzTN'), +( 786036 ,'zZzTO'), +( 37240 ,'zZzTv'), +( 313801 ,'zzzUM'), +( 782427 ,'ZZZva'), +( 907955 ,'zZZvP'), +( 453491 ,'zzZWV'), +( 756594 ,'ZZZXU'), +( 718061 ,'ZZzZH'); +SELECT * FROM t1 ORDER BY char_column DESC; +DROP TABLE t1; + --echo End of 5.1 tests diff --git a/mysys/queues.c b/mysys/queues.c index d8a7ca19bee..c0561e41dce 100644 --- a/mysys/queues.c +++ b/mysys/queues.c @@ -61,7 +61,7 @@ int init_queue(QUEUE *queue, uint max_elements, uint offset_to_key, queue->first_cmp_arg=first_cmp_arg; queue->max_elements=max_elements; queue->offset_to_key=offset_to_key; - queue->max_at_top= max_at_top ? (-1 ^ 1) : 0; + queue_set_max_at_top(queue, max_at_top); DBUG_RETURN(0); } @@ -137,7 +137,7 @@ int reinit_queue(QUEUE *queue, uint max_elements, uint offset_to_key, queue->compare=compare; queue->first_cmp_arg=first_cmp_arg; queue->offset_to_key=offset_to_key; - queue->max_at_top= max_at_top ? 
(-1 ^ 1) : 0; + queue_set_max_at_top(queue, max_at_top); resize_queue(queue, max_elements); DBUG_RETURN(0); } @@ -208,16 +208,14 @@ void delete_queue(QUEUE *queue) void queue_insert(register QUEUE *queue, uchar *element) { reg2 uint idx, next; - int cmp; DBUG_ASSERT(queue->elements < queue->max_elements); queue->root[0]= element; idx= ++queue->elements; /* max_at_top swaps the comparison if we want to order by desc */ - while ((cmp= queue->compare(queue->first_cmp_arg, - element + queue->offset_to_key, - queue->root[(next= idx >> 1)] + - queue->offset_to_key)) && - (cmp ^ queue->max_at_top) < 0) + while ((queue->compare(queue->first_cmp_arg, + element + queue->offset_to_key, + queue->root[(next= idx >> 1)] + + queue->offset_to_key) * queue->max_at_top) < 0) { queue->root[idx]= queue->root[next]; idx= next; @@ -287,19 +285,17 @@ void _downheap(register QUEUE *queue, uint idx) while (idx <= half_queue) { - int cmp; next_index=idx+idx; if (next_index < elements && (queue->compare(queue->first_cmp_arg, queue->root[next_index]+offset_to_key, - queue->root[next_index+1]+offset_to_key) ^ + queue->root[next_index+1]+offset_to_key) * queue->max_at_top) > 0) next_index++; if (first && - (((cmp=queue->compare(queue->first_cmp_arg, - queue->root[next_index]+offset_to_key, - element+offset_to_key)) == 0) || - ((cmp ^ queue->max_at_top) > 0))) + (((queue->compare(queue->first_cmp_arg, + queue->root[next_index]+offset_to_key, + element+offset_to_key) * queue->max_at_top) >= 0))) { queue->root[idx]= element; return; @@ -314,7 +310,7 @@ void _downheap(register QUEUE *queue, uint idx) { if ((queue->compare(queue->first_cmp_arg, queue->root[next_index]+offset_to_key, - element+offset_to_key) ^ + element+offset_to_key) * queue->max_at_top) < 0) break; queue->root[idx]=queue->root[next_index]; @@ -334,7 +330,6 @@ void _downheap(register QUEUE *queue, uint idx) { uchar *element; uint elements,half_queue,next_index,offset_to_key; - int cmp; offset_to_key=queue->offset_to_key; element=queue->root[idx]; @@ -346,13 +341,12 @@ void _downheap(register QUEUE *queue, uint idx) if (next_index < elements && (queue->compare(queue->first_cmp_arg, queue->root[next_index]+offset_to_key, - queue->root[next_index+1]+offset_to_key) ^ + queue->root[next_index+1]+offset_to_key) * queue->max_at_top) > 0) next_index++; - if ((cmp=queue->compare(queue->first_cmp_arg, - queue->root[next_index]+offset_to_key, - element+offset_to_key)) == 0 || - (cmp ^ queue->max_at_top) > 0) + if ((queue->compare(queue->first_cmp_arg, + queue->root[next_index]+offset_to_key, + element+offset_to_key) * queue->max_at_top) >= 0) break; queue->root[idx]=queue->root[next_index]; idx=next_index; -- cgit v1.2.1 From d70fe65b965662e05c8cff028b1b241c5cdfbd87 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 15 Nov 2007 14:12:59 +0200 Subject: merge of bug 31928 to 5.1-opt --- mysql-test/r/type_date.result | 1 - 1 file changed, 1 deletion(-) diff --git a/mysql-test/r/type_date.result b/mysql-test/r/type_date.result index 99bdc4ecdfc..7d5dabf1929 100644 --- a/mysql-test/r/type_date.result +++ b/mysql-test/r/type_date.result @@ -211,7 +211,6 @@ a Warnings: Warning 1292 Incorrect date value: '0000-00-00' for column 'a' at row 1 Warning 1292 Incorrect date value: '0000-00-00' for column 'a' at row 1 -Warning 1292 Incorrect date value: '0000-00-00' for column 'a' at row 1 SELECT * FROM t2 WHERE a = '0000-00-00'; a 0000-00-00 -- cgit v1.2.1 From 0cfa3267fb222a8ce15ab80865400ea4293295a5 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 16 Nov 2007 14:46:36 +0400 
Subject: Bug#30294 blackhole engine causes 100% with 2 alter table statements running Implement neccessary shared lock structure for table locks. This is the backport of bug26241 fix. sql/ha_blackhole.cc: Implement neccessary shared lock structure for table locks. sql/ha_blackhole.h: Declare shared structure for table locks sql/handler.cc: added BLACKHOLE_DB case --- sql/ha_blackhole.cc | 122 +++++++++++++++++++++++++++++++++++++++++++++++----- sql/ha_blackhole.h | 18 +++++++- sql/handler.cc | 4 ++ 3 files changed, 132 insertions(+), 12 deletions(-) diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc index 3f4285ec595..93d085bbc88 100644 --- a/sql/ha_blackhole.cc +++ b/sql/ha_blackhole.cc @@ -22,6 +22,14 @@ #ifdef HAVE_BLACKHOLE_DB #include "ha_blackhole.h" +/* Static declarations for shared structures */ + +static pthread_mutex_t blackhole_mutex; +static HASH blackhole_open_tables; +static int blackhole_init= FALSE; + +static st_blackhole_share *get_share(const char *table_name); +static void free_share(st_blackhole_share *share); /* Blackhole storage engine handlerton */ @@ -30,7 +38,7 @@ handlerton blackhole_hton= { SHOW_OPTION_YES, "/dev/null storage engine (anything you write to it disappears)", DB_TYPE_BLACKHOLE_DB, - NULL, + blackhole_db_init, 0, /* slot */ 0, /* savepoint size. */ NULL, /* close_connection */ @@ -70,15 +78,18 @@ const char **ha_blackhole::bas_ext() const int ha_blackhole::open(const char *name, int mode, uint test_if_locked) { DBUG_ENTER("ha_blackhole::open"); - thr_lock_init(&thr_lock); - thr_lock_data_init(&thr_lock,&lock,NULL); + + if (!(share= get_share(name))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + + thr_lock_data_init(&share->lock, &lock, NULL); DBUG_RETURN(0); } int ha_blackhole::close(void) { DBUG_ENTER("ha_blackhole::close"); - thr_lock_delete(&thr_lock); + free_share(share); DBUG_RETURN(0); } @@ -161,17 +172,23 @@ int ha_blackhole::external_lock(THD *thd, int lock_type) } -uint ha_blackhole::lock_count(void) const -{ - DBUG_ENTER("ha_blackhole::lock_count"); - DBUG_RETURN(0); -} - THR_LOCK_DATA **ha_blackhole::store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) { DBUG_ENTER("ha_blackhole::store_lock"); + if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) + { + if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && + lock_type <= TL_WRITE) && !thd->in_lock_tables) + lock_type = TL_WRITE_ALLOW_WRITE; + + if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables) + lock_type = TL_READ; + + lock.type= lock_type; + } + *to++= &lock; DBUG_RETURN(to); } @@ -226,4 +243,89 @@ int ha_blackhole::index_last(byte * buf) DBUG_RETURN(HA_ERR_END_OF_FILE); } + +static st_blackhole_share *get_share(const char *table_name) +{ + st_blackhole_share *share; + uint length; + + length= (uint) strlen(table_name); + pthread_mutex_lock(&blackhole_mutex); + + if (!(share= (st_blackhole_share*) hash_search(&blackhole_open_tables, + (byte*) table_name, length))) + { + if (!(share= (st_blackhole_share*) my_malloc(sizeof(st_blackhole_share) + + length, + MYF(MY_WME | MY_ZEROFILL)))) + goto error; + + share->table_name_length= length; + strmov(share->table_name, table_name); + + if (my_hash_insert(&blackhole_open_tables, (byte*) share)) + { + my_free((gptr) share, MYF(0)); + share= NULL; + goto error; + } + + thr_lock_init(&share->lock); + } + share->use_count++; + +error: + pthread_mutex_unlock(&blackhole_mutex); + return share; +} + +static void free_share(st_blackhole_share *share) +{ + pthread_mutex_lock(&blackhole_mutex); + if (!--share->use_count) + 
hash_delete(&blackhole_open_tables, (byte*) share); + pthread_mutex_unlock(&blackhole_mutex); +} + + +static byte* blackhole_get_key(st_blackhole_share *share, uint *length, + my_bool not_used __attribute__((unused))) +{ + *length= share->table_name_length; + return (byte*) share->table_name; +} + + +bool blackhole_db_init() +{ + DBUG_ENTER("blackhole_db_init"); + if (pthread_mutex_init(&blackhole_mutex, MY_MUTEX_INIT_FAST)) + goto error; + if (hash_init(&blackhole_open_tables, &my_charset_bin, 32, 0, 0, + (hash_get_key) blackhole_get_key, 0, 0)) + { + VOID(pthread_mutex_destroy(&blackhole_mutex)); + } + else + { + blackhole_init= TRUE; + DBUG_RETURN(FALSE); + } +error: + have_blackhole_db= SHOW_OPTION_DISABLED; // If we couldn't use handler + DBUG_RETURN(TRUE); +} + + +bool blackhole_db_end() +{ + if (blackhole_init) + { + hash_free(&blackhole_open_tables); + VOID(pthread_mutex_destroy(&blackhole_mutex)); + } + blackhole_init= 0; + return FALSE; +} + #endif /* HAVE_BLACKHOLE_DB */ diff --git a/sql/ha_blackhole.h b/sql/ha_blackhole.h index 4dcb37c637d..45ed0351457 100644 --- a/sql/ha_blackhole.h +++ b/sql/ha_blackhole.h @@ -17,6 +17,18 @@ #pragma interface /* gcc class implementation */ #endif + +/* + Shared structure for correct LOCK operation +*/ +struct st_blackhole_share { + THR_LOCK lock; + uint use_count; + uint table_name_length; + char table_name[1]; +}; + + /* Class definition for the blackhole storage engine "Dumbest named feature ever" @@ -24,7 +36,7 @@ class ha_blackhole: public handler { THR_LOCK_DATA lock; /* MySQL lock */ - THR_LOCK thr_lock; + st_blackhole_share *share; public: ha_blackhole(TABLE *table_arg); @@ -76,10 +88,12 @@ public: void position(const byte *record); int info(uint flag); int external_lock(THD *thd, int lock_type); - uint lock_count(void) const; int create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); }; + +bool blackhole_db_init(void); +bool blackhole_db_end(void); diff --git a/sql/handler.cc b/sql/handler.cc index bfb7e8c369f..27204ae725b 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -515,6 +515,10 @@ int ha_panic(enum ha_panic_function flag) if (have_berkeley_db == SHOW_OPTION_YES) error|=berkeley_end(); #endif +#ifdef HAVE_BLACKHOLE_DB + if (have_blackhole_db == SHOW_OPTION_YES) + error|= blackhole_db_end(); +#endif #ifdef HAVE_INNOBASE_DB if (have_innodb == SHOW_OPTION_YES) error|=innobase_end(); -- cgit v1.2.1 From 1c1dd1f25c42081c7bf72042ccfcb83896298aab Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 16 Nov 2007 13:58:09 +0300 Subject: Fix for bug #32241: memory corruption due to large index map in 'Range checked for each record' The problem was in incorrectly calculated length of the buffer used to store a hexadecimal representation of an index map in select_describe(). This could result in buffer overrun and stack corruption under some circumstances. Fixed by correcting the calculation. mysql-test/r/explain.result: Added a test case for bug #32241. mysql-test/t/explain.test: Added a test case for bug #32241. sql/sql_select.cc: Corrected the buffer length calculation. Count one hex digit as 4 bits, not 8. 
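The sizing argument is easy to verify with a few standalone lines (the MAX_KEY value below is only an example): a bitmap of N bits needs N/4 hexadecimal digits plus a terminating '\0', so the previous MAX_KEY/8+1 buffer held roughly half of what a fully populated key map can print.

// Hex representation of a key bitmap: 4 bits per hex digit, plus '\0'.
#include <cstdio>

const unsigned MAX_KEY = 64;                    // example bit count

int main()
{
  char buf[MAX_KEY / 4 + 1];                    // correct size: 17 bytes here
  unsigned long long key_map = ~0ULL;           // every key marked usable
  std::snprintf(buf, sizeof(buf), "%llX", key_map);
  std::printf("index map: 0x%s (buffer of %u bytes)\n",
              buf, (unsigned) sizeof(buf));
  return 0;
}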
--- mysql-test/r/explain.result | 20 ++++++++++++++++++++ mysql-test/t/explain.test | 28 ++++++++++++++++++++++++++++ sql/sql_select.cc | 3 ++- 3 files changed, 50 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/explain.result b/mysql-test/r/explain.result index 24ff44945bf..a4c8432d2a4 100644 --- a/mysql-test/r/explain.result +++ b/mysql-test/r/explain.result @@ -87,3 +87,23 @@ Warnings: Note 1003 select '1' AS `f1`,'1' AS `f2` from `test`.`t1` having 1 drop view v1; drop table t1; +CREATE TABLE t1(c INT); +INSERT INTO t1 VALUES (),(); +CREATE TABLE t2 (b INT, +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b)); +INSERT INTO t2 VALUES (),(),(); +EXPLAIN SELECT 1 FROM +(SELECT 1 FROM t2,t1 WHERE b < c GROUP BY 1 LIMIT 1) AS d2; +id select_type table type possible_keys key key_len ref rows Extra +X X X X X X X X X const row not found +X X X X X X X X X +X X X X X X X X X Range checked for each record (index map: 0xFFFFFFFFFF) +DROP TABLE t2; +DROP TABLE t1; diff --git a/mysql-test/t/explain.test b/mysql-test/t/explain.test index 04cf37f457a..c9ae8aceaf6 100644 --- a/mysql-test/t/explain.test +++ b/mysql-test/t/explain.test @@ -66,4 +66,32 @@ explain extended select * from t1 having 1; drop view v1; drop table t1; +# +# Bug #32241: memory corruption due to large index map in 'Range checked for +# each record' +# + +CREATE TABLE t1(c INT); +INSERT INTO t1 VALUES (),(); + +CREATE TABLE t2 (b INT, +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b), +KEY(b),KEY(b),KEY(b),KEY(b),KEY(b)); + +INSERT INTO t2 VALUES (),(),(); + +# We only need to make sure that there is no buffer overrun and the index map +# is displayed correctly +--replace_column 1 X 2 X 3 X 4 X 5 X 6 X 7 X 8 X 9 X +EXPLAIN SELECT 1 FROM + (SELECT 1 FROM t2,t1 WHERE b < c GROUP BY 1 LIMIT 1) AS d2; +DROP TABLE t2; +DROP TABLE t1; + # End of 5.0 tests. diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 24d1639edf1..4ff80185a85 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -15282,7 +15282,8 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, { if (tab->use_quick == 2) { - char buf[MAX_KEY/8+1]; + /* 4 bits per 1 hex digit + terminating '\0' */ + char buf[MAX_KEY / 4 + 1]; extra.append(STRING_WITH_LEN("; Range checked for each " "record (index map: 0x")); extra.append(tab->keys.print(buf)); -- cgit v1.2.1 From 5e9933d7ff7d7fdd0e8b117b0344b127d5f23ffa Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 16 Nov 2007 17:43:15 +0100 Subject: Bug #32180: DATE_ADD treats datetime numeric argument as DATE instead of DATETIME This is a regression from 2007-05-18 when code to zero out the returned struct was added to number_to_datetime(); zero for time_type corresponds to MYSQL_TIMESTAMP_DATE. We now explicitly set the type we return (MYSQL_TIMESTAMP_DATETIME). mysql-test/r/func_time.result: show that DATE_ADD() behaves the same for YYYYMMDDhhmmss given as string and as integer. mysql-test/t/func_time.test: show that DATE_ADD() behaves the same for YYYYMMDDhhmmss given as string and as integer. 
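A simplified model of the regression shows why the explicit assignment matters; the names below are hypothetical and the struct is not the real MYSQL_TIME layout. Zero-initialising the result leaves the type field at enum value 0, which happens to mean DATE, so DATETIME input has to set the type explicitly.

// After memset(), type == TS_DATE by accident; the fix is the explicit
// assignment for input in the YYYYMMDDhhmmss range.
#include <cstdio>
#include <cstring>

enum timestamp_type { TS_DATE = 0, TS_DATETIME = 1 };

struct time_result {
  timestamp_type type;
  long date_part;                               // YYYYMMDD
  long time_part;                               // HHMMSS
};

void number_to_time(long long nr, time_result *res)
{
  std::memset(res, 0, sizeof(*res));            // type silently becomes TS_DATE
  res->date_part = (long) (nr / 1000000LL);
  res->time_part = (long) (nr % 1000000LL);
  if (nr >= 10000101000000LL)                   // full datetime number
    res->type = TS_DATETIME;                    // the assignment the fix adds
}

int main()
{
  time_result r;
  number_to_time(20071108181000LL, &r);
  std::printf("type=%d date=%ld time=%ld\n",
              (int) r.type, r.date_part, r.time_part);
  return 0;
}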
sql-common/my_time.c: explictly set return type in number_to_datetime() --- mysql-test/r/func_time.result | 12 ++++++++++++ mysql-test/t/func_time.test | 10 ++++++++++ sql-common/my_time.c | 9 ++++++++- 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index 74859be4d04..71234385c0d 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -1270,4 +1270,16 @@ select concat(a,ifnull(min(date_format(now(), '%Y-%m-%d')),' ull')) from t1; ERROR HY000: Illegal mix of collations (ascii_general_ci,IMPLICIT) and (latin1_swedish_ci,COERCIBLE) for operation 'concat' set lc_time_names=en_US; drop table t1; +select DATE_ADD('20071108181000', INTERVAL 1 DAY); +DATE_ADD('20071108181000', INTERVAL 1 DAY) +2007-11-09 18:10:00 +select DATE_ADD(20071108181000, INTERVAL 1 DAY); +DATE_ADD(20071108181000, INTERVAL 1 DAY) +2007-11-09 18:10:00 +select DATE_ADD('20071108', INTERVAL 1 DAY); +DATE_ADD('20071108', INTERVAL 1 DAY) +2007-11-09 +select DATE_ADD(20071108, INTERVAL 1 DAY); +DATE_ADD(20071108, INTERVAL 1 DAY) +2007-11-09 End of 5.0 tests diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index c0a449ac3f4..f8249b7cf7c 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -787,4 +787,14 @@ select concat(a,ifnull(min(date_format(now(), '%Y-%m-%d')),' ull')) from t1; set lc_time_names=en_US; drop table t1; +# +# Bug#32180: DATE_ADD treats datetime numeric argument as DATE +# instead of DATETIME +# + +select DATE_ADD('20071108181000', INTERVAL 1 DAY); +select DATE_ADD(20071108181000, INTERVAL 1 DAY); +select DATE_ADD('20071108', INTERVAL 1 DAY); +select DATE_ADD(20071108, INTERVAL 1 DAY); + --echo End of 5.0 tests diff --git a/sql-common/my_time.c b/sql-common/my_time.c index f5d5828e024..a39cc754c88 100644 --- a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -1113,9 +1113,14 @@ longlong number_to_datetime(longlong nr, MYSQL_TIME *time_res, long part1,part2; *was_cut= 0; + bzero((char*) time_res, sizeof(*time_res)); + time_res->time_type=MYSQL_TIMESTAMP_DATE; if (nr == LL(0) || nr >= LL(10000101000000)) + { + time_res->time_type=MYSQL_TIMESTAMP_DATETIME; goto ok; + } if (nr < 101) goto err; if (nr <= (YY_PART_YEAR-1)*10000L+1231L) @@ -1139,6 +1144,9 @@ longlong number_to_datetime(longlong nr, MYSQL_TIME *time_res, } if (nr < 101000000L) goto err; + + time_res->time_type=MYSQL_TIMESTAMP_DATETIME; + if (nr <= (YY_PART_YEAR-1)*LL(10000000000)+LL(1231235959)) { nr= nr+LL(20000000000000); /* YYMMDDHHMMSS, 2000-2069 */ @@ -1152,7 +1160,6 @@ longlong number_to_datetime(longlong nr, MYSQL_TIME *time_res, ok: part1=(long) (nr/LL(1000000)); part2=(long) (nr - (longlong) part1*LL(1000000)); - bzero((char*) time_res, sizeof(*time_res)); time_res->year= (int) (part1/10000L); part1%=10000L; time_res->month= (int) part1 / 100; time_res->day= (int) part1 % 100; -- cgit v1.2.1 From 8a8d634b23f38b1d7eb454036fc734e8e7d27966 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 17 Nov 2007 17:11:05 +0400 Subject: merging --- mysql-test/r/gis.result | 66 ++----------------------------------------------- 1 file changed, 2 insertions(+), 64 deletions(-) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 334a285b93f..84b7b449cb5 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -680,7 +680,7 @@ def test t1 t1 g g 255 4294967295 0 Y 144 0 63 g select asbinary(g) from t1; Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null 
Flags Decimals Charsetnr -def asbinary(g) 252 8192 0 Y 128 0 63 +def asbinary(g) 252 4294967295 0 Y 128 0 63 asbinary(g) drop table t1; create table t1 (a TEXT, b GEOMETRY NOT NULL, SPATIAL KEY(b)); @@ -730,69 +730,6 @@ point(b, b) IS NULL linestring(b) IS NULL polygon(b) IS NULL multipoint(b) IS NU 1 1 1 1 1 1 1 0 1 1 1 1 1 1 drop table t1; -select (asWKT(geomfromwkb((0x000000000140240000000000004024000000000000)))); -(asWKT(geomfromwkb((0x000000000140240000000000004024000000000000)))) -POINT(10 10) -select (asWKT(geomfromwkb((0x010100000000000000000024400000000000002440)))); -(asWKT(geomfromwkb((0x010100000000000000000024400000000000002440)))) -POINT(10 10) -create table t1 (g GEOMETRY); -select * from t1; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def test t1 t1 g g 255 4294967295 0 Y 144 0 63 -g -select asbinary(g) from t1; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def asbinary(g) 252 4294967295 0 Y 128 0 63 -asbinary(g) -drop table t1; -create table t1 (a TEXT, b GEOMETRY NOT NULL, SPATIAL KEY(b)); -alter table t1 disable keys; -load data infile '../../std_data/bad_gis_data.dat' into table t1; -ERROR 01000: Data truncated; NULL supplied to NOT NULL column 'b' at row 1 -alter table t1 enable keys; -drop table t1; -create table t1 (a int, b blob); -insert into t1 values (1, ''), (2, NULL), (3, '1'); -select * from t1; -a b -1 -2 NULL -3 1 -select -geometryfromtext(b) IS NULL, geometryfromwkb(b) IS NULL, astext(b) IS NULL, -aswkb(b) IS NULL, geometrytype(b) IS NULL, centroid(b) IS NULL, -envelope(b) IS NULL, startpoint(b) IS NULL, endpoint(b) IS NULL, -exteriorring(b) IS NULL, pointn(b, 1) IS NULL, geometryn(b, 1) IS NULL, -interiorringn(b, 1) IS NULL, multipoint(b) IS NULL, isempty(b) IS NULL, -issimple(b) IS NULL, isclosed(b) IS NULL, dimension(b) IS NULL, -numgeometries(b) IS NULL, numinteriorrings(b) IS NULL, numpoints(b) IS NULL, -area(b) IS NULL, glength(b) IS NULL, srid(b) IS NULL, x(b) IS NULL, -y(b) IS NULL -from t1; -geometryfromtext(b) IS NULL geometryfromwkb(b) IS NULL astext(b) IS NULL aswkb(b) IS NULL geometrytype(b) IS NULL centroid(b) IS NULL envelope(b) IS NULL startpoint(b) IS NULL endpoint(b) IS NULL exteriorring(b) IS NULL pointn(b, 1) IS NULL geometryn(b, 1) IS NULL interiorringn(b, 1) IS NULL multipoint(b) IS NULL isempty(b) IS NULL issimple(b) IS NULL isclosed(b) IS NULL dimension(b) IS NULL numgeometries(b) IS NULL numinteriorrings(b) IS NULL numpoints(b) IS NULL area(b) IS NULL glength(b) IS NULL srid(b) IS NULL x(b) IS NULL y(b) IS NULL -1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 -1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 -1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 -select -within(b, b) IS NULL, contains(b, b) IS NULL, overlaps(b, b) IS NULL, -equals(b, b) IS NULL, disjoint(b, b) IS NULL, touches(b, b) IS NULL, -intersects(b, b) IS NULL, crosses(b, b) IS NULL -from t1; -within(b, b) IS NULL contains(b, b) IS NULL overlaps(b, b) IS NULL equals(b, b) IS NULL disjoint(b, b) IS NULL touches(b, b) IS NULL intersects(b, b) IS NULL crosses(b, b) IS NULL -1 1 1 1 1 1 1 1 -1 1 1 1 1 1 1 1 -1 1 1 1 1 1 1 1 -select -point(b, b) IS NULL, linestring(b) IS NULL, polygon(b) IS NULL, multipoint(b) IS NULL, -multilinestring(b) IS NULL, multipolygon(b) IS NULL, -geometrycollection(b) IS NULL -from t1; -point(b, b) IS NULL linestring(b) IS NULL polygon(b) IS NULL multipoint(b) IS NULL multilinestring(b) IS NULL 
multipolygon(b) IS NULL geometrycollection(b) IS NULL -0 1 1 1 1 1 1 -1 1 1 1 1 1 1 -0 1 1 1 1 1 1 -drop table t1; CREATE TABLE t1(a POINT) ENGINE=MyISAM; INSERT INTO t1 VALUES (NULL); SELECT * FROM t1; @@ -1020,3 +957,4 @@ COUNT(*) 2 DROP TABLE t1, t2; End of 5.0 tests +End of 5.0 tests -- cgit v1.2.1 From 99054db64c8a41acbe570756e286a8d78811197c Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 18 Nov 2007 00:02:55 +0400 Subject: Fixed bug #32335. Comparison of a BIGINT NOT NULL column with a constant arithmetic expression that evaluates to NULL caused error 1048: "Column '...' cannot be null". Made convert_constant_item() check if the constant expression is NULL before attempting to store it in a field. Attempts to store NULL in a NOT NULL field caused query errors. sql/item_cmpfunc.cc: Fixed bug #32335. 1. Made convert_constant_item() check if the constant expression is NULL before attempting to store it in a field. Attempts to store NULL in a NOT NULL field caused query errors. 2. Also minor bug has been fixed: the thd->count_cuted_fields value was not restored in case of successful conversion. mysql-test/t/select.test: Added test case for bug #32335. mysql-test/r/select.result: Added test case for bug #32335. --- mysql-test/r/select.result | 6 ++++++ mysql-test/t/select.test | 11 +++++++++++ sql/item_cmpfunc.cc | 3 ++- 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/select.result b/mysql-test/r/select.result index a1ba58a536c..3ca84bcf34b 100644 --- a/mysql-test/r/select.result +++ b/mysql-test/r/select.result @@ -4322,4 +4322,10 @@ c3 DROP TABLE t1; DROP TABLE t2; +CREATE TABLE t1 (c1 BIGINT NOT NULL); +INSERT INTO t1 (c1) VALUES (1); +SELECT * FROM t1 WHERE c1 > NULL + 1; +c1 +DROP TABLE t1; + End of 5.0 tests diff --git a/mysql-test/t/select.test b/mysql-test/t/select.test index 31c8a3f7d11..c48f2771aa8 100644 --- a/mysql-test/t/select.test +++ b/mysql-test/t/select.test @@ -3661,4 +3661,15 @@ DROP TABLE t2; ########################################################################### +# +# Bug #32335: Error on BIGINT > NULL + 1 +# + +CREATE TABLE t1 (c1 BIGINT NOT NULL); +INSERT INTO t1 (c1) VALUES (1); +SELECT * FROM t1 WHERE c1 > NULL + 1; +DROP TABLE t1; + +--echo + --echo End of 5.0 tests diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 85ec8fa40d6..f9744baf19e 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -350,11 +350,12 @@ static bool convert_constant_item(THD *thd, Field *field, Item **item) thd->variables.sql_mode= (orig_sql_mode & ~MODE_NO_ZERO_DATE) | MODE_INVALID_DATES; thd->count_cuted_fields= CHECK_FIELD_IGNORE; - if (!(*item)->save_in_field(field, 1) && !((*item)->null_value)) + if (!(*item)->is_null() && !(*item)->save_in_field(field, 1)) { Item *tmp=new Item_int_with_ref(field->val_int(), *item, test(field->flags & UNSIGNED_FLAG)); thd->variables.sql_mode= orig_sql_mode; + thd->count_cuted_fields= orig_count_cuted_fields; if (tmp) thd->change_item_tree(item, tmp); return 1; // Item was replaced -- cgit v1.2.1 From fe54b274db6b900aa7911769f860dca721a6e81f Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 18 Nov 2007 00:32:06 +0400 Subject: Bug #32211 Test 'windows' and 'windows_shm' failed for embedded server failing 'INSTALL PLUGIN' statement doesn't work in embedded server as we disable library loading there. Fixed by enabling loading libraries (#define HAVE_DLOPEN), what also makes UDF working in the embedded server. 
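
For context on why a single -DHAVE_DLOPEN is enough here: the library-loading code paths that INSTALL PLUGIN and CREATE FUNCTION ... SONAME depend on are guarded by that macro, so leaving it undefined compiles them out of the embedded library entirely. The sketch below is an illustration only, not part of the patch; load_udf_symbol() and its arguments are hypothetical names, and plain POSIX dlopen()/dlsym() stand in for the platform wrappers the server actually uses.

  #ifdef HAVE_DLOPEN
  #include <dlfcn.h>
  #endif
  #include <stdio.h>

  /* Hypothetical helper (illustration only): resolve one symbol from a
     shared library the way UDF/plugin loading does when HAVE_DLOPEN is
     compiled in.  Returns 0 on success, 1 on failure. */
  static int load_udf_symbol(const char *so_path, const char *sym_name)
  {
  #ifdef HAVE_DLOPEN
    void *handle= dlopen(so_path, RTLD_NOW);
    if (handle == NULL)
    {
      fprintf(stderr, "dlopen failed: %s\n", dlerror());
      return 1;
    }
    if (dlsym(handle, sym_name) == NULL)
    {
      fprintf(stderr, "dlsym failed: %s\n", dlerror());
      dlclose(handle);
      return 1;
    }
    dlclose(handle);              /* a real server keeps the handle open */
    return 0;
  #else
    /* Library loading is compiled out entirely, so INSTALL PLUGIN and
       CREATE FUNCTION ... SONAME cannot work; this was the state of the
       embedded library before -DHAVE_DLOPEN was added for libmysqld. */
    (void) so_path;
    (void) sym_name;
    return 1;
  #endif
  }

With the macro undefined only the #else branch remains, which is why the embedded server rejected plugin and UDF loading before this change.
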
include/mysql_embed.h: Bug #32211 Test 'windows' and 'windows_shm' failed for embedded server Let loading libraries in the embedded server libmysqld/CMakeLists.txt: Bug #32211 Test 'windows' and 'windows_shm' failed for embedded server let loading libraries in the embedded server mysql-test/t/windows.test: Bug #32211 Test 'windows' and 'windows_shm' failed for embedded server make sure proc_1() doesn't exists before we start --- include/mysql_embed.h | 1 - libmysqld/CMakeLists.txt | 2 +- mysql-test/t/windows.test | 3 +++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/include/mysql_embed.h b/include/mysql_embed.h index 7416283d83d..4a7fd3ef63c 100644 --- a/include/mysql_embed.h +++ b/include/mysql_embed.h @@ -21,7 +21,6 @@ /* TODO HF add #undef HAVE_VIO if we don't want client in embedded library */ #undef HAVE_PSTACK /* No stacktrace */ -#undef HAVE_DLOPEN /* No udf functions */ #undef HAVE_OPENSSL #undef HAVE_SMEM /* No shared memory */ #undef HAVE_NDBCLUSTER_DB /* No NDB cluster */ diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt index fb3316c303c..9fa7d46d466 100644 --- a/libmysqld/CMakeLists.txt +++ b/libmysqld/CMakeLists.txt @@ -22,7 +22,7 @@ IF(WIN32) ADD_DEFINITIONS(-DUSE_TLS) ENDIF(WIN32) -ADD_DEFINITIONS(-DMYSQL_SERVER -DEMBEDDED_LIBRARY) +ADD_DEFINITIONS(-DMYSQL_SERVER -DEMBEDDED_LIBRARY -DHAVE_DLOPEN) INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/libmysqld diff --git a/mysql-test/t/windows.test b/mysql-test/t/windows.test index 0be81a95b9a..92ca0c3a3ec 100644 --- a/mysql-test/t/windows.test +++ b/mysql-test/t/windows.test @@ -36,6 +36,9 @@ EXPLAIN SELECT * FROM t1 WHERE b = (SELECT max(2)); --echo End of 5.0 tests. +--disable_warnings +drop procedure if exists proc_1; +--enable_warnings # # Bug #20665: All commands supported in Stored Procedures should work in # Prepared Statements -- cgit v1.2.1 From d6f10342b13ed73633b0a14f3bca6cf2807ef00e Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 18 Nov 2007 17:33:12 +0400 Subject: test fixed --- mysql-test/r/windows.result | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/r/windows.result b/mysql-test/r/windows.result index c97d3dd8867..e58a891838a 100644 --- a/mysql-test/r/windows.result +++ b/mysql-test/r/windows.result @@ -19,6 +19,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables 2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used End of 5.0 tests. +drop procedure if exists proc_1; create procedure proc_1() install plugin my_plug soname '\\root\\some_plugin.dll'; call proc_1(); ERROR HY000: No paths allowed for shared library -- cgit v1.2.1 From da1efa3387e62df85d48f05e81ac9b324c455e83 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 19 Nov 2007 11:03:03 +0400 Subject: Bug #30284 spatial key corruption. SPATIAL key is fine actually, but the chk_key() function mistakenly returns error. It tries to compare checksums of btree and SPATIAL keys while the checksum for the SPATIAL isn't calculated (always 0). Same thing with FULLTEXT keys is handled using full_text_keys counter, so fixed by counting both SPATIAL and FULLTEXT keys in that counter. myisam/mi_check.c: Bug #30284 spatial key corruption full_text_keys counts both FULL_TEXT and SPATIAL keys mysql-test/r/gis.result: Bug #30284 spatial key corruption test result mysql-test/t/gis.test: Bug #30284 spatial key corruption. 
test case --- myisam/mi_check.c | 2 +- mysql-test/r/gis.result | 8 ++++++++ mysql-test/t/gis.test | 11 +++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/myisam/mi_check.c b/myisam/mi_check.c index ce8fb04874e..64aa0b76877 100644 --- a/myisam/mi_check.c +++ b/myisam/mi_check.c @@ -454,7 +454,7 @@ int chk_key(MI_CHECK *param, register MI_INFO *info) if ((!(param->testflag & T_SILENT))) printf ("- check data record references index: %d\n",key+1); - if (keyinfo->flag & HA_FULLTEXT) + if (keyinfo->flag & (HA_FULLTEXT | HA_SPATIAL)) full_text_keys++; if (share->state.key_root[key] == HA_OFFSET_ERROR && (info->state->records == 0 || keyinfo->flag & HA_FULLTEXT)) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 55f70e59fcf..3c5b0491148 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -730,4 +730,12 @@ select geomfromtext(col9,col89) as a from t1; a NULL DROP TABLE t1; +create table t1(col1 geometry not null,col15 geometrycollection not +null,spatial index(col15),index(col1(15)))engine=myisam; +insert into t1 set col15 = GeomFromText('POINT(6 5)'); +insert into t1 set col15 = GeomFromText('POINT(6 5)'); +check table t1 extended; +Table Op Msg_type Msg_text +test.t1 check status OK +drop table t1; End of 4.1 tests diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index cf5c3b31bc1..77e73e0d590 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -427,4 +427,15 @@ INSERT INTO `t1` VALUES ('','0000-00-00'); select geomfromtext(col9,col89) as a from t1; DROP TABLE t1; +# +# Bug #30284 spatial key corruption +# + +create table t1(col1 geometry not null,col15 geometrycollection not +null,spatial index(col15),index(col1(15)))engine=myisam; +insert into t1 set col15 = GeomFromText('POINT(6 5)'); +insert into t1 set col15 = GeomFromText('POINT(6 5)'); +check table t1 extended; +drop table t1; + --echo End of 4.1 tests -- cgit v1.2.1 From b15802298b53f0627ab10739c1e54cfc22434e1a Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 19 Nov 2007 12:42:25 +0400 Subject: Bug#32158 Crash in open_table_from_share, on mysql_unpack_partition errors added check for the result of mysql_unpack_partition() sql/table.cc: added check for the result of mysql_unpack_partition() --- sql/table.cc | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/sql/table.cc b/sql/table.cc index c3ddb809b9e..c30407920f4 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1787,13 +1787,18 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, outparam, is_create_table, share->default_part_db_type, &work_part_info_used); - if (!tmp) - outparam->part_info->is_auto_partitioned= share->auto_partitioned; + if (tmp) + { + thd->stmt_arena= backup_stmt_arena_ptr; + thd->restore_active_arena(&part_func_arena, &backup_arena); + goto partititon_err; + } + outparam->part_info->is_auto_partitioned= share->auto_partitioned; DBUG_PRINT("info", ("autopartitioned: %u", share->auto_partitioned)); /* we should perform the fix_partition_func in either local or caller's arena depending on work_part_info_used value */ - if (!tmp && !work_part_info_used) + if (!work_part_info_used) tmp= fix_partition_func(thd, outparam, is_create_table); thd->stmt_arena= backup_stmt_arena_ptr; thd->restore_active_arena(&part_func_arena, &backup_arena); @@ -1803,6 +1808,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, tmp= fix_partition_func(thd, outparam, is_create_table); outparam->part_info->item_free_list= 
part_func_arena.free_list; } +partititon_err: if (tmp) { if (is_create_table) -- cgit v1.2.1 From 7d50a31b72be061ea6b9200b8118b8d01e60cf9c Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 19 Nov 2007 10:47:26 +0100 Subject: ndb - bug#22696 Fix handling of multiple redo phases - set correct state - handle 4 phases, by moving START_FRAGCONF to end of phase instead of beginning... storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp: remove unused state ACTIVE_REMOVE_AFTER remove unused variable nextLcp remove unused methods (BUILD_INDX) add counter of outstanding GSN_EXEC_FRAGREQ remove c_redo_complete_fragments cause it fits badly with functional multi-redo-phases storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp: remove unused stuff storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: 1) set correct state before starting next non-empty REDO phase 2) move GSN_START_FRAGCONF to execFRAGCONF instead of when sending GSN_EXEC_FRAGREQ or fragments with 4 redo-phases would never be complete... storage/ndb/test/ndbapi/testSystemRestart.cpp: testcase storage/ndb/test/run-test/daily-basic-tests.txt: testcase --- storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 19 +- storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp | 4 - storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 201 +++++----------------- storage/ndb/test/ndbapi/testSystemRestart.cpp | 41 ++++- storage/ndb/test/run-test/daily-basic-tests.txt | 3 + 5 files changed, 96 insertions(+), 172 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index 95cad98b81c..62add0cf503 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -585,7 +585,6 @@ public: enum ExecSrStatus { IDLE = 0, - ACTIVE_REMOVE_AFTER = 1, ACTIVE = 2 }; /** @@ -869,11 +868,6 @@ public: * heard of. */ Uint8 fragDistributionKey; - /** - * The identity of the next local checkpoint this fragment - * should perform. - */ - Uint8 nextLcp; /** * How many local checkpoints does the fragment contain */ @@ -2097,10 +2091,6 @@ private: void execEXEC_SRCONF(Signal* signal); void execREAD_PSEUDO_REQ(Signal* signal); - void build_acc(Signal*, Uint32 fragPtrI); - void execBUILDINDXREF(Signal*signal); - void execBUILDINDXCONF(Signal*signal); - void execDUMP_STATE_ORD(Signal* signal); void execACC_ABORTCONF(Signal* signal); void execNODE_FAILREP(Signal* signal); @@ -2780,7 +2770,13 @@ private: /*THIS VARIABLE KEEPS TRACK OF HOW MANY FRAGMENTS THAT PARTICIPATE IN */ /*EXECUTING THE LOG. IF ZERO WE DON'T NEED TO EXECUTE THE LOG AT ALL. */ /* ------------------------------------------------------------------------- */ - UintR cnoFragmentsExecSr; + Uint32 cnoFragmentsExecSr; + + /** + * This is no of sent GSN_EXEC_FRAGREQ during this log phase + */ + Uint32 cnoOutstandingExecFragReq; + /* ------------------------------------------------------------------------- */ /*THIS VARIABLE KEEPS TRACK OF WHICH OF THE FIRST TWO RESTART PHASES THAT */ /*HAVE COMPLETED. 
*/ @@ -2801,7 +2797,6 @@ private: DLFifoList c_lcp_waiting_fragments; // StartFragReq'ed DLFifoList c_lcp_restoring_fragments; // Restoring as we speek DLFifoList c_lcp_complete_fragments; // Restored - DLFifoList c_redo_complete_fragments; // Redo'ed /* ------------------------------------------------------------------------- */ /*USED DURING SYSTEM RESTART, INDICATES THE OLDEST GCI THAT CAN BE RESTARTED */ diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp index db6d201575f..b3a3d512da7 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp @@ -168,7 +168,6 @@ Dblqh::Dblqh(Block_context& ctx): c_lcp_waiting_fragments(c_fragment_pool), c_lcp_restoring_fragments(c_fragment_pool), c_lcp_complete_fragments(c_fragment_pool), - c_redo_complete_fragments(c_fragment_pool), m_commitAckMarkerHash(m_commitAckMarkerPool), c_scanTakeOverHash(c_scanRecordPool) { @@ -295,9 +294,6 @@ Dblqh::Dblqh(Block_context& ctx): addRecSignal(GSN_READ_PSEUDO_REQ, &Dblqh::execREAD_PSEUDO_REQ); - addRecSignal(GSN_BUILDINDXREF, &Dblqh::execBUILDINDXREF); - addRecSignal(GSN_BUILDINDXCONF, &Dblqh::execBUILDINDXCONF); - addRecSignal(GSN_DEFINE_BACKUP_REF, &Dblqh::execDEFINE_BACKUP_REF); addRecSignal(GSN_DEFINE_BACKUP_CONF, &Dblqh::execDEFINE_BACKUP_CONF); diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index e0449e08ddd..51dff6aa4f4 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -356,7 +356,6 @@ void Dblqh::execCONTINUEB(Signal* signal) break; case ZSR_PHASE3_START: jam(); - signal->theData[0] = data0; srPhase3Start(signal); return; break; @@ -428,25 +427,25 @@ void Dblqh::execCONTINUEB(Signal* signal) if (fragptr.i != RNIL) { jam(); - c_redo_complete_fragments.getPtr(fragptr); + c_lcp_complete_fragments.getPtr(fragptr); signal->theData[0] = fragptr.p->tabRef; signal->theData[1] = fragptr.p->fragId; sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); Ptr save = fragptr; - c_redo_complete_fragments.next(fragptr); + c_lcp_complete_fragments.next(fragptr); signal->theData[0] = ZENABLE_EXPAND_CHECK; signal->theData[1] = fragptr.i; sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB); - c_redo_complete_fragments.remove(save); + c_lcp_complete_fragments.remove(save); return; } else { jam(); cstartRecReq = 2; - ndbrequire(c_redo_complete_fragments.isEmpty()); + ndbrequire(c_lcp_complete_fragments.isEmpty()); StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend(); conf->startingNodeId = getOwnNodeId(); sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal, @@ -1121,7 +1120,6 @@ void Dblqh::execLQHFRAGREQ(Signal* signal) Uint32 minRowsHigh = req->minRowsHigh; Uint32 tschemaVersion = req->schemaVersion; Uint32 ttupKeyLength = req->keyLength; - Uint32 nextLcp = req->nextLCP; Uint32 noOfKeyAttr = req->noOfKeyAttr; Uint32 noOfCharsets = req->noOfCharsets; Uint32 checksumIndicator = req->checksumIndicator; @@ -1214,7 +1212,6 @@ void Dblqh::execLQHFRAGREQ(Signal* signal) fragptr.p->lcpFlag = Fragrecord::LCP_STATE_FALSE; }//if - fragptr.p->nextLcp = nextLcp; //---------------------------------------------- // For node restarts it is not necessarily zero //---------------------------------------------- @@ -8939,6 +8936,9 @@ void Dblqh::storedProcConfScanLab(Signal* signal) case Fragrecord::REMOVING: jam(); default: + jamLine(fragptr.p->fragStatus); + 
ndbout_c("fragptr.p->fragStatus: %u", + fragptr.p->fragStatus); ndbrequire(false); break; }//switch @@ -14119,6 +14119,8 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal) const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0]; jamEntry(); + printSTART_FRAG_REQ(stdout, signal->getDataPtr(), signal->getLength(), number()); + tabptr.i = startFragReq->tableId; Uint32 fragId = startFragReq->fragId; @@ -14141,15 +14143,12 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal) if (lcpNo == (MAX_LCP_STORED - 1)) { jam(); fragptr.p->lcpId[lcpNo] = lcpId; - fragptr.p->nextLcp = 0; } else if (lcpNo < (MAX_LCP_STORED - 1)) { jam(); fragptr.p->lcpId[lcpNo] = lcpId; - fragptr.p->nextLcp = lcpNo + 1; } else { ndbrequire(lcpNo == ZNIL); jam(); - fragptr.p->nextLcp = 0; }//if fragptr.p->srNoLognodes = noOfLogNodes; fragptr.p->logFlag = Fragrecord::STATE_FALSE; @@ -14181,19 +14180,9 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal) */ c_lcp_complete_fragments.add(fragptr); - if(lcpNo == ZNIL) - { - signal->theData[0] = tabptr.i; - signal->theData[1] = fragId; - sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); - } - - if (getNodeState().getNodeRestartInProgress()) - { - jam(); - fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION; - } - + signal->theData[0] = tabptr.i; + signal->theData[1] = fragId; + sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); c_tup->disk_restart_lcp_id(tabptr.i, fragId, RNIL); jamEntry(); return; @@ -14395,65 +14384,9 @@ void Dblqh::execSTART_RECCONF(Signal* signal) return; } - c_lcp_complete_fragments.first(fragptr); - build_acc(signal, fragptr.i); - return; -}//Dblqh::execSTART_RECCONF() - -void -Dblqh::build_acc(Signal* signal, Uint32 fragPtrI) -{ - fragptr.i = fragPtrI; - while(fragptr.i != RNIL) - { - c_lcp_complete_fragments.getPtr(fragptr); - tabptr.i = fragptr.p->tabRef; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - - if(true || fragptr.i != tabptr.p->fragrec[0]) - { - // Only need to send 1 build per table, TUP will rebuild all - fragptr.i = fragptr.p->nextList; - continue; - } - - BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(fragptr.i); - req->setRequestType(BuildIndxReq::RT_SYSTEMRESTART); - req->setBuildId(0); // not used - req->setBuildKey(0); // not used - req->setIndexType(RNIL); - req->setIndexId(RNIL); - req->setTableId(tabptr.i); - req->setParallelism(0); - - sendSignal(DBTUP_REF, GSN_BUILDINDXREQ, signal, - BuildIndxReq::SignalLength, JBB); - return; - } - startExecSr(signal); } -void -Dblqh::execBUILDINDXREF(Signal* signal) -{ - ndbrequire(false); -} - -void -Dblqh::execBUILDINDXCONF(Signal* signal) -{ - BuildIndxConf* conf = (BuildIndxConf*)signal->getDataPtrSend(); - Uint32 fragPtrI = conf->getConnectionPtr(); - - fragptr.i = fragPtrI; - c_fragment_pool.getPtr(fragptr); - infoEvent("LQH: primary key index %u rebuild done", fragptr.p->tabRef); - build_acc(signal, fragptr.p->nextList); -} - /* ***************> */ /* START_RECREF > */ /* ***************> */ @@ -14472,9 +14405,9 @@ void Dblqh::execSTART_EXEC_SR(Signal* signal) fragptr.i = signal->theData[0]; Uint32 next = RNIL; - if (fragptr.i == RNIL) { + if (fragptr.i == RNIL) + { jam(); - ndbrequire(cnoOfNodes < MAX_NDB_NODES); /* ---------------------------------------------------------------------- * NO MORE FRAGMENTS TO START EXECUTING THE LOG ON. 
* SEND EXEC_SRREQ TO ALL LQH TO INDICATE THAT THIS NODE WILL @@ -14490,10 +14423,15 @@ void Dblqh::execSTART_EXEC_SR(Signal* signal) } else { jam(); c_lcp_complete_fragments.getPtr(fragptr); - if (fragptr.p->srNoLognodes > csrPhasesCompleted) { + next = fragptr.p->nextList; + + if (fragptr.p->srNoLognodes > csrPhasesCompleted) + { jam(); + cnoOutstandingExecFragReq++; + Uint32 index = csrPhasesCompleted; - arrGuard(index, 4); + arrGuard(index, MAX_LOG_EXEC); BlockReference ref = calcLqhBlockRef(fragptr.p->srLqhLognode[index]); fragptr.p->srStatus = Fragrecord::SS_STARTED; @@ -14512,34 +14450,7 @@ void Dblqh::execSTART_EXEC_SR(Signal* signal) sendSignal(ref, GSN_EXEC_FRAGREQ, signal, ExecFragReq::SignalLength, JBB); - next = fragptr.p->nextList; - } else { - jam(); - /* -------------------------------------------------------------------- - * THIS FRAGMENT IS NOW FINISHED WITH THE SYSTEM RESTART. IT DOES - * NOT NEED TO PARTICIPATE IN ANY MORE PHASES. REMOVE IT FROM THE - * LIST OF COMPLETED FRAGMENTS TO EXECUTE THE LOG ON. - * ALSO SEND START_FRAGCONF TO DIH AND SET THE STATE TO ACTIVE ON THE - * FRAGMENT. - * ------------------------------------------------------------------- */ - next = fragptr.p->nextList; - c_lcp_complete_fragments.remove(fragptr); - c_redo_complete_fragments.add(fragptr); - - if (!getNodeState().getNodeRestartInProgress()) - { - fragptr.p->logFlag = Fragrecord::STATE_TRUE; - fragptr.p->fragStatus = Fragrecord::FSACTIVE; - } - else - { - fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION; - } - signal->theData[0] = fragptr.p->srUserptr; - signal->theData[1] = cownNodeid; - sendSignal(fragptr.p->srBlockref, GSN_START_FRAGCONF, signal, 2, JBB); - - } //if + } signal->theData[0] = next; sendSignal(cownref, GSN_START_EXEC_SR, signal, 1, JBB); }//if @@ -14560,24 +14471,8 @@ void Dblqh::execEXEC_FRAGREQ(Signal* signal) tabptr.i = execFragReq->tableId; Uint32 fragId = execFragReq->fragId; ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - if (!getFragmentrec(signal, fragId)) { - jam(); - if (!insertFragrec(signal, fragId)) { - jam(); - sendExecFragRefLab(signal); - return; - }//if - initFragrec(signal, tabptr.i, fragId, ZLOG_NODE); - fragptr.p->execSrStatus = Fragrecord::ACTIVE_REMOVE_AFTER; - } else { - jam(); - if (fragptr.p->execSrStatus == Fragrecord::ACTIVE_REMOVE_AFTER) { - jam(); - fragptr.p->execSrStatus = Fragrecord::ACTIVE_REMOVE_AFTER; - } else { - jam(); - }//if - }//if + ndbrequire(getFragmentrec(signal, fragId)); + ndbrequire(fragptr.p->execSrNoReplicas < 4); fragptr.p->execSrBlockref[fragptr.p->execSrNoReplicas] = execFragReq->userRef; fragptr.p->execSrUserptr[fragptr.p->execSrNoReplicas] = execFragReq->userPtr; @@ -14610,6 +14505,21 @@ void Dblqh::execEXEC_FRAGCONF(Signal* signal) fragptr.i = signal->theData[0]; c_fragment_pool.getPtr(fragptr); fragptr.p->srStatus = Fragrecord::SS_COMPLETED; + + ndbrequire(cnoOutstandingExecFragReq); + cnoOutstandingExecFragReq--; + if (fragptr.p->srNoLognodes == csrPhasesCompleted + 1) + { + jam(); + + fragptr.p->logFlag = Fragrecord::STATE_TRUE; + fragptr.p->fragStatus = Fragrecord::FSACTIVE; + + signal->theData[0] = fragptr.p->srUserptr; + signal->theData[1] = cownNodeid; + sendSignal(fragptr.p->srBlockref, GSN_START_FRAGCONF, signal, 2, JBB); + } + return; }//Dblqh::execEXEC_FRAGCONF() @@ -14633,6 +14543,7 @@ void Dblqh::execEXEC_SRCONF(Signal* signal) Uint32 nodeId = signal->theData[0]; arrGuard(nodeId, MAX_NDB_NODES); m_sr_exec_sr_conf.set(nodeId); + if (!m_sr_nodes.equal(m_sr_exec_sr_conf)) { jam(); @@ -14653,16 
+14564,8 @@ void Dblqh::execEXEC_SRCONF(Signal* signal) * NOW CHECK IF ALL FRAGMENTS IN THIS PHASE HAVE COMPLETED. IF SO START THE * NEXT PHASE. * ----------------------------------------------------------------------- */ - c_lcp_complete_fragments.first(fragptr); - while (fragptr.i != RNIL) - { - jam(); - if(fragptr.p->srStatus != Fragrecord::SS_COMPLETED) - { - return; - } - c_lcp_complete_fragments.next(fragptr); - } + ndbrequire(cnoOutstandingExecFragReq == 0); + execSrCompletedLab(signal); return; }//Dblqh::execEXEC_SRCONF() @@ -14718,6 +14621,7 @@ void Dblqh::execSrCompletedLab(Signal* signal) * THERE ARE YET MORE PHASES TO RESTART. * WE MUST INITIALISE DATA FOR NEXT PHASE AND SEND START SIGNAL. * --------------------------------------------------------------------- */ + csrPhaseStarted = ZSR_PHASE1_COMPLETED; // Set correct state first... startExecSr(signal); }//if return; @@ -14791,7 +14695,8 @@ void Dblqh::srPhase3Start(Signal* signal) UintR tsrPhaseStarted; jamEntry(); - tsrPhaseStarted = signal->theData[0]; + + tsrPhaseStarted = signal->theData[1]; if (csrPhaseStarted == ZSR_NO_PHASE_STARTED) { jam(); csrPhaseStarted = tsrPhaseStarted; @@ -15968,18 +15873,6 @@ void Dblqh::sendExecConf(Signal* signal) sendSignal(fragptr.p->execSrBlockref[i], GSN_EXEC_FRAGCONF, signal, 1, JBB); }//for - if (fragptr.p->execSrStatus == Fragrecord::ACTIVE) { - jam(); - fragptr.p->execSrStatus = Fragrecord::IDLE; - } else { - ndbrequire(fragptr.p->execSrStatus == Fragrecord::ACTIVE_REMOVE_AFTER); - jam(); - Uint32 fragId = fragptr.p->fragId; - tabptr.i = fragptr.p->tabRef; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - c_lcp_complete_fragments.remove(fragptr); - deleteFragrec(fragId); - }//if fragptr.p->execSrNoReplicas = 0; }//if loopCount++; @@ -16259,7 +16152,7 @@ void Dblqh::srFourthComp(Signal* signal) if(cstartType == NodeState::ST_SYSTEM_RESTART) { jam(); - if (c_redo_complete_fragments.first(fragptr)) + if (c_lcp_complete_fragments.first(fragptr)) { jam(); signal->theData[0] = ZENABLE_EXPAND_CHECK; @@ -17367,7 +17260,6 @@ void Dblqh::initFragrec(Signal* signal, fragptr.p->maxGciInLcp = 0; fragptr.p->copyFragState = ZIDLE; fragptr.p->newestGci = cnewestGci; - fragptr.p->nextLcp = 0; fragptr.p->tabRef = tableId; fragptr.p->fragId = fragId; fragptr.p->srStatus = Fragrecord::SS_IDLE; @@ -18456,6 +18348,7 @@ void Dblqh::sendLqhTransconf(Signal* signal, LqhTransConf::OperationStatus stat) void Dblqh::startExecSr(Signal* signal) { cnoFragmentsExecSr = 0; + cnoOutstandingExecFragReq = 0; c_lcp_complete_fragments.first(fragptr); signal->theData[0] = fragptr.i; sendSignal(cownref, GSN_START_EXEC_SR, signal, 1, JBB); diff --git a/storage/ndb/test/ndbapi/testSystemRestart.cpp b/storage/ndb/test/ndbapi/testSystemRestart.cpp index 89580c0cef8..0f9100f67fa 100644 --- a/storage/ndb/test/ndbapi/testSystemRestart.cpp +++ b/storage/ndb/test/ndbapi/testSystemRestart.cpp @@ -1501,6 +1501,38 @@ int runSR_DD_2(NDBT_Context* ctx, NDBT_Step* step) return result; } +int runBug22696(NDBT_Context* ctx, NDBT_Step* step) +{ + Ndb* pNdb = GETNDB(step); + int result = NDBT_OK; + Uint32 loops = ctx->getNumLoops(); + Uint32 rows = ctx->getNumRecords(); + NdbRestarter restarter; + HugoTransactions hugoTrans(*ctx->getTab()); + + Uint32 i = 0; + while(i<=loops && result != NDBT_FAILED) + { + for (Uint32 j = 0; j<10 && result != NDBT_FAILED; j++) + CHECK(hugoTrans.scanUpdateRecords(pNdb, rows) == 0); + + CHECK(restarter.restartAll(false, true, i > 0 ? 
true : false) == 0); + CHECK(restarter.waitClusterNoStart() == 0); + CHECK(restarter.insertErrorInAllNodes(7072) == 0); + CHECK(restarter.startAll() == 0); + CHECK(restarter.waitClusterStarted() == 0); + + i++; + if (i < loops) + { + NdbSleep_SecSleep(5); // Wait for a few gcp + } + } + + ctx->stopTest(); + return result; +} + int runBug27434(NDBT_Context* ctx, NDBT_Step* step) { @@ -1813,8 +1845,13 @@ TESTCASE("Bug28770", STEP(runBug28770); FINALIZER(runClearTable); } - - +TESTCASE("Bug22696", "") +{ + INITIALIZER(runWaitStarted); + INITIALIZER(runLoadTable); + INITIALIZER(runBug22696); + FINALIZER(runClearTable); +} NDBT_TESTSUITE_END(testSystemRestart); int main(int argc, const char** argv){ diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt index b6f3e51a515..37db5e01dd6 100644 --- a/storage/ndb/test/run-test/daily-basic-tests.txt +++ b/storage/ndb/test/run-test/daily-basic-tests.txt @@ -1042,4 +1042,7 @@ max-time: 300 cmd: test_event args: -n Bug31701 T1 +max-time: 300 +cmd: testSystemRestart +args: -n Bug22696 T1 -- cgit v1.2.1 From 49e3dcf1bb80007fe34303fd12d3d2e82d5c3fd5 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 19 Nov 2007 11:10:36 +0100 Subject: ndb - bug#22696 additional (not stricly necessary for this bug...) only send EXEC_SRCONF to node *in* SR --- storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 51dff6aa4f4..a815b3e2ae4 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -15900,17 +15900,10 @@ void Dblqh::sendExecConf(Signal* signal) void Dblqh::srPhase3Comp(Signal* signal) { jamEntry(); - ndbrequire(cnoOfNodes < MAX_NDB_NODES); - for (Uint32 i = 0; i < cnoOfNodes; i++) { - jam(); - if (cnodeStatus[i] == ZNODE_UP) { - jam(); - ndbrequire(cnodeData[i] < MAX_NDB_NODES); - BlockReference ref = calcLqhBlockRef(cnodeData[i]); - signal->theData[0] = cownNodeid; - sendSignal(ref, GSN_EXEC_SRCONF, signal, 1, JBB); - }//if - }//for + + signal->theData[0] = cownNodeid; + NodeReceiverGroup rg(DBLQH, m_sr_nodes); + sendSignal(rg, GSN_EXEC_SRCONF, signal, 1, JBB); return; }//Dblqh::srPhase3Comp() -- cgit v1.2.1 From cb4df0a2030b3b0f93b342d5fe116218ea400c81 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 19 Nov 2007 13:11:03 +0100 Subject: remove debug printout --- storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index a815b3e2ae4..6efa1b1b116 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -14119,8 +14119,6 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal) const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0]; jamEntry(); - printSTART_FRAG_REQ(stdout, signal->getDataPtr(), signal->getLength(), number()); - tabptr.i = startFragReq->tableId; Uint32 fragId = startFragReq->fragId; -- cgit v1.2.1 From 1f7678fe52ae199b87602bea5f35dd3e8e4bc04e Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 19 Nov 2007 15:27:44 +0300 Subject: Changed the help text for --comments to make it clear which option disables the option explicitely. Changed the option location in code so that --help will show it in lexical option order. 
This is for bug #26215: mysql command line client should not strip comments from SQL statements client/mysql.cc: Changed the help text for --comments to make it clear which option disables the option explicitely. Changed the option location in code so that --help will show it in lexical option order. --- client/mysql.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/client/mysql.cc b/client/mysql.cc index a8d88bec274..6af33670ca5 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -595,6 +595,10 @@ static struct my_option my_long_options[] = {"default-character-set", OPT_DEFAULT_CHARSET, "Set the default character set.", (gptr*) &default_charset, (gptr*) &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"comments", 'c', "Preserve comments. Send comments to the server." + " The default is --skip-comments (discard comments), enable with --comments", + (gptr*) &preserve_comments, (gptr*) &preserve_comments, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"compress", 'C', "Use compression in server/client protocol.", (gptr*) &opt_compress, (gptr*) &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -755,10 +759,6 @@ static struct my_option my_long_options[] = {"show-warnings", OPT_SHOW_WARNINGS, "Show warnings after every statement.", (gptr*) &show_warnings, (gptr*) &show_warnings, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"comments", 'c', "Preserve comments. Send comments to the server." - " Comments are discarded by default, enable with --enable-comments", - (gptr*) &preserve_comments, (gptr*) &preserve_comments, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; -- cgit v1.2.1 From c4d0901d00bdeb496aeffedc80e6d583711086d3 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 19 Nov 2007 18:59:25 +0300 Subject: Fix for bug #32221: bug25714, mytest, mysql_client_test complaints and crashes. MySQL distributions contain a number of programs that are used only by the MySQL test suite internally, i.e. they are not indended to be invoked directly by a user. As a result, such programs are not documented, do not have any built-in help or proper error reporting, which may confuse users. This patch fixes the problem with the following changes: - mytest, libmysqltest and all references to them were removed from the distribution since they are not used anymore - bug25714 now displays an error message when run with incorrect arguments or with the --help option - mysql_client_test now does not call abort() in case of errors, instead it does a clean exit() with a proper error status. BitKeeper/deleted/.del-mytest.c: Delete: libmysql/mytest.c BitKeeper/deleted/.del-myTest-package.dsp: Delete: VC++Files/libmysqltest/myTest-package.dsp BitKeeper/deleted/.del-myTest-package_ia64.dsp: Delete: VC++Files/libmysqltest/myTest-package_ia64.dsp BitKeeper/deleted/.del-myTest.dsp~4a8c480769193952: Delete: VC++Files/libmysqltest/myTest.dsp BitKeeper/deleted/.del-myTest.vcproj: Delete: VC++Files/libmysqltest/myTest.vcproj BitKeeper/deleted/.del-myTest_ia64.dsp: Delete: VC++Files/libmysqltest/myTest_ia64.dsp BitKeeper/deleted/.del-mytest.c~9a99338689e5de8: Delete: VC++Files/libmysqltest/mytest.c BitKeeper/deleted/.del-mytest.dsw~2324698861155335: Delete: VC++Files/libmysqltest/mytest.dsw VC++Files/copy_mysql_files.bat: Removed references to libmysqltest. libmysql/CMakeLists.txt: Removed rules for myTest. libmysql/Makefile.am: Removed mytest.c from EXTRA_DIST. 
tests/bug25714.c: Display a meaningful error message when run incorrectly or with the --help option. tests/mysql_client_test.c: In case of error, don't cause abnormal program termination, do a clean exit() with an error status. --- VC++Files/copy_mysql_files.bat | 5 - VC++Files/libmysqltest/myTest-package.dsp | 92 ------------- VC++Files/libmysqltest/myTest-package_ia64.dsp | 92 ------------- VC++Files/libmysqltest/myTest.dsp | 92 ------------- VC++Files/libmysqltest/myTest.vcproj | 146 --------------------- VC++Files/libmysqltest/myTest_ia64.dsp | 94 ------------- VC++Files/libmysqltest/mytest.c | 175 ------------------------- VC++Files/libmysqltest/mytest.dsw | 28 ---- libmysql/CMakeLists.txt | 3 - libmysql/Makefile.am | 2 +- libmysql/mytest.c | 175 ------------------------- tests/bug25714.c | 6 +- tests/mysql_client_test.c | 2 +- 13 files changed, 7 insertions(+), 905 deletions(-) delete mode 100644 VC++Files/libmysqltest/myTest-package.dsp delete mode 100644 VC++Files/libmysqltest/myTest-package_ia64.dsp delete mode 100644 VC++Files/libmysqltest/myTest.dsp delete mode 100644 VC++Files/libmysqltest/myTest.vcproj delete mode 100644 VC++Files/libmysqltest/myTest_ia64.dsp delete mode 100644 VC++Files/libmysqltest/mytest.c delete mode 100644 VC++Files/libmysqltest/mytest.dsw delete mode 100644 libmysql/mytest.c diff --git a/VC++Files/copy_mysql_files.bat b/VC++Files/copy_mysql_files.bat index 7d6070eb1a8..8a296187cf3 100644 --- a/VC++Files/copy_mysql_files.bat +++ b/VC++Files/copy_mysql_files.bat @@ -78,11 +78,6 @@ copy include\conf*.h c:\mysql\include copy include\my_global.h c:\mysql\include\my_global.h copy libmysql\libmysql.def c:\mysql\include -REM Copy test files - -copy libmysqltest\*.* c:\mysql\examples\libmysqltest -copy libmysqltest\release\myTest.exe c:\mysql\examples\libmysqltest - REM Copy share, docs etc xcopy share\*.* c:\mysql\share /E /Y diff --git a/VC++Files/libmysqltest/myTest-package.dsp b/VC++Files/libmysqltest/myTest-package.dsp deleted file mode 100644 index a5c73d447b3..00000000000 --- a/VC++Files/libmysqltest/myTest-package.dsp +++ /dev/null @@ -1,92 +0,0 @@ -# Microsoft Developer Studio Project File - Name="myTest" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Console Application" 0x0103 - -CFG=myTest - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "myTest.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. 
For example: -!MESSAGE -!MESSAGE NMAKE /f "myTest.mak" CFG="myTest - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "myTest - Win32 Release" (based on "Win32 (x86) Console Application") -!MESSAGE "myTest - Win32 Debug" (based on "Win32 (x86) Console Application") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=xicl6.exe -RSC=rc.exe - -!IF "$(CFG)" == "myTest - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "release" -# PROP Intermediate_Dir "release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /G6 /W3 /O2 /I "..\..\include" /D "NDEBUG" /D "DBUG_UFF" /D "_CONSOLE" /D "_MBCS" /FD /c -# SUBTRACT CPP /YX -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=xilink6.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 ..\..\lib\opt\libmysql.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 /libpath:"..\lib_release" - -!ELSEIF "$(CFG)" == "myTest - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "debug" -# PROP Intermediate_Dir "debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /G6 /MTd /W3 /Z7 /Od /I "..\..\include" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /FD /c -# SUBTRACT CPP /Fr /YX -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=xilink6.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 ..\..\lib\debug\libmysql.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /incremental:no /debug /machine:I386 /pdbtype:sept /libpath:"..\lib_debug" - -!ENDIF - -# Begin Target - -# Name "myTest - Win32 Release" -# Name "myTest - Win32 Debug" -# Begin Source File - -SOURCE=.\Mytest.c -# End Source File -# End Target -# End Project diff --git a/VC++Files/libmysqltest/myTest-package_ia64.dsp b/VC++Files/libmysqltest/myTest-package_ia64.dsp deleted file mode 100644 index ef80a773815..00000000000 --- a/VC++Files/libmysqltest/myTest-package_ia64.dsp +++ /dev/null @@ -1,92 +0,0 @@ -# Microsoft Developer Studio Project File - Name="myTest" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Console Application" 
0x0103 - -CFG=myTest - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "myTest.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "myTest.mak" CFG="myTest - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "myTest - WinIA64 Release" (based on "Win32 (x86) Console Application") -!MESSAGE "myTest - WinIA64 Debug" (based on "Win32 (x86) Console Application") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=xicl6.exe -RSC=rc.exe - -!IF "$(CFG)" == "myTest - WinIA64 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "release" -# PROP Intermediate_Dir "release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN64" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /W3 /I "..\include" /D"NDEBUG" /D"DBUG_UFF" /D"_CONSOLE" /D"_MBCS" /FD /c /O2 /G2 /EHsc /D"_IA64_" /Zi /D"WIN64" /D"WIN32" /D"_AFX_NO_DAO_SUPPORT" /Wp64 /Zm600 -# SUBTRACT CPP /YX -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=xilink6.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:IA64 -# ADD LINK32 ..\lib_release\libmysql.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:IA64 /libpath:"..\lib_release" /incremental:no - -!ELSEIF "$(CFG)" == "myTest - WinIA64 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "debug" -# PROP Intermediate_Dir "debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN64" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MTd /W3 /Z7 /I "..\include" /D"_DEBUG" /D"_CONSOLE" /D"_MBCS" /FD /c /Od /G2 /EHsc /D"_IA64_" /Zi /D"WIN64" /D"WIN32" /D"_AFX_NO_DAO_SUPPORT" /Wp64 /Zm600 -# SUBTRACT CPP /Fr /YX -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=xilink6.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:IA64 -# ADD LINK32 ..\lib_debug\libmysql.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:IA64 /libpath:"..\lib_debug" /incremental:no - -!ENDIF - -# Begin Target - -# Name "myTest - WinIA64 Release" -# Name "myTest - WinIA64 Debug" -# Begin Source File - -SOURCE=.\Mytest.c -# End Source File -# End Target -# End 
Project diff --git a/VC++Files/libmysqltest/myTest.dsp b/VC++Files/libmysqltest/myTest.dsp deleted file mode 100644 index ca0f9e6e147..00000000000 --- a/VC++Files/libmysqltest/myTest.dsp +++ /dev/null @@ -1,92 +0,0 @@ -# Microsoft Developer Studio Project File - Name="myTest" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Console Application" 0x0103 - -CFG=myTest - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "myTest.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "myTest.mak" CFG="myTest - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "myTest - Win32 Release" (based on "Win32 (x86) Console Application") -!MESSAGE "myTest - Win32 Debug" (based on "Win32 (x86) Console Application") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=xicl6.exe -RSC=rc.exe - -!IF "$(CFG)" == "myTest - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "release" -# PROP Intermediate_Dir "release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /G6 /W3 /O2 /I "..\include" /D "DBUG_UFF" /D "_CONSOLE" /D "_MBCS" /D "NDEBUG" /FD /c -# SUBTRACT CPP /YX -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=xilink6.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 libmysql.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 /libpath:"..\lib_release" - -!ELSEIF "$(CFG)" == "myTest - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "debug" -# PROP Intermediate_Dir "debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /G6 /MTd /W3 /Z7 /Od /I "..\include" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /FD /c -# SUBTRACT CPP /Fr /YX -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=xilink6.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 libmysql.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo 
/subsystem:console /incremental:no /debug /machine:I386 /pdbtype:sept /libpath:"..\lib_debug" - -!ENDIF - -# Begin Target - -# Name "myTest - Win32 Release" -# Name "myTest - Win32 Debug" -# Begin Source File - -SOURCE=.\Mytest.c -# End Source File -# End Target -# End Project diff --git a/VC++Files/libmysqltest/myTest.vcproj b/VC++Files/libmysqltest/myTest.vcproj deleted file mode 100644 index 53f93a7e05d..00000000000 --- a/VC++Files/libmysqltest/myTest.vcproj +++ /dev/null @@ -1,146 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/VC++Files/libmysqltest/myTest_ia64.dsp b/VC++Files/libmysqltest/myTest_ia64.dsp deleted file mode 100644 index 4affa81ef54..00000000000 --- a/VC++Files/libmysqltest/myTest_ia64.dsp +++ /dev/null @@ -1,94 +0,0 @@ -# Microsoft Developer Studio Project File - Name="myTest" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Console Application" 0x0103 - -CFG=myTest - WinIA64 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "myTest_ia64.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "myTest_ia64.mak" CFG="myTest - WinIA64 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "myTest - WinIA64 Release" (based on "Win32 (x86) Console Application") -!MESSAGE "myTest - WinIA64 Debug" (based on "Win32 (x86) Console Application") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -RSC=rc.exe - -!IF "$(CFG)" == "myTest - WinIA64 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "release" -# PROP Intermediate_Dir "release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -MTL=midl.exe -# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN64" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /W3 /Zi /O2 /I "..\include" /D "DBUG_UFF" /D "_CONSOLE" /D "_MBCS" /D "NDEBUG" /D "_IA64_" /D "WIN64" /D "WIN32" /D "_AFX_NO_DAO_SUPPORT" /FD /G2 /EHsc /Wp64 /Zm600 /c -# SUBTRACT CPP /YX -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:IA64 -# ADD LINK32 libmysql.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib bufferoverflowU.lib /nologo /subsystem:console /libpath:"..\lib_release" /machine:IA64 - -!ELSEIF "$(CFG)" == "myTest - WinIA64 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "debug" -# PROP Intermediate_Dir "debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -MTL=midl.exe -# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D 
"WIN64" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MTd /W3 /Zi /Od /I "..\include" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /D "_IA64_" /D "WIN64" /D "WIN32" /D "_AFX_NO_DAO_SUPPORT" /FD /G2 /EHsc /Wp64 /Zm600 /c -# SUBTRACT CPP /Fr /YX -# ADD BASE RSC /l 0x409 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:IA64 -# ADD LINK32 libmysql.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib bufferoverflowU.lib /nologo /subsystem:console /incremental:no /debug /libpath:"..\lib_debug" /machine:IA64 - -!ENDIF - -# Begin Target - -# Name "myTest - WinIA64 Release" -# Name "myTest - WinIA64 Debug" -# Begin Source File - -SOURCE=.\Mytest.c -# End Source File -# End Target -# End Project diff --git a/VC++Files/libmysqltest/mytest.c b/VC++Files/libmysqltest/mytest.c deleted file mode 100644 index a1dc13db39f..00000000000 --- a/VC++Files/libmysqltest/mytest.c +++ /dev/null @@ -1,175 +0,0 @@ -/*C4*/ -/****************************************************************/ -/* Author: Jethro Wright, III TS : 3/ 4/1998 9:15 */ -/* Date: 02/18/1998 */ -/* mytest.c : do some testing of the libmySQL.DLL.... */ -/* */ -/* History: */ -/* 02/18/1998 jw3 also sprach zarathustra.... */ -/****************************************************************/ - - -#include -#include -#include - -#include - -#define DEFALT_SQL_STMT "SELECT * FROM db" -#ifndef offsetof -#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) -#endif - - -/******************************************************** -** -** main :- -** -********************************************************/ - -int -main( int argc, char * argv[] ) -{ - - char szSQL[ 200 ], aszFlds[ 25 ][ 25 ], szDB[ 50 ] ; - const char *pszT; - int i, j, k, l, x ; - MYSQL * myData ; - MYSQL_RES * res ; - MYSQL_FIELD * fd ; - MYSQL_ROW row ; - - //....just curious.... 
- printf( "sizeof( MYSQL ) == %d\n", (int) sizeof( MYSQL ) ) ; - if ( argc == 2 ) - { - strcpy( szDB, argv[ 1 ] ) ; - strcpy( szSQL, DEFALT_SQL_STMT ) ; - if (!strcmp(szDB,"--debug")) - { - strcpy( szDB, "mysql" ) ; - printf("Some mysql struct information (size and offset):\n"); - printf("net:\t%3d %3d\n",(int) sizeof(myData->net), - (int) offsetof(MYSQL,net)); - printf("host:\t%3d %3d\n",(int) sizeof(myData->host), - (int) offsetof(MYSQL,host)); - printf("port:\t%3d %3d\n", (int) sizeof(myData->port), - (int) offsetof(MYSQL,port)); - printf("protocol_version:\t%3d %3d\n", - (int) sizeof(myData->protocol_version), - (int) offsetof(MYSQL,protocol_version)); - printf("thread_id:\t%3d %3d\n",(int) sizeof(myData->thread_id), - (int) offsetof(MYSQL,thread_id)); - printf("affected_rows:\t%3d %3d\n",(int) sizeof(myData->affected_rows), - (int) offsetof(MYSQL,affected_rows)); - printf("packet_length:\t%3d %3d\n",(int) sizeof(myData->packet_length), - (int) offsetof(MYSQL,packet_length)); - printf("status:\t%3d %3d\n",(int) sizeof(myData->status), - (int) offsetof(MYSQL,status)); - printf("fields:\t%3d %3d\n",(int) sizeof(myData->fields), - (int) offsetof(MYSQL,fields)); - printf("field_alloc:\t%3d %3d\n",(int) sizeof(myData->field_alloc), - (int) offsetof(MYSQL,field_alloc)); - printf("free_me:\t%3d %3d\n",(int) sizeof(myData->free_me), - (int) offsetof(MYSQL,free_me)); - printf("options:\t%3d %3d\n",(int) sizeof(myData->options), - (int) offsetof(MYSQL,options)); - puts(""); - } - } - else if ( argc > 2 ) { - strcpy( szDB, argv[ 1 ] ) ; - strcpy( szSQL, argv[ 2 ] ) ; - } - else { - strcpy( szDB, "mysql" ) ; - strcpy( szSQL, DEFALT_SQL_STMT ) ; - } - //.... - - if ( (myData = mysql_init((MYSQL*) 0)) && - mysql_real_connect( myData, NULL, NULL, NULL, NULL, MYSQL_PORT, - NULL, 0 ) ) - { - myData->reconnect= 1; - if ( mysql_select_db( myData, szDB ) < 0 ) { - printf( "Can't select the %s database !\n", szDB ) ; - mysql_close( myData ) ; - return 2 ; - } - } - else { - printf( "Can't connect to the mysql server on port %d !\n", - MYSQL_PORT ) ; - mysql_close( myData ) ; - return 1 ; - } - //.... - if ( ! mysql_query( myData, szSQL ) ) { - res = mysql_store_result( myData ) ; - i = (int) mysql_num_rows( res ) ; l = 1 ; - printf( "Query: %s\nNumber of records found: %ld\n", szSQL, i ) ; - //....we can get the field-specific characteristics here.... - for ( x = 0 ; fd = mysql_fetch_field( res ) ; x++ ) - strcpy( aszFlds[ x ], fd->name ) ; - //.... - while ( row = mysql_fetch_row( res ) ) { - j = mysql_num_fields( res ) ; - printf( "Record #%ld:-\n", l++ ) ; - for ( k = 0 ; k < j ; k++ ) - printf( " Fld #%d (%s): %s\n", k + 1, aszFlds[ k ], - (((row[k]==NULL)||(!strlen(row[k])))?"NULL":row[k])) ; - puts( "==============================\n" ) ; - } - mysql_free_result( res ) ; - } - else printf( "Couldn't execute %s on the server !\n", szSQL ) ; - //.... - puts( "==== Diagnostic info ====" ) ; - pszT = mysql_get_client_info() ; - printf( "Client info: %s\n", pszT ) ; - //.... - pszT = mysql_get_host_info( myData ) ; - printf( "Host info: %s\n", pszT ) ; - //.... - pszT = mysql_get_server_info( myData ) ; - printf( "Server info: %s\n", pszT ) ; - //.... 
- res = mysql_list_processes( myData ) ; l = 1 ; - if (res) - { - for ( x = 0 ; fd = mysql_fetch_field( res ) ; x++ ) - strcpy( aszFlds[ x ], fd->name ) ; - while ( row = mysql_fetch_row( res ) ) { - j = mysql_num_fields( res ) ; - printf( "Process #%ld:-\n", l++ ) ; - for ( k = 0 ; k < j ; k++ ) - printf( " Fld #%d (%s): %s\n", k + 1, aszFlds[ k ], - (((row[k]==NULL)||(!strlen(row[k])))?"NULL":row[k])) ; - puts( "==============================\n" ) ; - } - } - else - { - printf("Got error %s when retreiving processlist\n",mysql_error(myData)); - } - //.... - res = mysql_list_tables( myData, "%" ) ; l = 1 ; - for ( x = 0 ; fd = mysql_fetch_field( res ) ; x++ ) - strcpy( aszFlds[ x ], fd->name ) ; - while ( row = mysql_fetch_row( res ) ) { - j = mysql_num_fields( res ) ; - printf( "Table #%ld:-\n", l++ ) ; - for ( k = 0 ; k < j ; k++ ) - printf( " Fld #%d (%s): %s\n", k + 1, aszFlds[ k ], - (((row[k]==NULL)||(!strlen(row[k])))?"NULL":row[k])) ; - puts( "==============================\n" ) ; - } - //.... - pszT = mysql_stat( myData ) ; - puts( pszT ) ; - //.... - mysql_close( myData ) ; - return 0 ; - -} diff --git a/VC++Files/libmysqltest/mytest.dsw b/VC++Files/libmysqltest/mytest.dsw deleted file mode 100644 index 1aa804386bc..00000000000 --- a/VC++Files/libmysqltest/mytest.dsw +++ /dev/null @@ -1,28 +0,0 @@ -Microsoft Developer Studio Workspace File, Format Version 5.00 -# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE! - -############################################################################### - -Project: "myTest"=".\myTest.dsp" - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ -}}} - -############################################################################### - -Global: - -Package=<5> -{{{ -}}} - -Package=<3> -{{{ -}}} - -############################################################################### diff --git a/libmysql/CMakeLists.txt b/libmysql/CMakeLists.txt index 59e70d68a6d..7de504aa2f0 100755 --- a/libmysql/CMakeLists.txt +++ b/libmysql/CMakeLists.txt @@ -125,9 +125,6 @@ TARGET_LINK_LIBRARIES(mysqlclient) ADD_DEPENDENCIES(mysqlclient_notls GenError) TARGET_LINK_LIBRARIES(mysqlclient_notls) -ADD_EXECUTABLE(myTest mytest.c) -TARGET_LINK_LIBRARIES(myTest libmysql) - IF(EMBED_MANIFESTS) MYSQL_EMBED_MANIFEST("myTest" "asInvoker") ENDIF(EMBED_MANIFESTS) diff --git a/libmysql/Makefile.am b/libmysql/Makefile.am index 38680c98d53..13497331683 100644 --- a/libmysql/Makefile.am +++ b/libmysql/Makefile.am @@ -31,7 +31,7 @@ include $(srcdir)/Makefile.shared libmysqlclient_la_SOURCES = $(target_sources) libmysqlclient_la_LIBADD = $(target_libadd) $(yassl_las) libmysqlclient_la_LDFLAGS = $(target_ldflags) -EXTRA_DIST = Makefile.shared libmysql.def dll.c mytest.c CMakeLists.txt +EXTRA_DIST = Makefile.shared libmysql.def dll.c CMakeLists.txt noinst_HEADERS = client_settings.h # This is called from the toplevel makefile diff --git a/libmysql/mytest.c b/libmysql/mytest.c deleted file mode 100644 index 2d5c576b72a..00000000000 --- a/libmysql/mytest.c +++ /dev/null @@ -1,175 +0,0 @@ -/*C4*/ -/****************************************************************/ -/* Author: Jethro Wright, III TS : 3/ 4/1998 9:15 */ -/* Date: 02/18/1998 */ -/* mytest.c : do some testing of the libmySQL.DLL.... */ -/* */ -/* History: */ -/* 02/18/1998 jw3 also sprach zarathustra.... 
*/ -/****************************************************************/ - - -#include -#include -#include - -#include - -#define DEFALT_SQL_STMT "SELECT * FROM db" -#ifndef offsetof -#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) -#endif - - -/******************************************************** -** -** main :- -** -********************************************************/ - -int -main( int argc, char * argv[] ) -{ - - char szSQL[ 200 ], aszFlds[ 25 ][ 25 ], szDB[ 50 ] ; - const char *pszT; - int i, j, k, l, x ; - MYSQL * myData ; - MYSQL_RES * res ; - MYSQL_FIELD * fd ; - MYSQL_ROW row ; - - //....just curious.... - printf( "sizeof( MYSQL ) == %d\n", (int) sizeof( MYSQL ) ) ; - if ( argc == 2 ) - { - strcpy( szDB, argv[ 1 ] ) ; - strcpy( szSQL, DEFALT_SQL_STMT ) ; - if (!strcmp(szDB,"--debug")) - { - strcpy( szDB, "mysql" ) ; - printf("Some mysql struct information (size and offset):\n"); - printf("net:\t%3d %3d\n",(int) sizeof(myData->net), - (int) offsetof(MYSQL,net)); - printf("host:\t%3d %3d\n",(int) sizeof(myData->host), - (int) offsetof(MYSQL,host)); - printf("port:\t%3d %3d\n", (int) sizeof(myData->port), - (int) offsetof(MYSQL,port)); - printf("protocol_version:\t%3d %3d\n", - (int) sizeof(myData->protocol_version), - (int) offsetof(MYSQL,protocol_version)); - printf("thread_id:\t%3d %3d\n",(int) sizeof(myData->thread_id), - (int) offsetof(MYSQL,thread_id)); - printf("affected_rows:\t%3d %3d\n",(int) sizeof(myData->affected_rows), - (int) offsetof(MYSQL,affected_rows)); - printf("packet_length:\t%3d %3d\n",(int) sizeof(myData->packet_length), - (int) offsetof(MYSQL,packet_length)); - printf("status:\t%3d %3d\n",(int) sizeof(myData->status), - (int) offsetof(MYSQL,status)); - printf("fields:\t%3d %3d\n",(int) sizeof(myData->fields), - (int) offsetof(MYSQL,fields)); - printf("field_alloc:\t%3d %3d\n",(int) sizeof(myData->field_alloc), - (int) offsetof(MYSQL,field_alloc)); - printf("free_me:\t%3d %3d\n",(int) sizeof(myData->free_me), - (int) offsetof(MYSQL,free_me)); - printf("options:\t%3d %3d\n",(int) sizeof(myData->options), - (int) offsetof(MYSQL,options)); - puts(""); - } - } - else if ( argc > 2 ) { - strcpy( szDB, argv[ 1 ] ) ; - strcpy( szSQL, argv[ 2 ] ) ; - } - else { - strcpy( szDB, "mysql" ) ; - strcpy( szSQL, DEFALT_SQL_STMT ) ; - } - //.... - - if ( (myData = mysql_init((MYSQL*) 0)) && - mysql_real_connect( myData, NULL, NULL, NULL, NULL, MYSQL_PORT, - NULL, 0 ) ) - { - myData->reconnect= 1; - if ( mysql_select_db( myData, szDB ) < 0 ) { - printf( "Can't select the %s database !\n", szDB ) ; - mysql_close( myData ) ; - return 2 ; - } - } - else { - printf( "Can't connect to the mysql server on port %d !\n", - MYSQL_PORT ) ; - mysql_close( myData ) ; - return 1 ; - } - //.... - if ( ! mysql_query( myData, szSQL ) ) { - res = mysql_store_result( myData ) ; - i = (int) mysql_num_rows( res ) ; l = 1 ; - printf( "Query: %s\nNumber of records found: %ld\n", szSQL, i ) ; - //....we can get the field-specific characteristics here.... - for ( x = 0 ; fd = mysql_fetch_field( res ) ; x++ ) - strcpy( aszFlds[ x ], fd->name ) ; - //.... - while ( row = mysql_fetch_row( res ) ) { - j = mysql_num_fields( res ) ; - printf( "Record #%ld:-\n", l++ ) ; - for ( k = 0 ; k < j ; k++ ) - printf( " Fld #%d (%s): %s\n", k + 1, aszFlds[ k ], - (((row[k]==NULL)||(!strlen(row[k])))?"NULL":row[k])) ; - puts( "==============================\n" ) ; - } - mysql_free_result( res ) ; - } - else printf( "Couldn't execute %s on the server !\n", szSQL ) ; - //.... 
- puts( "==== Diagnostic info ====" ) ; - pszT = mysql_get_client_info() ; - printf( "Client info: %s\n", pszT ) ; - //.... - pszT = mysql_get_host_info( myData ) ; - printf( "Host info: %s\n", pszT ) ; - //.... - pszT = mysql_get_server_info( myData ) ; - printf( "Server info: %s\n", pszT ) ; - //.... - res = mysql_list_processes( myData ) ; l = 1 ; - if (res) - { - for ( x = 0 ; fd = mysql_fetch_field( res ) ; x++ ) - strcpy( aszFlds[ x ], fd->name ) ; - while ( row = mysql_fetch_row( res ) ) { - j = mysql_num_fields( res ) ; - printf( "Process #%ld:-\n", l++ ) ; - for ( k = 0 ; k < j ; k++ ) - printf( " Fld #%d (%s): %s\n", k + 1, aszFlds[ k ], - (((row[k]==NULL)||(!strlen(row[k])))?"NULL":row[k])) ; - puts( "==============================\n" ) ; - } - } - else - { - printf("Got error %s when retreiving processlist\n",mysql_error(myData)); - } - //.... - res = mysql_list_tables( myData, "%" ) ; l = 1 ; - for ( x = 0 ; fd = mysql_fetch_field( res ) ; x++ ) - strcpy( aszFlds[ x ], fd->name ) ; - while ( row = mysql_fetch_row( res ) ) { - j = mysql_num_fields( res ) ; - printf( "Table #%ld:-\n", l++ ) ; - for ( k = 0 ; k < j ; k++ ) - printf( " Fld #%d (%s): %s\n", k + 1, aszFlds[ k ], - (((row[k]==NULL)||(!strlen(row[k])))?"NULL":row[k])) ; - puts( "==============================\n" ) ; - } - //.... - pszT = mysql_stat( myData ) ; - puts( pszT ) ; - //.... - mysql_close( myData ) ; - return 0 ; - -} diff --git a/tests/bug25714.c b/tests/bug25714.c index e9b2be44209..88485aa1962 100644 --- a/tests/bug25714.c +++ b/tests/bug25714.c @@ -29,8 +29,12 @@ int main (int argc, char **argv) MY_INIT(argv[0]); - if (argc != 2) + if (argc != 2 || !strcmp(argv[1], "--help")) + { + fprintf(stderr, "This program is a part of the MySQL test suite. " + "It is not intended to be executed directly by a user.\n"); return -1; + } mysql_init(&conn); if (!mysql_real_connect( diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index b9f39021114..8ab8f209ae6 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -123,7 +123,7 @@ static void die(const char *file, int line, const char *expr) fflush(stdout); fprintf(stderr, "%s:%d: check failed: '%s'\n", file, line, expr); fflush(stderr); - abort(); + exit(1); } -- cgit v1.2.1 From ea739898281a86c074275f2f0a084840d7ed3e44 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 19 Nov 2007 21:34:21 +0400 Subject: Fixed bug #32282: TEXT silently truncates when value is exactly 65536 bytes length. The server has been modified to report warnings on truncation to 65536 bytes as usual. sql/sql_string.cc: Fixed bug #32282. The well_formed_copy_nchars function returned an incorrect value of copied bytes of the truncated input string: extra length of the first truncated character added to the *from_end_pos pointer. That has been fixed. mysql-test/r/type_blob.result: Added test case for bug #32282. mysql-test/t/type_blob.test: Added test case for bug #32282. 
--- mysql-test/r/type_blob.result | 14 ++++++++++++++ mysql-test/t/type_blob.test | 11 +++++++++++ sql/sql_string.cc | 5 ++++- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/type_blob.result b/mysql-test/r/type_blob.result index c72ee005428..80d233ee16a 100644 --- a/mysql-test/r/type_blob.result +++ b/mysql-test/r/type_blob.result @@ -807,4 +807,18 @@ set @@sql_mode='TRADITIONAL'; create table t1 (a text default ''); ERROR 42000: BLOB/TEXT column 'a' can't have a default value set @@sql_mode=''; +CREATE TABLE t (c TEXT CHARSET ASCII); +INSERT INTO t (c) VALUES (REPEAT('1',65537)); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +INSERT INTO t (c) VALUES (REPEAT('2',65536)); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +INSERT INTO t (c) VALUES (REPEAT('3',65535)); +SELECT LENGTH(c), CHAR_LENGTH(c) FROM t; +LENGTH(c) CHAR_LENGTH(c) +65535 65535 +65535 65535 +65535 65535 +DROP TABLE t; End of 5.0 tests diff --git a/mysql-test/t/type_blob.test b/mysql-test/t/type_blob.test index ba9f374a24c..d79b749dd65 100644 --- a/mysql-test/t/type_blob.test +++ b/mysql-test/t/type_blob.test @@ -436,4 +436,15 @@ set @@sql_mode='TRADITIONAL'; create table t1 (a text default ''); set @@sql_mode=''; +# +# Bug #32282: TEXT silently truncates when value is exactly 65536 bytes +# + +CREATE TABLE t (c TEXT CHARSET ASCII); +INSERT INTO t (c) VALUES (REPEAT('1',65537)); +INSERT INTO t (c) VALUES (REPEAT('2',65536)); +INSERT INTO t (c) VALUES (REPEAT('3',65535)); +SELECT LENGTH(c), CHAR_LENGTH(c) FROM t; +DROP TABLE t; + --echo End of 5.0 tests diff --git a/sql/sql_string.cc b/sql/sql_string.cc index 9d7df73cd7a..606a9ddb26d 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -858,7 +858,7 @@ outp: with optional left padding (for binary -> UCS2 conversion) SYNOPSIS - well_formed_copy_nhars() + well_formed_copy_nchars() to Store result here to_length Maxinum length of "to" string to_cs Character set of "to" string @@ -997,7 +997,10 @@ outp: goto outp; } else + { + from= from_prev; break; + } } *from_end_pos= from; res= to - to_start; -- cgit v1.2.1 From 13105534fa6107c5afb139a4459c659329525b2e Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 19 Nov 2007 20:00:25 +0000 Subject: Bug#31048: Many nested subqueries may cause server crash. This bug is actually two. The first one manifests itself on an EXPLAIN SELECT query with nested subqueries that employs the filesort algorithm. The whole SELECT under explain is marked as UNCACHEABLE_EXPLAIN to preserve some temporary structures for explain. As a side-effect of this values of nested subqueries weren't cached and subqueries were re-evaluated many times. Each time buffer for filesort was allocated but wasn't freed because freeing occurs at the end of topmost SELECT. Thus all available memory was eaten up step by step and OOM event occur. The second bug manifests itself on SELECT queries with conditions where a subquery result is compared with a key field and the subquery itself also has such condition. When a long chain of such nested subqueries is present the stack overrun occur. This happens because at some point the range optimizer temporary puts the PARAM structure on the stack. Its size if about 8K and the stack is exhausted very fast. Now the subselect_single_select_engine::exec function allows subquery result caching when the UNCACHEABLE_EXPLAIN flag is set. Now the SQL_SELECT::test_quick_select function calls the check_stack_overrun function for stack checking purposes to prevent server crash. 
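The query shape involved can be sketched as follows; this is a minimal two-level illustration with an ad-hoc table, while the regression test added below nests the same pattern far more deeply to trigger the overrun and the memory growth under EXPLAIN.

    CREATE TABLE t1 (a INT, b INT, KEY (a), KEY (b));
    # Each level compares the indexed column with a scalar subquery of the
    # same form, so the range optimizer is entered once per nesting level.
    EXPLAIN SELECT SUM(a), a FROM t1 WHERE a > (
      SELECT SUM(a) FROM t1 WHERE a > (
        SELECT SUM(a) FROM t1
      ) GROUP BY b LIMIT 1)
    GROUP BY a;
    # With deep nesting the plain SELECT now fails cleanly with
    # "Thread stack overrun detected" instead of crashing the server, and the
    # EXPLAIN form no longer re-evaluates the nested subqueries repeatedly
    # (each evaluation used to leak a filesort buffer until memory ran out).
    DROP TABLE t1;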
mysql-test/t/subselect.test: Added a test case for the bug#31048: Many nested subqueries may cause server crash. mysql-test/r/subselect.result: Added a test case for the bug#31048: Many nested subqueries may cause server crash. sql/opt_range.cc: Bug#31048: Many nested subqueries may cause server crash. Now the SQL_SELECT::test_quick_select function calls the check_stack_overrun function for stack checking purposes to preven server crash. sql/item_subselect.cc: Bug31048: Many nested subqueries may cause server crash. Now the subselect_single_select_engine::exec function allows subquery result caching when the UNCACHEABLE_EXPLAIN flag is set. --- mysql-test/r/subselect.result | 104 ++++++++++++++++++++++++++++++++++++++++++ mysql-test/t/subselect.test | 95 ++++++++++++++++++++++++++++++++++++++ sql/item_subselect.cc | 4 +- sql/opt_range.cc | 6 +++ 4 files changed, 208 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index be99bdb1afc..50017cc1485 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -4139,4 +4139,108 @@ SELECT (SELECT SUM(t1.a) FROM t2 WHERE a=1) FROM t1; (SELECT SUM(t1.a) FROM t2 WHERE a=1) 3 DROP TABLE t1,t2; +create table t1(a int,b int,key(a),key(b)); +insert into t1(a,b) values (1,2),(2,1),(2,3),(3,4),(5,4),(5,5), +(6,7),(7,4),(5,3); +select sum(a),a from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +sum(a) a +select sum(a),a from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 
+)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +ERROR HY000: Thread stack overrun detected +explain select sum(a),a from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 index a a 5 NULL 9 Using where; Using index +2 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort +3 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort +4 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort +5 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort +6 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort +7 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort +8 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort +9 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort +10 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort +11 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort +12 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort +13 SUBQUERY t1 range a a 5 NULL 1 Using where; Using temporary; Using filesort +14 SUBQUERY t1 index NULL a 5 NULL 9 Using index +explain select sum(a),a from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group 
by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +ERROR HY000: Thread stack overrun detected +drop table t1; End of 5.0 tests. diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index d076ca6bd33..1f912cadd4b 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -2987,4 +2987,99 @@ SELECT (SELECT SUM(t1.a) FROM t2 WHERE a!=0) FROM t1; SELECT (SELECT SUM(t1.a) FROM t2 WHERE a=1) FROM t1; DROP TABLE t1,t2; +# +# Bug31048: Many nested subqueries may cause server crash. +# +create table t1(a int,b int,key(a),key(b)); +insert into t1(a,b) values (1,2),(2,1),(2,3),(3,4),(5,4),(5,5), + (6,7),(7,4),(5,3); +# test for the stack overflow bug +select sum(a),a from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +--replace_regex /overrun.*$/overrun detected/ +--error 1436 +select sum(a),a from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +# test for the memory consumption & subquery slowness bug +explain select sum(a),a from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( 
+ select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +--replace_regex /overrun.*$/overrun detected/ +--error 1436 +explain select sum(a),a from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +drop table t1; --echo End of 5.0 tests. 
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 0020dd35c61..19eb7708539 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1800,7 +1800,9 @@ int subselect_single_select_engine::exec() DBUG_RETURN(1); } } - if (select_lex->uncacheable && executed) + if (select_lex->uncacheable && + select_lex->uncacheable != UNCACHEABLE_EXPLAIN + && executed) { if (join->reinit()) { diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 969777d4792..dbdb2b919dc 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1978,12 +1978,18 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, keys_to_use.intersect(head->keys_in_use_for_query); if (!keys_to_use.is_clear_all()) { +#ifndef EMBEDDED_LIBRARY // Avoid compiler warning + char buff[STACK_BUFF_ALLOC]; +#endif MEM_ROOT alloc; SEL_TREE *tree= NULL; KEY_PART *key_parts; KEY *key_info; PARAM param; + if (check_stack_overrun(thd, STACK_MIN_SIZE, buff)) + DBUG_RETURN(0); // Fatal error flag is set + /* set up parameter that is passed to all functions */ param.thd= thd; param.baseflag=head->file->table_flags(); -- cgit v1.2.1 From 4addb6b71777706f4050163c66f3c1d9dc896cfe Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 19 Nov 2007 21:05:17 +0000 Subject: Bug#30384: Having SQL_BUFFER_RESULT option in the CREATE .. KEY(..) .. SELECT led to creating corrupted index. Corrected fix. The new method called prepare2 is added to the select_create class. As all preparations are done by the select_create::prepare function it doesn't do anything. Slightly changed algorithm of calling the start_bulk_insert function. Now it's called from the select_insert::prepare2 function when the SQL_BUFFER_RESULT flags is set. The is_bulk_insert_mode flag is removed as it is not needed anymore. sql/sql_class.h: Bug#30384: Having SQL_BUFFER_RESULT option in the CREATE .. KEY(..) .. SELECT led to creating corrupted index. Corrected fix. The new method called prepare2 is added to the select_create class. As all preparetions are done by the select_create::prepare function it doesn't do anything. The is_bulk_insert_mode flag is removed as it is not needed anymore. sql/sql_insert.cc: Bug#30384: Having SQL_BUFFER_RESULT option in the CREATE .. KEY(..) .. SELECT led to creating corrupted index. Slightly changed algorithm of calling the start_bulk_insert function. Now it's called from the select_insert::prepare2 function when the SQL_BUFFER_RESULT flags is set. Corrected fix. The is_bulk_insert_mode flag is removed as it is not needed anymore. 
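The affected statement shape can be sketched as follows; the table and column names are arbitrary and only illustrate the CREATE .. KEY(..) .. SELECT plus SQL_BUFFER_RESULT combination named in the bug title.

    CREATE TABLE t1 (a INT);
    INSERT INTO t1 VALUES (1), (2), (3);
    # An indexed table created from a buffered SELECT could end up with a
    # corrupted index; with start_bulk_insert() now issued from
    # select_insert::prepare2() the created index is consistent.
    CREATE TABLE t2 (a INT, KEY (a))
      SELECT SQL_BUFFER_RESULT a FROM t1;
    DROP TABLE t1, t2;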
--- sql/sql_class.h | 4 ++-- sql/sql_insert.cc | 18 +++++------------- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/sql/sql_class.h b/sql/sql_class.h index e6d65f3133a..936798c9344 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2029,14 +2029,13 @@ class select_insert :public select_result_interceptor { ulonglong last_insert_id; COPY_INFO info; bool insert_into_view; - bool is_bulk_insert_mode; select_insert(TABLE_LIST *table_list_par, TABLE *table_par, List *fields_par, List *update_fields, List *update_values, enum_duplicates duplic, bool ignore); ~select_insert(); int prepare(List &list, SELECT_LEX_UNIT *u); - int prepare2(void); + virtual int prepare2(void); bool send_data(List &items); virtual void store_values(List &values); void send_error(uint errcode,const char *err); @@ -2071,6 +2070,7 @@ public: void send_error(uint errcode,const char *err); bool send_eof(); void abort(); + int prepare2(void) { return 0; } }; #include diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 770bbd1349d..48c1ee8e51f 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -2645,8 +2645,7 @@ select_insert::select_insert(TABLE_LIST *table_list_par, TABLE *table_par, bool ignore_check_option_errors) :table_list(table_list_par), table(table_par), fields(fields_par), last_insert_id(0), - insert_into_view(table_list_par && table_list_par->view != 0), - is_bulk_insert_mode(FALSE) + insert_into_view(table_list_par && table_list_par->view != 0) { bzero((char*) &info,sizeof(info)); info.handle_duplicates= duplic; @@ -2755,14 +2754,14 @@ select_insert::prepare(List &values, SELECT_LEX_UNIT *u) Is table which we are changing used somewhere in other parts of query */ - if (!(lex->current_select->options & OPTION_BUFFER_RESULT) && - unique_table(thd, table_list, table_list->next_global, 0)) + if (unique_table(thd, table_list, table_list->next_global, 0)) { /* Using same table for INSERT and SELECT */ lex->current_select->options|= OPTION_BUFFER_RESULT; lex->current_select->join->select_options|= OPTION_BUFFER_RESULT; } - else if (!thd->prelocked_mode) + else if (!(lex->current_select->options & OPTION_BUFFER_RESULT) && + !thd->prelocked_mode) { /* We must not yet prepare the result table if it is the same as one of the @@ -2831,11 +2830,8 @@ int select_insert::prepare2(void) { DBUG_ENTER("select_insert::prepare2"); if (thd->lex->current_select->options & OPTION_BUFFER_RESULT && - !thd->prelocked_mode && !is_bulk_insert_mode) - { + !thd->prelocked_mode) table->file->start_bulk_insert((ha_rows) 0); - is_bulk_insert_mode= TRUE; - } DBUG_RETURN(0); } @@ -2941,7 +2937,6 @@ bool select_insert::send_eof() DBUG_ENTER("select_insert::send_eof"); error= (!thd->prelocked_mode) ? 
table->file->end_bulk_insert():0; - is_bulk_insert_mode= FALSE; table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); @@ -3277,10 +3272,7 @@ select_create::prepare(List &values, SELECT_LEX_UNIT *u) if (info.handle_duplicates == DUP_UPDATE) table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE); if (!thd->prelocked_mode) - { table->file->start_bulk_insert((ha_rows) 0); - is_bulk_insert_mode= TRUE; - } thd->abort_on_warning= (!info.ignore && (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | -- cgit v1.2.1 From 0b22925cc0553c11fc7d131f27b1846253a8ee87 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 20 Nov 2007 05:02:49 +0300 Subject: BUG#30573: Ordered range scan over partitioned tables returns some rows twice The problem: ha_partition::read_range_first() could return a record that is outside of the scanned range. If that record happened to be in the next subsequent range, it would satisfy the WHERE and appear in the output twice. (we would get it the second time when scanning the next subsequent range) Fix: Made ha_partition::read_range_first() check if the returned recod is within the scanned range, like other read_range_first() implementations do. mysql-test/r/partition_range.result: BUG#30573: Ordered range scan over partitioned tables returns some rows twice - Testcase mysql-test/t/partition_range.test: BUG#30573: Ordered range scan over partitioned tables returns some rows twice - Testcase sql/ha_partition.cc: BUG#30573: Ordered range scan over partitioned tables returns some rows twice - Make ha_partition::read_range_first() check if the returned record is within the range. --- mysql-test/r/partition_range.result | 22 +++++++++++++++++++++- mysql-test/t/partition_range.test | 28 +++++++++++++++++++++++++++- sql/ha_partition.cc | 2 +- 3 files changed, 49 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/partition_range.result b/mysql-test/r/partition_range.result index a61006e87a4..f75cbbf5470 100644 --- a/mysql-test/r/partition_range.result +++ b/mysql-test/r/partition_range.result @@ -1,4 +1,4 @@ -drop table if exists t1; +drop table if exists t1, t2; create table t1 (a int unsigned) partition by range (a) (partition pnull values less than (0), @@ -709,3 +709,23 @@ WHERE (a >= '2004-07-01' AND a <= '2004-09-30') OR id select_type table partitions type possible_keys key key_len ref rows Extra 1 SIMPLE t1 p407,p408,p409,p507,p508,p509 ALL NULL NULL NULL NULL 18 Using where DROP TABLE t1; +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +CREATE TABLE t2 ( +defid int(10) unsigned NOT NULL, +day int(10) unsigned NOT NULL, +count int(10) unsigned NOT NULL, +filler char(200), +KEY (defid,day) +) +PARTITION BY RANGE (day) ( +PARTITION p7 VALUES LESS THAN (20070401) , +PARTITION p8 VALUES LESS THAN (20070501)); +insert into t2 select 20, 20070311, 1, 'filler' from t1 A, t1 B; +insert into t2 select 20, 20070411, 1, 'filler' from t1 A, t1 B; +insert into t2 values(52, 20070321, 123, 'filler') ; +insert into t2 values(52, 20070322, 456, 'filler') ; +select sum(count) from t2 ch where ch.defid in (50,52) and ch.day between 20070320 and 20070401 group by defid; +sum(count) +579 +drop table t1, t2; diff --git a/mysql-test/t/partition_range.test b/mysql-test/t/partition_range.test index 50d850913bc..a9f6d410fa3 100644 --- a/mysql-test/t/partition_range.test +++ b/mysql-test/t/partition_range.test @@ -6,7 +6,7 @@ -- source include/have_partition.inc --disable_warnings -drop table if exists t1; +drop table if 
exists t1, t2; --enable_warnings # @@ -757,3 +757,29 @@ DROP TABLE t1; # a = "C2345678901234567890"; #select * from t1 where a = "12345678901234567890"; #drop table t1; + + +# +# BUG#30573: get wrong result with "group by" on PARTITIONed table +# +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +CREATE TABLE t2 ( + defid int(10) unsigned NOT NULL, + day int(10) unsigned NOT NULL, + count int(10) unsigned NOT NULL, + filler char(200), + KEY (defid,day) +) +PARTITION BY RANGE (day) ( + PARTITION p7 VALUES LESS THAN (20070401) , + PARTITION p8 VALUES LESS THAN (20070501)); + +insert into t2 select 20, 20070311, 1, 'filler' from t1 A, t1 B; +insert into t2 select 20, 20070411, 1, 'filler' from t1 A, t1 B; +insert into t2 values(52, 20070321, 123, 'filler') ; +insert into t2 values(52, 20070322, 456, 'filler') ; + +select sum(count) from t2 ch where ch.defid in (50,52) and ch.day between 20070320 and 20070401 group by defid; +drop table t1, t2; + diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 1400d9da753..423415ce4ae 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3838,7 +3838,7 @@ int ha_partition::read_range_first(const key_range *start_key, start_key->key, start_key->keypart_map, start_key->flag); } - DBUG_RETURN(error); + DBUG_RETURN (error? error: compare_key(end_range) <= 0 ? 0 : HA_ERR_END_OF_FILE); } -- cgit v1.2.1 From 2eaa6352e93b3b011be4c643d534b39a9fc8bf76 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 20 Nov 2007 15:46:33 +0400 Subject: merging --- sql/mysql_priv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index b5f0fa3a678..d58e3ae58ed 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -679,7 +679,7 @@ extern my_decimal decimal_zero; void free_items(Item *item); void cleanup_items(Item *item); class THD; -void close_thread_tables(THD *thd, bool locked=0, bool skip_derived=0); +void close_thread_tables(THD *thd); #ifndef NO_EMBEDDED_ACCESS_CHECKS bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables); -- cgit v1.2.1 From a8dd1299cc7b1cedd24533a4369970d22a7ba65d Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 20 Nov 2007 17:04:24 +0400 Subject: test case added for the bug #31155 mysql-test/r/gis.result: test result complete mysql-test/t/gis.test: test case added for the bug --- mysql-test/r/gis.result | 6 ++++++ mysql-test/t/gis.test | 9 +++++++++ 2 files changed, 15 insertions(+) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 84b7b449cb5..953dd94be7a 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -957,4 +957,10 @@ COUNT(*) 2 DROP TABLE t1, t2; End of 5.0 tests +create table `t1` (`col002` point)engine=myisam; +insert into t1 values (),(),(); +select min(`col002`) from t1 union select `col002` from t1; +min(`col002`) +NULL +drop table t1; End of 5.0 tests diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index 2520c9c478e..c115396ec03 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -631,4 +631,13 @@ SELECT 1; -- source include/gis_keys.inc +# +# Bug #31155 gis types in union'd select cause crash +# + +create table `t1` (`col002` point)engine=myisam; +insert into t1 values (),(),(); +select min(`col002`) from t1 union select `col002` from t1; +drop table t1; + --echo End of 5.0 tests -- cgit v1.2.1 From 1f43154bed6392925225e702932bdd1872813ee7 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 20 Nov 2007 15:02:37 +0100 Subject: Bug #31958 Slave stuck and 
stops processing relay logs --- storage/ndb/src/ndbapi/TransporterFacade.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp index 19b384d8dc2..f2a250a49f7 100644 --- a/storage/ndb/src/ndbapi/TransporterFacade.cpp +++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp @@ -1498,9 +1498,9 @@ void PollGuard::unlock_and_signal() if (t_signal_cond_waiter) t_signal_cond_waiter->set_poll_owner(true); } - m_tp->unlock_mutex(); if (t_signal_cond_waiter) t_signal_cond_waiter->cond_signal(); + m_tp->unlock_mutex(); m_locked=false; } -- cgit v1.2.1 From 429abc58452ce57256d6aacaece77d1672bccadf Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 20 Nov 2007 16:07:24 +0200 Subject: Bug #32268: Indexed queries give bogus MIN and MAX results Loose index scan does the grouping so the temp table does not need to do it, even when sorting. Fixed by checking if the grouping is already done before doing sorting and grouping in a temp table and do only sorting. mysql-test/r/group_min_max.result: Bug #32268: test case mysql-test/t/group_min_max.test: Bug #32268: test case sql/sql_select.cc: Bug #32268: don't group in the temp table if already done --- mysql-test/r/group_min_max.result | 46 +++++++++++++++++++++++++++++++++++++++ mysql-test/t/group_min_max.test | 24 ++++++++++++++++++++ sql/sql_select.cc | 3 ++- 3 files changed, 72 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/group_min_max.result b/mysql-test/r/group_min_max.result index 2e5193f8563..5982931e677 100644 --- a/mysql-test/r/group_min_max.result +++ b/mysql-test/r/group_min_max.result @@ -2307,3 +2307,49 @@ a 2 4 DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT); +INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3); +INSERT INTO t1 SELECT a + 1, b FROM t1; +INSERT INTO t1 SELECT a + 2, b FROM t1; +EXPLAIN +SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 12 Using temporary; Using filesort +SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC; +a MIN(b) MAX(b) +4 1 3 +3 1 3 +2 1 3 +1 1 3 +CREATE INDEX break_it ON t1 (a, b); +EXPLAIN +SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range NULL break_it 10 NULL 7 Using index for group-by +SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a; +a MIN(b) MAX(b) +1 1 3 +2 1 3 +3 1 3 +4 1 3 +EXPLAIN +SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range NULL break_it 10 NULL 7 Using index for group-by; Using temporary; Using filesort +SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC; +a MIN(b) MAX(b) +4 1 3 +3 1 3 +2 1 3 +1 1 3 +EXPLAIN +SELECT a, MIN(b), MAX(b), AVG(b) FROM t1 GROUP BY a ORDER BY a DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL break_it 10 NULL 12 Using index +SELECT a, MIN(b), MAX(b), AVG(b) FROM t1 GROUP BY a ORDER BY a DESC; +a MIN(b) MAX(b) AVG(b) +4 1 3 2.0000 +3 1 3 2.0000 +2 1 3 2.0000 +1 1 3 2.0000 +DROP TABLE t1; diff --git a/mysql-test/t/group_min_max.test b/mysql-test/t/group_min_max.test index e1010c9fcfe..7f2607b513d 100644 --- a/mysql-test/t/group_min_max.test +++ b/mysql-test/t/group_min_max.test @@ -888,7 +888,31 @@ SELECT SQL_BIG_RESULT DISTINCT(a) FROM t1; DROP TABLE t1; +# +# Bug #32268: Indexed queries 
give bogus MIN and MAX results +# + +CREATE TABLE t1 (a INT, b INT); +INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3); +INSERT INTO t1 SELECT a + 1, b FROM t1; +INSERT INTO t1 SELECT a + 2, b FROM t1; +EXPLAIN +SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC; +SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC; +CREATE INDEX break_it ON t1 (a, b); +EXPLAIN +SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a; +SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a; +EXPLAIN +SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC; +SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC; + +EXPLAIN +SELECT a, MIN(b), MAX(b), AVG(b) FROM t1 GROUP BY a ORDER BY a DESC; +SELECT a, MIN(b), MAX(b), AVG(b) FROM t1 GROUP BY a ORDER BY a DESC; + +DROP TABLE t1; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 3d3b8668a79..757bfb3a29e 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -10256,7 +10256,8 @@ Next_select_func setup_end_select_func(JOIN *join) /* Set up select_end */ if (table) { - if (table->group && tmp_tbl->sum_func_count) + if (table->group && tmp_tbl->sum_func_count && + !tmp_tbl->precomputed_group_by) { if (table->s->keys) { -- cgit v1.2.1 From b217fd72d35341dac2baffff32ba7a80cd2e2ebe Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 20 Nov 2007 15:08:48 +0100 Subject: ndb - bug#32519 Add *correct* check when a LCP is restorable by looking at maxGciStarted from LCP_FRAG_REP (for all fragments) bug observed by running testSystemRestart -n SR_DD_1_LCP T1 (causes "incorrect" behaviour in 51-ndb, and crash when using 2 LCP) storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: ndb - bug#32519 Add *correct* check when a LCP is restorable by looking at maxGciStarted from LCP_FRAG_REP (for all fragments) --- storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 28378c41f25..fd4a1fff3c9 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -10678,6 +10678,12 @@ void Dbdih::execLCP_FRAG_REP(Signal* signal) Uint32 started = lcpReport->maxGciStarted; Uint32 completed = lcpReport->maxGciCompleted; + if (started > c_lcpState.lcpStopGcp) + { + jam(); + c_lcpState.lcpStopGcp = started; + } + if(tableDone){ jam(); @@ -11218,7 +11224,12 @@ void Dbdih::allNodesLcpCompletedLab(Signal* signal) signal->theData[0] = NDB_LE_LocalCheckpointCompleted; //Event type signal->theData[1] = SYSFILE->latestLCP_ID; sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - c_lcpState.lcpStopGcp = c_newest_restorable_gci; + + if (c_newest_restorable_gci > c_lcpState.lcpStopGcp) + { + jam(); + c_lcpState.lcpStopGcp = c_newest_restorable_gci; + } /** * Start checking for next LCP @@ -12088,13 +12099,12 @@ void Dbdih::findMinGci(ReplicaRecordPtr fmgReplicaPtr, lcpNo = fmgReplicaPtr.p->nextLcp; do { ndbrequire(lcpNo < MAX_LCP_STORED); - if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID && - fmgReplicaPtr.p->maxGciStarted[lcpNo] < c_newest_restorable_gci) + if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID) { jam(); keepGci = fmgReplicaPtr.p->maxGciCompleted[lcpNo]; oldestRestorableGci = fmgReplicaPtr.p->maxGciStarted[lcpNo]; - ndbrequire(((int)oldestRestorableGci) >= 0); + ndbassert(fmgReplicaPtr.p->maxGciStarted[lcpNo] Date: Tue, 20 Nov 2007 20:15:20 +0400 Subject: Fixed bug #32533. 
8bit escape characters, termination and enclosed characters were silently ignored by SELECT INTO query, but LOAD DATA INFILE algorithm is 8bit-clean, so data was corrupted during encoding. sql/sql_class.cc: Fixed bug #32533. SELECT INTO OUTFILE encoding was not 8bit clear, it has been fixed for a symmetry with the LOAD DATA INFILE decoding algorithm. mysql-test/t/outfile_loaddata.test: Added test case for bug #32533. mysql-test/r/outfile_loaddata.result: Added test case for bug #32533. --- mysql-test/r/outfile_loaddata.result | 18 ++++++++++++++++++ mysql-test/t/outfile_loaddata.test | 24 ++++++++++++++++++++++++ sql/sql_class.cc | 17 ++++++++++------- 3 files changed, 52 insertions(+), 7 deletions(-) diff --git a/mysql-test/r/outfile_loaddata.result b/mysql-test/r/outfile_loaddata.result index 1bcaf308b7c..4a9bdcf412d 100644 --- a/mysql-test/r/outfile_loaddata.result +++ b/mysql-test/r/outfile_loaddata.result @@ -82,4 +82,22 @@ c1 c2 -r- =raker= DROP TABLE t2; DROP TABLE t1; +# +# Bug#32533: SELECT INTO OUTFILE never escapes multibyte character +# +CREATE TABLE t1 (c1 VARCHAR(256)); +INSERT INTO t1 VALUES (0xC3); +SELECT HEX(c1) FROM t1; +HEX(c1) +C3 +SELECT * INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/bug32533.txt' FIELDS ENCLOSED BY 0xC3 FROM t1; +TRUNCATE t1; +SELECT HEX(LOAD_FILE('MYSQLTEST_VARDIR/tmp/bug32533.txt')); +HEX(LOAD_FILE('MYSQLTEST_VARDIR/tmp/bug32533.txt')) +C35CC3C30A +LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/bug32533.txt' INTO TABLE t1 FIELDS ENCLOSED BY 0xC3; +SELECT HEX(c1) FROM t1; +HEX(c1) +C3 +DROP TABLE t1; # End of 5.0 tests. diff --git a/mysql-test/t/outfile_loaddata.test b/mysql-test/t/outfile_loaddata.test index 2f6ac998b3d..2a120871e7d 100644 --- a/mysql-test/t/outfile_loaddata.test +++ b/mysql-test/t/outfile_loaddata.test @@ -86,4 +86,28 @@ DROP TABLE t2; DROP TABLE t1; +--echo # +--echo # Bug#32533: SELECT INTO OUTFILE never escapes multibyte character +--echo # + +CREATE TABLE t1 (c1 VARCHAR(256)); +INSERT INTO t1 VALUES (0xC3); +SELECT HEX(c1) FROM t1; + +--let $file=$MYSQLTEST_VARDIR/tmp/bug32533.txt + +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--eval SELECT * INTO OUTFILE '$file' FIELDS ENCLOSED BY 0xC3 FROM t1 +TRUNCATE t1; + +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--eval SELECT HEX(LOAD_FILE('$file')) + +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--eval LOAD DATA INFILE '$file' INTO TABLE t1 FIELDS ENCLOSED BY 0xC3 +SELECT HEX(c1) FROM t1; + +--remove_file $file +DROP TABLE t1; + --echo # End of 5.0 tests. diff --git a/sql/sql_class.cc b/sql/sql_class.cc index ef199b6f883..93f5a34d5c6 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1219,16 +1219,18 @@ select_export::prepare(List &list, SELECT_LEX_UNIT *u) } } field_term_length=exchange->field_term->length(); - field_term_char= field_term_length ? (*exchange->field_term)[0] : INT_MAX; + field_term_char= field_term_length ? + (int) (uchar) (*exchange->field_term)[0] : INT_MAX; if (!exchange->line_term->length()) exchange->line_term=exchange->field_term; // Use this if it exists - field_sep_char= (exchange->enclosed->length() ? (*exchange->enclosed)[0] : - field_term_char); - escape_char= (exchange->escaped->length() ? (*exchange->escaped)[0] : -1); + field_sep_char= (exchange->enclosed->length() ? + (int) (uchar) (*exchange->enclosed)[0] : field_term_char); + escape_char= (exchange->escaped->length() ? 
+ (int) (uchar) (*exchange->escaped)[0] : -1); is_ambiguous_field_sep= test(strchr(ESCAPE_CHARS, field_sep_char)); is_unsafe_field_sep= test(strchr(NUMERIC_CHARS, field_sep_char)); line_sep_char= (exchange->line_term->length() ? - (*exchange->line_term)[0] : INT_MAX); + (int) (uchar) (*exchange->line_term)[0] : INT_MAX); if (!field_term_length) exchange->opt_enclosed=0; if (!exchange->enclosed->length()) @@ -1385,10 +1387,11 @@ bool select_export::send_data(List &items) Don't escape field_term_char by doubling - doubling is only valid for ENCLOSED BY characters: */ - (enclosed || !is_ambiguous_field_term || *pos != field_term_char)) + (enclosed || !is_ambiguous_field_term || + (int) (uchar) *pos != field_term_char)) { char tmp_buff[2]; - tmp_buff[0]= ((int) *pos == field_sep_char && + tmp_buff[0]= ((int) (uchar) *pos == field_sep_char && is_ambiguous_field_sep) ? field_sep_char : escape_char; tmp_buff[1]= *pos ? *pos : '0'; -- cgit v1.2.1 From 870d46819e517ec9dcd7b0e077147c69dbafa929 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 20 Nov 2007 19:18:21 +0200 Subject: Bug #32400: Complex SELECT query returns correct result only on some occasions Referencing an element from the SELECT list in a WHERE clause is not permitted. The namespace of the WHERE clause is the table columns only. This was not enforced correctly when resolving outer references in sub-queries. Fixed by not allowing references to aliases in a sub-query in WHERE. mysql-test/include/ps_query.inc: Bug #32400: fixed old test queries mysql-test/r/ps_2myisam.result: Bug #32400: fixed old test queries mysql-test/r/ps_3innodb.result: Bug #32400: fixed old test queries mysql-test/r/ps_4heap.result: Bug #32400: fixed old test queries mysql-test/r/ps_5merge.result: Bug #32400: fixed old test queries mysql-test/r/ps_6bdb.result: Bug #32400: fixed old test queries mysql-test/r/ps_7ndb.result: Bug #32400: fixed old test queries mysql-test/r/subselect.result: Bug #32400: test case mysql-test/t/subselect.test: Bug #32400: test case sql/item.cc: Bug #32400: don't allow references to aliases in WHERE tests/mysql_client_test.c: Bug #32400: fixed old test queries --- mysql-test/include/ps_query.inc | 4 ++-- mysql-test/r/ps_2myisam.result | 12 +++++----- mysql-test/r/ps_3innodb.result | 12 +++++----- mysql-test/r/ps_4heap.result | 12 +++++----- mysql-test/r/ps_5merge.result | 24 ++++++++++---------- mysql-test/r/ps_6bdb.result | 12 +++++----- mysql-test/r/ps_7ndb.result | 12 +++++----- mysql-test/r/subselect.result | 47 ++++++++++++++++++++++++++++++-------- mysql-test/t/subselect.test | 50 ++++++++++++++++++++++++++++++++++++----- sql/item.cc | 2 +- tests/mysql_client_test.c | 2 +- 11 files changed, 128 insertions(+), 61 deletions(-) diff --git a/mysql-test/include/ps_query.inc b/mysql-test/include/ps_query.inc index e96d666eaec..ae6027a0e07 100644 --- a/mysql-test/include/ps_query.inc +++ b/mysql-test/include/ps_query.inc @@ -434,8 +434,8 @@ execute stmt1 ; let $1= 3 ; while ($1) { - prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; + prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; deallocate prepare stmt1 ; dec $1 ; diff --git a/mysql-test/r/ps_2myisam.result b/mysql-test/r/ps_2myisam.result index 57932a6c455..3e0308330fa 100644 --- a/mysql-test/r/ps_2myisam.result +++ b/mysql-test/r/ps_2myisam.result @@ -783,20 +783,20 @@ a b 2 two 3 three 4 four -prepare stmt1 from ' SELECT a as ccc from t1 
where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 diff --git a/mysql-test/r/ps_3innodb.result b/mysql-test/r/ps_3innodb.result index fd24c29d558..f3cdbedce6f 100644 --- a/mysql-test/r/ps_3innodb.result +++ b/mysql-test/r/ps_3innodb.result @@ -783,20 +783,20 @@ a b 2 two 3 three 4 four -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 diff --git a/mysql-test/r/ps_4heap.result b/mysql-test/r/ps_4heap.result index b4596ab85bc..8477c05c787 100644 --- a/mysql-test/r/ps_4heap.result +++ b/mysql-test/r/ps_4heap.result @@ -784,20 +784,20 @@ a b 2 two 3 three 4 four -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 diff --git a/mysql-test/r/ps_5merge.result b/mysql-test/r/ps_5merge.result index 18982db937a..1bb16cf0a85 100644 --- a/mysql-test/r/ps_5merge.result +++ b/mysql-test/r/ps_5merge.result @@ -826,20 +826,20 @@ a b 2 two 3 three 4 four -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where 
outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 @@ -3848,20 +3848,20 @@ a b 2 two 3 three 4 four -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 diff --git a/mysql-test/r/ps_6bdb.result b/mysql-test/r/ps_6bdb.result index 0e4086bc202..2a52aedbde0 100644 --- a/mysql-test/r/ps_6bdb.result +++ b/mysql-test/r/ps_6bdb.result @@ -783,20 +783,20 @@ a b 2 two 3 three 4 four -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 diff --git a/mysql-test/r/ps_7ndb.result b/mysql-test/r/ps_7ndb.result index 7a20fb3146d..2da7e7e40c2 100644 --- a/mysql-test/r/ps_7ndb.result +++ b/mysql-test/r/ps_7ndb.result @@ -783,20 +783,20 @@ a b 2 two 3 three 4 four -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 deallocate prepare stmt1 ; -prepare stmt1 from ' SELECT a as ccc from t1 where a+1= - (SELECT 1+ccc from t1 where ccc+1=a+1 and a=1) '; +prepare stmt1 from ' SELECT a as ccc from t1 outr where a+1= + (SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1) '; execute stmt1 ; ccc 1 diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index d3c40ce5bd5..3f22118c1f7 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -77,10 +77,9 @@ ERROR HY000: Incorrect usage of PROCEDURE and subquery SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 
1)); ERROR HY000: Incorrect parameters to procedure 'ANALYSE' SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL; -a +ERROR 42S22: Unknown column 'a' in 'field list' SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NOT NULL; -a -1 +ERROR 42S22: Unknown column 'a' in 'field list' SELECT (SELECT 1,2,3) = ROW(1,2,3); (SELECT 1,2,3) = ROW(1,2,3) 1 @@ -2304,24 +2303,27 @@ drop table t1,t2; CREATE TABLE t1 ( a int, b int ); CREATE TABLE t2 ( c int, d int ); INSERT INTO t1 VALUES (1,2), (2,3), (3,4); -SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc); +SELECT a AS abc, b FROM t1 outr WHERE b = +(SELECT MIN(b) FROM t1 WHERE a=outr.a); abc b 1 2 2 3 3 4 -INSERT INTO t2 SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc); +INSERT INTO t2 SELECT a AS abc, b FROM t1 outr WHERE b = +(SELECT MIN(b) FROM t1 WHERE a=outr.a); select * from t2; c d 1 2 2 3 3 4 -CREATE TABLE t3 SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc); +CREATE TABLE t3 SELECT a AS abc, b FROM t1 outr WHERE b = +(SELECT MIN(b) FROM t1 WHERE a=outr.a); select * from t3; abc b 1 2 2 3 3 4 -prepare stmt1 from "INSERT INTO t2 SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc);"; +prepare stmt1 from "INSERT INTO t2 SELECT a AS abc, b FROM t1 outr WHERE b = (SELECT MIN(b) FROM t1 WHERE a=outr.a);"; execute stmt1; deallocate prepare stmt1; select * from t2; @@ -2333,7 +2335,7 @@ c d 2 3 3 4 drop table t3; -prepare stmt1 from "CREATE TABLE t3 SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc);"; +prepare stmt1 from "CREATE TABLE t3 SELECT a AS abc, b FROM t1 outr WHERE b = (SELECT MIN(b) FROM t1 WHERE a=outr.a);"; execute stmt1; select * from t3; abc b @@ -2515,7 +2517,9 @@ INSERT INTO t1 VALUES ('ASM','American Samoa','Oceania','Polynesia',199.00,0,680 INSERT INTO t1 VALUES ('ATF','French Southern territories','Antarctica','Antarctica',7780.00,0,0,NULL,0.00,NULL,'Terres australes françaises','Nonmetropolitan Territory of France','Jacques Chirac',NULL,'TF'); INSERT INTO t1 VALUES ('UMI','United States Minor Outlying Islands','Oceania','Micronesia/Caribbean',16.00,0,0,NULL,0.00,NULL,'United States Minor Outlying Islands','Dependent Territory of the US','George W. 
Bush',NULL,'UM'); /*!40000 ALTER TABLE t1 ENABLE KEYS */; -SELECT DISTINCT Continent AS c FROM t1 WHERE Code <> SOME ( SELECT Code FROM t1 WHERE Continent = c AND Population < 200); +SELECT DISTINCT Continent AS c FROM t1 outr WHERE +Code <> SOME ( SELECT Code FROM t1 WHERE Continent = outr.Continent AND +Population < 200); c Oceania drop table t1; @@ -4303,4 +4307,29 @@ LEFT(t1.a1,1) SELECT a2 FROM t2 WHERE t2.a2 IN (SELECT t1.a1 FROM t1,t3 WHERE t1.b1=t3.a3); a2 DROP TABLE t1, t2, t3; +CREATE TABLE t1(a INT, b INT); +INSERT INTO t1 VALUES (1,1), (1,2), (2,3), (2,4); +EXPLAIN +SELECT a AS out_a, MIN(b) FROM t1 +WHERE b > (SELECT MIN(b) FROM t1 WHERE a = out_a) +GROUP BY a; +ERROR 42S22: Unknown column 'out_a' in 'where clause' +SELECT a AS out_a, MIN(b) FROM t1 +WHERE b > (SELECT MIN(b) FROM t1 WHERE a = out_a) +GROUP BY a; +ERROR 42S22: Unknown column 'out_a' in 'where clause' +EXPLAIN +SELECT a AS out_a, MIN(b) FROM t1 t1_outer +WHERE b > (SELECT MIN(b) FROM t1 WHERE a = t1_outer.a) +GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1_outer ALL NULL NULL NULL NULL 4 Using where; Using temporary; Using filesort +2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 4 Using where +SELECT a AS out_a, MIN(b) FROM t1 t1_outer +WHERE b > (SELECT MIN(b) FROM t1 WHERE a = t1_outer.a) +GROUP BY a; +out_a MIN(b) +1 2 +2 4 +DROP TABLE t1; End of 5.0 tests. diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index ba6abbacf50..8c3775a1ba4 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -32,7 +32,9 @@ SELECT 1 FROM (SELECT 1 as a) b WHERE 1 IN (SELECT (SELECT a)); select (SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(1)); -- error 1108 SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 1)); +-- error ER_BAD_FIELD_ERROR SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL; +-- error ER_BAD_FIELD_ERROR SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NOT NULL; SELECT (SELECT 1,2,3) = ROW(1,2,3); SELECT (SELECT 1,2,3) = ROW(1,2,1); @@ -1346,17 +1348,20 @@ drop table t1,t2; CREATE TABLE t1 ( a int, b int ); CREATE TABLE t2 ( c int, d int ); INSERT INTO t1 VALUES (1,2), (2,3), (3,4); -SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc); -INSERT INTO t2 SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc); +SELECT a AS abc, b FROM t1 outr WHERE b = + (SELECT MIN(b) FROM t1 WHERE a=outr.a); +INSERT INTO t2 SELECT a AS abc, b FROM t1 outr WHERE b = + (SELECT MIN(b) FROM t1 WHERE a=outr.a); select * from t2; -CREATE TABLE t3 SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc); +CREATE TABLE t3 SELECT a AS abc, b FROM t1 outr WHERE b = + (SELECT MIN(b) FROM t1 WHERE a=outr.a); select * from t3; -prepare stmt1 from "INSERT INTO t2 SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc);"; +prepare stmt1 from "INSERT INTO t2 SELECT a AS abc, b FROM t1 outr WHERE b = (SELECT MIN(b) FROM t1 WHERE a=outr.a);"; execute stmt1; deallocate prepare stmt1; select * from t2; drop table t3; -prepare stmt1 from "CREATE TABLE t3 SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc);"; +prepare stmt1 from "CREATE TABLE t3 SELECT a AS abc, b FROM t1 outr WHERE b = (SELECT MIN(b) FROM t1 WHERE a=outr.a);"; execute stmt1; select * from t3; deallocate prepare stmt1; @@ -1529,7 +1534,9 @@ INSERT INTO t1 VALUES ('ASM','American Samoa','Oceania','Polynesia',199.00,0,680 INSERT INTO t1 VALUES ('ATF','French Southern 
territories','Antarctica','Antarctica',7780.00,0,0,NULL,0.00,NULL,'Terres australes françaises','Nonmetropolitan Territory of France','Jacques Chirac',NULL,'TF'); INSERT INTO t1 VALUES ('UMI','United States Minor Outlying Islands','Oceania','Micronesia/Caribbean',16.00,0,0,NULL,0.00,NULL,'United States Minor Outlying Islands','Dependent Territory of the US','George W. Bush',NULL,'UM'); /*!40000 ALTER TABLE t1 ENABLE KEYS */; -SELECT DISTINCT Continent AS c FROM t1 WHERE Code <> SOME ( SELECT Code FROM t1 WHERE Continent = c AND Population < 200); +SELECT DISTINCT Continent AS c FROM t1 outr WHERE + Code <> SOME ( SELECT Code FROM t1 WHERE Continent = outr.Continent AND + Population < 200); drop table t1; # @@ -3140,4 +3147,35 @@ SELECT LEFT(t1.a1,1) FROM t1,t3 WHERE t1.b1=t3.a3; SELECT a2 FROM t2 WHERE t2.a2 IN (SELECT t1.a1 FROM t1,t3 WHERE t1.b1=t3.a3); DROP TABLE t1, t2, t3; +# +# Bug #32400: Complex SELECT query returns correct result only on some +# occasions +# + +CREATE TABLE t1(a INT, b INT); +INSERT INTO t1 VALUES (1,1), (1,2), (2,3), (2,4); + +--error ER_BAD_FIELD_ERROR +EXPLAIN +SELECT a AS out_a, MIN(b) FROM t1 +WHERE b > (SELECT MIN(b) FROM t1 WHERE a = out_a) +GROUP BY a; + +--error ER_BAD_FIELD_ERROR +SELECT a AS out_a, MIN(b) FROM t1 +WHERE b > (SELECT MIN(b) FROM t1 WHERE a = out_a) +GROUP BY a; + +EXPLAIN +SELECT a AS out_a, MIN(b) FROM t1 t1_outer +WHERE b > (SELECT MIN(b) FROM t1 WHERE a = t1_outer.a) +GROUP BY a; + +SELECT a AS out_a, MIN(b) FROM t1 t1_outer +WHERE b > (SELECT MIN(b) FROM t1 WHERE a = t1_outer.a) +GROUP BY a; + +DROP TABLE t1; + + --echo End of 5.0 tests. diff --git a/sql/item.cc b/sql/item.cc index 431d82af331..3555df40060 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -3641,7 +3641,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) } /* Search in SELECT and GROUP lists of the outer select. */ - if (outer_context->resolve_in_select_list) + if (place != IN_WHERE && place != IN_ON) { if (!(ref= resolve_ref_in_select_and_group(thd, this, select))) return -1; /* Some error occurred (e.g. ambiguous names). */ diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 65e18da97f1..823878db23d 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -9283,7 +9283,7 @@ static void test_subqueries_ref() { MYSQL_STMT *stmt; int rc, i; - const char *query= "SELECT a as ccc from t1 where a+1=(SELECT 1+ccc from t1 where ccc+1=a+1 and a=1)"; + const char *query= "SELECT a as ccc from t1 outr where a+1=(SELECT 1+outr.a from t1 where outr.a+1=a+1 and a=1)"; myheader("test_subqueries_ref"); -- cgit v1.2.1 From 3a0d1f300bd3982f8332659270c531ecd8d1f288 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 21 Nov 2007 02:48:01 +0300 Subject: sql_select.cc: Additional stack check for the bug#31048. sql/sql_select.cc: Additional stack check for the bug#31048. 
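The hunk below makes get_quick_record_count() give up early when check_stack_overrun() reports that the thread is running low on stack, so a deeply nested statement fails cleanly (the fatal error flag is already set by the checker) instead of crashing. For a self-contained illustration of the mechanism, here is a toy sketch, not the server code: the 64 KB budget, the padding trick and every identifier in it are invented for the example. It bounds a recursion by measuring how far the stack has grown from an address recorded at startup, much as check_stack_overrun() measures the distance from the stack base remembered for the thread:

  #include <cstdio>
  #include <cstddef>

  /*
    Toy sketch of the guard pattern (not the server code): an anchor address
    is captured at startup and the recursion refuses to continue once the
    current frame has wandered more than a fixed budget away from it.
  */
  static const char *stack_anchor;                      /* set in main()     */
  static const std::ptrdiff_t STACK_BUDGET = 64 * 1024; /* invented margin   */

  static bool stack_overrun()
  {
    char here;
    std::ptrdiff_t used = stack_anchor - &here;  /* stack usually grows down */
    if (used < 0)
      used = -used;                              /* stay direction-agnostic  */
    return used > STACK_BUDGET;
  }

  static int max_safe_depth(int depth)
  {
    volatile char frame_pad[512];      /* give every frame a visible size    */
    frame_pad[0] = 0;                  /* and keep it from being optimized   */
    if (stack_overrun())
      return depth;                    /* stop cleanly instead of crashing   */
    int deeper = max_safe_depth(depth + 1);
    return deeper + frame_pad[0];      /* read after the call: no tail-call  */
  }

  int main()
  {
    char anchor;
    stack_anchor = &anchor;
    std::printf("guard tripped at depth %d\n", max_safe_depth(0));
    return 0;
  }

Built without optimizations the guard trips after a hundred frames or so; the exact depth does not matter, only that the recursion stops by decision rather than by running the process out of stack.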
--- sql/sql_select.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 3d3b8668a79..e25d0d78c87 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -2332,6 +2332,11 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select, { int error; DBUG_ENTER("get_quick_record_count"); +#ifndef EMBEDDED_LIBRARY // Avoid compiler warning + char buff[STACK_BUFF_ALLOC]; +#endif + if (check_stack_overrun(thd, STACK_MIN_SIZE, buff)) + DBUG_RETURN(0); // Fatal error flag is set if (select) { select->head=table; -- cgit v1.2.1 From e9832ceeac70a62b53d72a6c48e672165d658d8f Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 21 Nov 2007 11:40:05 +0200 Subject: Bug #30788: Inconsistent retrieval of char/varchar Index lookup does not always guarantee that we can simply remove the relevant conditions from the WHERE clause. Reasons can be e.g. conversion errors, partial indexes etc. The optimizer was removing these parts of the WHERE condition without any further checking. This leads to "false positives" when using indexes. Fixed by checking the index reference conditions (using WHERE) when using indexes with sub-queries. mysql-test/r/subselect.result: Bug #30788: - using where - test case mysql-test/r/subselect3.result: Bug #30788: using where mysql-test/t/subselect.test: Bug #30788: test case sql/item.h: Bug #30788: - Declare eq() method of Item_cache descendants : this is used in test_if_ref() - preserve the field that is being cached for type comparisions sql/sql_select.cc: Bug #30788: Don't remove the WHERE when using index lookup with subqueries. --- mysql-test/r/subselect.result | 47 +++++++++++++++++++++++++++++++++++------- mysql-test/r/subselect3.result | 2 +- mysql-test/t/subselect.test | 25 ++++++++++++++++++++++ sql/item.h | 22 +++++++++++++++++++- sql/sql_select.cc | 26 +++++++++++++---------- 5 files changed, 101 insertions(+), 21 deletions(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index be99bdb1afc..b4b6506a4de 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -1307,7 +1307,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t2 index NULL PRIMARY 4 NULL 4 Using where; Using index 2 DEPENDENT SUBQUERY t1 unique_subquery PRIMARY PRIMARY 4 func 1 Using where Warnings: -Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` where (`test`.`t2`.`a`,(((`test`.`t2`.`a`) in t1 on PRIMARY where (`test`.`t1`.`b` <> 30)))) +Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` where (`test`.`t2`.`a`,(((`test`.`t2`.`a`) in t1 on PRIMARY where ((`test`.`t1`.`b` <> 30) and ((`test`.`t2`.`a`) = `test`.`t1`.`a`))))) select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a); a 2 @@ -1315,8 +1315,8 @@ a explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t2 index NULL PRIMARY 4 NULL 4 Using where; Using index -2 DEPENDENT SUBQUERY t1 eq_ref PRIMARY PRIMARY 4 func 1 Using where -2 DEPENDENT SUBQUERY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 Using where; Using index +2 DEPENDENT SUBQUERY t1 eq_ref PRIMARY PRIMARY 4 func 1 +2 DEPENDENT SUBQUERY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 Using index Warnings: Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` where (`test`.`t2`.`a`,(select 1 AS `Not_used` from `test`.`t1` join `test`.`t3` where ((`test`.`t3`.`a` = `test`.`t1`.`b`) and ((`test`.`t2`.`a`) = `test`.`t1`.`a`)))) 
drop table t1, t2, t3; @@ -1334,9 +1334,9 @@ a explain extended select * from t2 where t2.a in (select a from t1); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t2 index NULL a 5 NULL 4 Using where; Using index -2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1001 Using index +2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1001 Using index; Using where Warnings: -Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` where (`test`.`t2`.`a`,(((`test`.`t2`.`a`) in t1 on a))) +Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` where (`test`.`t2`.`a`,(((`test`.`t2`.`a`) in t1 on a where ((`test`.`t2`.`a`) = `test`.`t1`.`a`)))) select * from t2 where t2.a in (select a from t1 where t1.b <> 30); a 2 @@ -1346,7 +1346,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t2 index NULL a 5 NULL 4 Using where; Using index 2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1001 Using index; Using where Warnings: -Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` where (`test`.`t2`.`a`,(((`test`.`t2`.`a`) in t1 on a where (`test`.`t1`.`b` <> 30)))) +Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` where (`test`.`t2`.`a`,(((`test`.`t2`.`a`) in t1 on a where ((`test`.`t1`.`b` <> 30) and ((`test`.`t2`.`a`) = `test`.`t1`.`a`))))) select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a); a 2 @@ -1373,7 +1373,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t2 index NULL a 5 NULL 4 Using where; Using index 2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1001 Using index; Using where Warnings: -Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` where (`test`.`t2`.`a`,(((`test`.`t2`.`a`) in t1 on a where (`test`.`t1`.`b` <> 30)))) +Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` where (`test`.`t2`.`a`,(((`test`.`t2`.`a`) in t1 on a where ((`test`.`t1`.`b` <> 30) and ((`test`.`t2`.`a`) = `test`.`t1`.`a`))))) drop table t1, t2, t3; create table t1 (a int, b int); create table t2 (a int, b int); @@ -1737,7 +1737,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 12 Using where 2 DEPENDENT SUBQUERY t1 unique_subquery PRIMARY PRIMARY 4 func 1 Using index; Using where Warnings: -Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`text` AS `text` from `test`.`t1` where (not((`test`.`t1`.`id`,(((`test`.`t1`.`id`) in t1 on PRIMARY where (`test`.`t1`.`id` < 8)))))) +Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`text` AS `text` from `test`.`t1` where (not((`test`.`t1`.`id`,(((`test`.`t1`.`id`) in t1 on PRIMARY where ((`test`.`t1`.`id` < 8) and ((`test`.`t1`.`id`) = `test`.`t1`.`id`))))))) explain extended select * from t1 as tt where not exists (select id from t1 where id < 8 and (id = tt.id or id is null) having id is not null); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY tt ALL NULL NULL NULL NULL 12 Using where @@ -4139,4 +4139,35 @@ SELECT (SELECT SUM(t1.a) FROM t2 WHERE a=1) FROM t1; (SELECT SUM(t1.a) FROM t2 WHERE a=1) 3 DROP TABLE t1,t2; +CREATE TABLE t1 (a CHAR(1), b VARCHAR(10)); +INSERT INTO t1 VALUES ('a', 'aa'); +INSERT INTO t1 VALUES ('a', 'aaa'); +SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1); +a b +CREATE INDEX I1 ON t1 (a); +CREATE INDEX I2 ON t1 (b); +EXPLAIN SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where +2 DEPENDENT SUBQUERY t1 
index_subquery I1 I1 2 func 2 Using index; Using where +SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1); +a b +CREATE TABLE t2 (a VARCHAR(1), b VARCHAR(10)); +INSERT INTO t2 SELECT * FROM t1; +CREATE INDEX I1 ON t2 (a); +CREATE INDEX I2 ON t2 (b); +EXPLAIN SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where +2 DEPENDENT SUBQUERY t2 index_subquery I1 I1 4 func 2 Using index; Using where +SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2); +a b +EXPLAIN +SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where +2 DEPENDENT SUBQUERY t1 index_subquery I1 I1 2 func 2 Using index; Using where +SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500); +a b +DROP TABLE t1,t2; End of 5.0 tests. diff --git a/mysql-test/r/subselect3.result b/mysql-test/r/subselect3.result index 098dacc8189..bdf00e4c307 100644 --- a/mysql-test/r/subselect3.result +++ b/mysql-test/r/subselect3.result @@ -757,6 +757,6 @@ a EXPLAIN SELECT a FROM t1 WHERE a NOT IN (SELECT a FROM t2); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where -2 DEPENDENT SUBQUERY t2 unique_subquery PRIMARY PRIMARY 4 func 1 Using index +2 DEPENDENT SUBQUERY t2 unique_subquery PRIMARY PRIMARY 4 func 1 Using index; Using where DROP TABLE t1; End of 5.0 tests diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index d076ca6bd33..9154e67c4fb 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -2987,4 +2987,29 @@ SELECT (SELECT SUM(t1.a) FROM t2 WHERE a!=0) FROM t1; SELECT (SELECT SUM(t1.a) FROM t2 WHERE a=1) FROM t1; DROP TABLE t1,t2; +# +# Bug #30788: Inconsistent retrieval of char/varchar +# + +CREATE TABLE t1 (a CHAR(1), b VARCHAR(10)); +INSERT INTO t1 VALUES ('a', 'aa'); +INSERT INTO t1 VALUES ('a', 'aaa'); +SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1); +CREATE INDEX I1 ON t1 (a); +CREATE INDEX I2 ON t1 (b); +EXPLAIN SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1); +SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1); + +CREATE TABLE t2 (a VARCHAR(1), b VARCHAR(10)); +INSERT INTO t2 SELECT * FROM t1; +CREATE INDEX I1 ON t2 (a); +CREATE INDEX I2 ON t2 (b); +EXPLAIN SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2); +SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2); +EXPLAIN +SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500); +SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500); + +DROP TABLE t1,t2; + --echo End of 5.0 tests. diff --git a/sql/item.h b/sql/item.h index b611c59b8f1..ba65014d5e6 100644 --- a/sql/item.h +++ b/sql/item.h @@ -2452,8 +2452,18 @@ class Item_cache: public Item protected: Item *example; table_map used_table_map; + /* + Field that this object will get value from. This is set/used by + index-based subquery engines to detect and remove the equality injected + by IN->EXISTS transformation. + For all other uses of Item_cache, cached_field doesn't matter. 
+ */ + Field *cached_field; public: - Item_cache(): example(0), used_table_map(0) {fixed= 1; null_value= 1;} + Item_cache(): example(0), used_table_map(0), cached_field(0) + { + fixed= 1; null_value= 1; + } void set_used_tables(table_map map) { used_table_map= map; } @@ -2465,6 +2475,8 @@ public: decimals= item->decimals; collation.set(item->collation); unsigned_flag= item->unsigned_flag; + if (item->type() == FIELD_ITEM) + cached_field= ((Item_field *)item)->field; return 0; }; virtual void store(Item *)= 0; @@ -2475,6 +2487,14 @@ public: // to prevent drop fixed flag (no need parent cleanup call) void cleanup() {} void print(String *str); + bool eq_def(Field *field) + { + return cached_field ? cached_field->eq_def (field) : FALSE; + } + bool eq(const Item *item, bool binary_cmp) const + { + return this == item; + } }; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 7af39071561..211ac416c6d 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -217,6 +217,7 @@ static void select_describe(JOIN *join, bool need_tmp_table,bool need_order, bool distinct, const char *message=NullS); static Item *remove_additional_cond(Item* conds); static void add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab); +static bool test_if_ref(Item_field *left_item,Item *right_item); /* @@ -673,9 +674,6 @@ err: without "checking NULL", remove the predicates that were pushed down into the subquery. - We can remove the equalities that will be guaranteed to be true by the - fact that subquery engine will be using index lookup. - If the subquery compares scalar values, we can remove the condition that was wrapped into trig_cond (it will be checked when needed by the subquery engine) @@ -685,6 +683,12 @@ err: and non-NULL values, we'll do a full table scan and will rely on the equalities corresponding to non-NULL parts of left tuple to filter out non-matching records. + + TODO: We can remove the equalities that will be guaranteed to be true by the + fact that subquery engine will be using index lookup. This must be done only + for cases where there are no conversion errors of significance, e.g. 257 + that is searched in a byte. But this requires homogenization of the return + codes of all Field*::store() methods. 
*/ void JOIN::remove_subq_pushed_predicates(Item **where) @@ -692,17 +696,13 @@ void JOIN::remove_subq_pushed_predicates(Item **where) if (conds->type() == Item::FUNC_ITEM && ((Item_func *)this->conds)->functype() == Item_func::EQ_FUNC && ((Item_func *)conds)->arguments()[0]->type() == Item::REF_ITEM && - ((Item_func *)conds)->arguments()[1]->type() == Item::FIELD_ITEM) + ((Item_func *)conds)->arguments()[1]->type() == Item::FIELD_ITEM && + test_if_ref ((Item_field *)((Item_func *)conds)->arguments()[1], + ((Item_func *)conds)->arguments()[0])) { *where= 0; return; } - if (conds->type() == Item::COND_ITEM && - ((class Item_func *)this->conds)->functype() == - Item_func::COND_AND_FUNC) - { - *where= remove_additional_cond(conds); - } } @@ -1219,7 +1219,7 @@ JOIN::optimize() { if (!having) { - Item *where= 0; + Item *where= conds; if (join_tab[0].type == JT_EQ_REF && join_tab[0].ref.items[0]->name == in_left_expr_name) { @@ -11862,8 +11862,12 @@ static bool test_if_ref(Item_field *left_item,Item *right_item) Item *ref_item=part_of_refkey(field->table,field); if (ref_item && ref_item->eq(right_item,1)) { + right_item= right_item->real_item(); if (right_item->type() == Item::FIELD_ITEM) return (field->eq_def(((Item_field *) right_item)->field)); + /* remove equalities injected by IN->EXISTS transformation */ + else if (right_item->type() == Item::CACHE_ITEM) + return ((Item_cache *)right_item)->eq_def (field); if (right_item->const_item() && !(right_item->is_null())) { /* -- cgit v1.2.1 From 16f15dba44132121f025e7308e59247c2a0051c9 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 21 Nov 2007 15:56:45 +0200 Subject: merge of bug 30788 to 5.1-opt --- mysql-test/r/subselect.result | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index 5d7c51578d1..fe7664f9b9c 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -1315,7 +1315,7 @@ a explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 index NULL PRIMARY 4 NULL 4 100.00 Using where; Using index -2 DEPENDENT SUBQUERY t1 eq_ref PRIMARY PRIMARY 4 func 1 100.00 Using where +2 DEPENDENT SUBQUERY t1 eq_ref PRIMARY PRIMARY 4 func 1 100.00 2 DEPENDENT SUBQUERY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 100.00 Using index Warnings: Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` where (`test`.`t2`.`a`,(select 1 AS `Not_used` from `test`.`t1` join `test`.`t3` where ((`test`.`t3`.`a` = `test`.`t1`.`b`) and ((`test`.`t2`.`a`) = `test`.`t1`.`a`)))) -- cgit v1.2.1 From 0b489d3fd371d6d548f94249cf10d9435df2c717 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 21 Nov 2007 18:52:38 +0200 Subject: Bug #31171: test alter_table, fulltext2, ps, sp fail, "Table is already up to date" vs. "OK" On MacOSX 10.5 when you cast something to "bool" (the built in C type) it takes values 0 or 1 instead of 0-255 as it seems to be on older compilers. Fixed by removing the typecast (not needed). No test case needed : there are tests that test it. 
storage/myisam/mi_open.c: Bug #31171: don't typecast when not needed --- storage/myisam/mi_open.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c index b848c822f75..0f5d820488c 100644 --- a/storage/myisam/mi_open.c +++ b/storage/myisam/mi_open.c @@ -916,7 +916,7 @@ uchar *mi_state_info_read(uchar *ptr, MI_STATE_INFO *state) key_blocks=state->header.max_block_size_index; state->open_count = mi_uint2korr(ptr); ptr +=2; - state->changed= (bool) *ptr++; + state->changed= *ptr++; state->sortkey = (uint) *ptr++; state->state.records= mi_rowkorr(ptr); ptr +=8; state->state.del = mi_rowkorr(ptr); ptr +=8; -- cgit v1.2.1 From a7c04594b8aced94b04329605f7789bb644baf91 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 21 Nov 2007 22:56:42 +0400 Subject: Fixed bug #32556: assert in "using index for group-by" : is_last_prefix <= 0, file .\opt_range.cc. SELECT ... GROUP BY bit field failed with an assertion if the bit length of that field was not divisible by 8. sql/key.cc: Fixed bug #32556. Copying of "uneven" bits of a bit field was duplicated in the key_copy() and in the Field_bit::get_key_image(). So, instead of copying of the rest of a bit field, Field_bit::get_key_image() copied "uneven" bits to key image again, and the lowest field byte was not copied to key at all. Duplicated code has been removed from the key_copy function. mysql-test/t/type_bit.test: Added test case for bug #32556. mysql-test/r/type_bit.result: Added test case for bug #32556. --- mysql-test/r/type_bit.result | 10 ++++++++++ mysql-test/t/type_bit.test | 15 +++++++++++++++ sql/key.cc | 13 ------------- 3 files changed, 25 insertions(+), 13 deletions(-) diff --git a/mysql-test/r/type_bit.result b/mysql-test/r/type_bit.result index 4c1b80c2fd5..917f78d8f17 100644 --- a/mysql-test/r/type_bit.result +++ b/mysql-test/r/type_bit.result @@ -672,4 +672,14 @@ COUNT(DISTINCT b,c) 2 2 DROP TABLE t2; +CREATE TABLE t1(a BIT(13), KEY(a)); +INSERT INTO t1(a) VALUES +(65535),(65525),(65535),(65535),(65535),(65535),(65535),(65535),(65535),(65535); +EXPLAIN SELECT 1 FROM t1 GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range NULL a 3 NULL 6 Using index for group-by +SELECT 1 FROM t1 GROUP BY a; +1 +1 +DROP TABLE t1; End of 5.0 tests diff --git a/mysql-test/t/type_bit.test b/mysql-test/t/type_bit.test index 4978f55d776..3095311bfbc 100644 --- a/mysql-test/t/type_bit.test +++ b/mysql-test/t/type_bit.test @@ -318,4 +318,19 @@ INSERT INTO t2 VALUES (3, 2, 'two'), (2, 3, 'three'), (2, 0, 'zero'), SELECT COUNT(DISTINCT b,c) FROM t2 GROUP BY a; DROP TABLE t2; +# +# BUG#32556 assert in "using index for group-by" : is_last_prefix <= 0, +# file .\opt_range.cc + +CREATE TABLE t1(a BIT(13), KEY(a)); +--disable_warnings +INSERT INTO t1(a) VALUES +(65535),(65525),(65535),(65535),(65535),(65535),(65535),(65535),(65535),(65535); +--enable_warnings + +EXPLAIN SELECT 1 FROM t1 GROUP BY a; +SELECT 1 FROM t1 GROUP BY a; + +DROP TABLE t1; + --echo End of 5.0 tests diff --git a/sql/key.cc b/sql/key.cc index 2bdde46b6b3..1c044f3dc7d 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -106,19 +106,6 @@ void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length) key_part->null_bit); key_length--; } - if (key_part->type == HA_KEYTYPE_BIT) - { - Field_bit *field= (Field_bit *) (key_part->field); - if (field->bit_len) - { - uchar bits= get_rec_bits((uchar*) from_record + - key_part->null_offset + - (key_part->null_bit == 128), - field->bit_ofs, 
field->bit_len); - *to_key++= bits; - key_length--; - } - } if (key_part->key_part_flag & HA_BLOB_PART || key_part->key_part_flag & HA_VAR_LENGTH_PART) { -- cgit v1.2.1 From 24b8074f63923f3af682a12989fe7825b6da27dc Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 22 Nov 2007 14:59:16 +0100 Subject: Bug #29139 missing error message for 275 --- storage/ndb/src/ndbapi/ndberror.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c index 0ad2faff76a..ef2ab3b709b 100644 --- a/storage/ndb/src/ndbapi/ndberror.c +++ b/storage/ndb/src/ndbapi/ndberror.c @@ -169,7 +169,7 @@ ErrorBundle ErrorCodes[] = { { 219, DMEC, TR, "219" }, { 233, DMEC, TR, "Out of operation records in transaction coordinator (increase MaxNoOfConcurrentOperations)" }, - { 275, DMEC, TR, "275" }, + { 275, DMEC, TR, "Out of transaction records for complete phase (increase MaxNoOfConcurrentTransactions)" }, { 279, DMEC, TR, "Out of transaction markers in transaction coordinator" }, { 414, DMEC, TR, "414" }, { 418, DMEC, TR, "Out of transaction buffers in LQH" }, -- cgit v1.2.1 From ae5ec36bcb2f20fc63c7b069330d3b826a68f9da Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 22 Nov 2007 17:13:12 +0300 Subject: Fixed build failures on Windows introduced by the patch for bug #32221. We do not have any executables in libmysql/release/ anymore. --- scripts/make_win_bin_dist | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/make_win_bin_dist b/scripts/make_win_bin_dist index 849226c94ea..d065e171bb0 100755 --- a/scripts/make_win_bin_dist +++ b/scripts/make_win_bin_dist @@ -152,7 +152,6 @@ if [ x"$TARGET" != x"release" ] ; then cp server-tools/instance-manager/$TARGET/*.pdb $DESTDIR/bin/ fi cp tests/$TARGET/*.exe $DESTDIR/bin/ -cp libmysql/$TARGET/*.exe $DESTDIR/bin/ cp libmysql/$TARGET/libmysql.dll $DESTDIR/bin/ # FIXME really needed?! -- cgit v1.2.1 From 67522fcb9f92f8286060eeac3b5f054576b16735 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 22 Nov 2007 19:22:54 +0300 Subject: Fix for bug #29976: Excessive Slave I/O errors in replication tests Problem: The "Slave I/O thread couldn't register on master" error sporadically occurred in replication tests because the slave I/O thread got killed by STOP SLAVE before or while registering on master. Fixed by checking the state of the I/O thread, and issueing the error only if it was not explicitely killed by a user. sql/slave.cc: When the slave I/O thread fails to register on master, issue an error message only if it is not explicitely killed by a user with STOP SLAVE. 
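The hunks that follow apply this check in three places: when reading the master's clock, when registering the slave, and in the I/O thread's registration retry logic. As a toy model of the control-flow change (not the server code: the Thread struct, check_killed() and the printed messages stand in for THD/Master_info, check_io_slave_killed() and the real log calls), the whole point is that a registration failure is reported as an error only when the thread was not stopped on purpose:

  #include <cstdio>

  struct Thread { bool killed; };            /* stand-in for THD + Master_info */

  /* Mirrors the shape of check_io_slave_killed(): print 'info' only when the
     thread really was killed, and tell the caller whether that was the case. */
  static bool check_killed(const Thread &t, const char *info)
  {
    if (!t.killed)
      return false;
    if (info)
      std::printf("note:  %s\n", info);
    return true;
  }

  static void handle_register_failure(const Thread &t)
  {
    if (check_killed(t, "slave I/O thread killed while registering on master"))
      return;                              /* deliberate STOP SLAVE: no error */
    std::printf("error: slave I/O thread couldn't register on master\n");
    /* only here would the real code go on to retry or reconnect */
  }

  int main()
  {
    Thread stopped = { true  };
    Thread running = { false };
    handle_register_failure(stopped);  /* prints only the informational note */
    handle_register_failure(running);  /* prints the genuine error           */
    return 0;
  }

Running it prints the informational note for the stopped thread and the genuine error for the running one, which is the distinction whose absence caused the sporadic "couldn't register on master" failures in the replication tests.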
--- sql/slave.cc | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/sql/slave.cc b/sql/slave.cc index fcbd4eb841b..9d8b0dc95eb 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -137,6 +137,7 @@ static int terminate_slave_thread(THD *thd, pthread_cond_t* term_cond, volatile uint *slave_running, bool skip_lock); +static bool check_io_slave_killed(THD *thd, Master_info *mi, const char *info); /* Find out which replications threads are running @@ -821,7 +822,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi) mi->clock_diff_with_master= (long) (time((time_t*) 0) - strtoul(master_row[0], 0, 10)); } - else + else if (!check_io_slave_killed(mi->io_thd, mi, NULL)) { mi->clock_diff_with_master= 0; /* The "most sensible" value */ sql_print_warning("\"SELECT UNIX_TIMESTAMP()\" failed on master, " @@ -1223,7 +1224,7 @@ int register_slave_on_master(MYSQL* mysql, Master_info *mi, { *suppress_warnings= TRUE; // Suppress reconnect warning } - else + else if (!check_io_slave_killed(mi->io_thd, mi, NULL)) { char buf[256]; my_snprintf(buf, sizeof(buf), "%s (Errno: %d)", mysql_error(mysql), @@ -1985,7 +1986,7 @@ static bool check_io_slave_killed(THD *thd, Master_info *mi, const char *info) { if (io_slave_killed(thd, mi)) { - if (global_system_variables.log_warnings) + if (info && global_system_variables.log_warnings) sql_print_information(info); return TRUE; } @@ -2170,11 +2171,15 @@ connected: thd->proc_info = "Registering slave on master"; if (register_slave_on_master(mysql, mi, &suppress_warnings)) { - sql_print_error("Slave I/O thread couldn't register on master"); - if (check_io_slave_killed(thd, mi, "Slave I/O thread killed while \ -registering slave on master") || - try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings, - reconnect_messages[SLAVE_RECON_ACT_REG])) + if (!check_io_slave_killed(thd, mi, "Slave I/O thread killed " + "while registering slave on master")) + { + sql_print_error("Slave I/O thread couldn't register on master"); + if (try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings, + reconnect_messages[SLAVE_RECON_ACT_REG])) + goto err; + } + else goto err; goto connected; } -- cgit v1.2.1 From a197c4e95ddd69cf9ebe3a6b2796a68f420c009a Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Nov 2007 00:16:17 +0300 Subject: opt_range.cc: Fix for the bug#31048 for 64bit platforms. subselect.test, subselect.result: Corrected text case for the bug#31048. mysql-test/t/subselect.test: Corrected text case for the bug#31048. mysql-test/r/subselect.result: Corrected text case for the bug#31048. sql/opt_range.cc: Fix for the bug#31048 for 64bit platforms. 
--- mysql-test/r/subselect.result | 15 ++++++--------- mysql-test/t/subselect.test | 10 ++++------ sql/opt_range.cc | 2 +- 3 files changed, 11 insertions(+), 16 deletions(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index a99b5e5925b..4c398b12a8c 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -4148,9 +4148,8 @@ select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 -)group by b limit 1)group by b limit 1)group by b limit 1 +select sum(a) from t1 where a> ( select sum(a) from t1 +)group by b limit 1)group by b limit 1 )group by b limit 1)group by b limit 1)group by b limit 1 )group by b limit 1)group by b limit 1)group by b limit 1 )group by b limit 1)group by b limit 1)group by b limit 1) @@ -4191,9 +4190,8 @@ select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 -)group by b limit 1)group by b limit 1)group by b limit 1 +select sum(a) from t1 where a> ( select sum(a) from t1 +)group by b limit 1)group by b limit 1 )group by b limit 1)group by b limit 1)group by b limit 1 )group by b limit 1)group by b limit 1)group by b limit 1 )group by b limit 1)group by b limit 1)group by b limit 1) @@ -4210,9 +4208,8 @@ id select_type table type possible_keys key key_len ref rows Extra 9 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort 10 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort 11 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort -12 SUBQUERY t1 range a a 5 NULL 9 Using where; Using temporary; Using filesort -13 SUBQUERY t1 range a a 5 NULL 1 Using where; Using temporary; Using filesort -14 SUBQUERY t1 index NULL a 5 NULL 9 Using index +12 SUBQUERY t1 range a a 5 NULL 1 Using where; Using temporary; Using filesort +13 SUBQUERY t1 index NULL a 5 NULL 9 Using index explain select sum(a),a from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index 64b390e413d..562794ba501 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -3000,9 +3000,8 @@ select sum(a),a from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 - )group by b limit 1)group by b limit 1)group by b limit 1 + select sum(a) from t1 where a> ( select sum(a) from t1 + )group by b limit 1)group by b limit 1 )group by b limit 1)group by b limit 1)group by b limit 1 )group by b limit 1)group by b limit 1)group by b limit 1 )group by b limit 1)group by b limit 1)group by b limit 1) @@ -3044,9 +3043,8 @@ explain select sum(a),a from t1 where a> ( select sum(a) from t1 
where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 - )group by b limit 1)group by b limit 1)group by b limit 1 + select sum(a) from t1 where a> ( select sum(a) from t1 + )group by b limit 1)group by b limit 1 )group by b limit 1)group by b limit 1)group by b limit 1 )group by b limit 1)group by b limit 1)group by b limit 1 )group by b limit 1)group by b limit 1)group by b limit 1) diff --git a/sql/opt_range.cc b/sql/opt_range.cc index dbdb2b919dc..1a3c2bec621 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1987,7 +1987,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, KEY *key_info; PARAM param; - if (check_stack_overrun(thd, STACK_MIN_SIZE, buff)) + if (check_stack_overrun(thd, 2*STACK_MIN_SIZE, buff)) DBUG_RETURN(0); // Fatal error flag is set /* set up parameter that is passed to all functions */ -- cgit v1.2.1 From 41362e64ee2358f4f3803c4a07a728b51180e92d Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Nov 2007 10:25:19 +0100 Subject: Bug#29621 - ndb_mgmd can timeout logevent request, making it to fail --- storage/ndb/src/mgmsrv/MgmtSrvr.cpp | 82 +++++++++++++++++++++++-------------- 1 file changed, 52 insertions(+), 30 deletions(-) diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp index ee5bb5103d8..a00c68007a7 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -119,7 +119,11 @@ operator<<(NdbOut& out, const LogLevel & ll) void MgmtSrvr::logLevelThreadRun() { - while (!_isStopThread) { + while (!_isStopThread) + { + Vector failed_started_nodes; + Vector failed_log_level_requests; + /** * Handle started nodes */ @@ -144,14 +148,15 @@ MgmtSrvr::logLevelThreadRun() m_started_nodes.unlock(); if (setEventReportingLevelImpl(node, req)) - { - ndbout_c("setEventReportingLevelImpl(%d): failed", node); - } - - SetLogLevelOrd ord; - ord = m_nodeLogLevel[node]; - setNodeLogLevelImpl(node, ord); - + { + failed_started_nodes.push_back(node); + } + else + { + SetLogLevelOrd ord; + ord = m_nodeLogLevel[node]; + setNodeLogLevelImpl(node, ord); + } m_started_nodes.lock(); } } @@ -166,17 +171,20 @@ MgmtSrvr::logLevelThreadRun() if(req.blockRef == 0) { - req.blockRef = _ownReference; - if (setEventReportingLevelImpl(0, req)) - { - ndbout_c("setEventReportingLevelImpl: failed 2!"); - } + req.blockRef = _ownReference; + if (setEventReportingLevelImpl(0, req)) + { + failed_log_level_requests.push_back(req); + } } else { SetLogLevelOrd ord; ord = req; - setNodeLogLevelImpl(req.blockRef, ord); + if (setNodeLogLevelImpl(req.blockRef, ord)) + { + failed_log_level_requests.push_back(req); + } } m_log_level_requests.lock(); } @@ -185,7 +193,28 @@ MgmtSrvr::logLevelThreadRun() if(!ERROR_INSERTED(10000)) m_event_listner.check_listeners(); - NdbSleep_MilliSleep(_logLevelThreadSleep); + Uint32 sleeptime = _logLevelThreadSleep; + if (failed_started_nodes.size()) + { + m_started_nodes.lock(); + for (Uint32 i = 0; i; template class MutexVector; template class MutexVector; +template class Vector; template class MutexVector; -- cgit v1.2.1 From bcd2abaaf86ec3ac9bfa0389ce5537d5d61fab54 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Nov 2007 10:46:48 +0100 Subject: bug#28445 - Heartbeat does not start until first API_REGREQ is recevied - move api failure handling into own method - add 
START_ORD so that hb checking can start really early storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp: - make sure qmgr is "fully" informed about connections so that it can handle hb correctly - dont allow API/mysqld node to reconnect if we have not started yet (sp 8) storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp: - move api failure handling into own method - add START_ORD so that hb checking can start really early storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp: - move api failure handling into own method - add START_ORD so that hb checking can start really early - Init datastructures in constructor - as CONNECT_REP may occur before start phases storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp: - Init datastructures in constructor - as CONNECT_REP may occur before start phases - start hb handling directly on connect rep (instead of first hb) --- storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 57 ++-- storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp | 3 + storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp | 26 +- storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 362 +++++++++++++----------- 4 files changed, 243 insertions(+), 205 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 3406176d7a8..6c869435bfa 100644 --- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -421,9 +421,10 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal) // Uint32 noOfNodes = closeCom->noOfNodes; jamEntry(); - for (unsigned i = 0; i < MAX_NODES; i++){ - if(NodeBitmask::get(closeCom->theNodes, i)){ - + for (unsigned i = 0; i < MAX_NODES; i++) + { + if(NodeBitmask::get(closeCom->theNodes, i)) + { jam(); //----------------------------------------------------- @@ -437,7 +438,9 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal) globalTransporterRegistry.do_disconnect(i); } } - if (failNo != 0) { + + if (failNo != 0) + { jam(); signal->theData[0] = userRef; signal->theData[1] = failNo; @@ -456,13 +459,21 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal) jamEntry(); const Uint32 len = signal->getLength(); - if(len == 2){ - + if(len == 2) + { #ifdef ERROR_INSERT if (! 
((ERROR_INSERTED(9000) || ERROR_INSERTED(9002)) && c_error_9000_nodes_mask.get(tStartingNode))) #endif { + if (globalData.theStartLevel != NodeState::SL_STARTED && + (getNodeInfo(tStartingNode).m_type != NodeInfo::DB && + getNodeInfo(tStartingNode).m_type != NodeInfo::MGM)) + { + jam(); + goto done; + } + globalTransporterRegistry.do_connect(tStartingNode); globalTransporterRegistry.setIOState(tStartingNode, HaltIO); @@ -475,9 +486,11 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal) //----------------------------------------------------- } } else { - for(unsigned int i = 1; i < MAX_NODES; i++ ) { + for(unsigned int i = 1; i < MAX_NODES; i++ ) + { jam(); - if (i != getOwnNodeId() && getNodeInfo(i).m_type == tData2){ + if (i != getOwnNodeId() && getNodeInfo(i).m_type == tData2) + { jam(); #ifdef ERROR_INSERT @@ -496,6 +509,7 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal) } } +done: if (userRef != 0) { jam(); signal->theData[0] = tStartingNode; @@ -536,24 +550,10 @@ void Cmvmi::execDISCONNECT_REP(Signal *signal) setNodeInfo(hostId).m_connectCount++; const NodeInfo::NodeType type = getNodeInfo(hostId).getType(); ndbrequire(type != NodeInfo::INVALID); - - if(type == NodeInfo::DB || globalData.theStartLevel == NodeState::SL_STARTED){ - jam(); - DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0]; - rep->nodeId = hostId; - rep->err = errNo; - sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal, - DisconnectRep::SignalLength, JBA); - } else if((globalData.theStartLevel == NodeState::SL_CMVMI || - globalData.theStartLevel == NodeState::SL_STARTING) - && type == NodeInfo::MGM) { - /** - * Someone disconnected during cmvmi period - */ - jam(); - globalTransporterRegistry.do_connect(hostId); - } + sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal, + DisconnectRep::SignalLength, JBA); + cancelSubscription(hostId); signal->theData[0] = NDB_LE_Disconnected; @@ -587,6 +587,8 @@ void Cmvmi::execCONNECT_REP(Signal *signal){ */ if(type == NodeInfo::MGM){ jam(); + signal->theData[0] = hostId; + sendSignal(QMGR_REF, GSN_CONNECT_REP, signal, 1, JBA); } else { /** * Dont allow api nodes to connect @@ -802,6 +804,8 @@ Cmvmi::execSTART_ORD(Signal* signal) { } } } + + EXECUTE_DIRECT(QMGR, GSN_START_ORD, signal, 1); return ; } @@ -829,9 +833,6 @@ Cmvmi::execSTART_ORD(Signal* signal) { * * Do Restart */ - - globalScheduler.clear(); - globalTimeQueue.clear(); // Disconnect all nodes as part of the system restart. 
// We need to ensure that we are starting up diff --git a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp index 8d51b24ec6a..6a76ce5217a 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp +++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp @@ -265,6 +265,8 @@ private: void execALLOC_NODEID_CONF(Signal *); void execALLOC_NODEID_REF(Signal *); void completeAllocNodeIdReq(Signal *); + + void execSTART_ORD(Signal*); // Arbitration signals void execARBIT_CFG(Signal* signal); @@ -281,6 +283,7 @@ private: void check_readnodes_reply(Signal* signal, Uint32 nodeId, Uint32 gsn); Uint32 check_startup(Signal* signal); + void api_failed(Signal* signal, Uint32 aFailedNode); void node_failed(Signal* signal, Uint16 aFailedNode); void checkStartInterface(Signal* signal); void failReport(Signal* signal, diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp index f9950072ab4..2f03bd56694 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp @@ -31,10 +31,6 @@ void Qmgr::initData() cnoCommitFailedNodes = 0; c_maxDynamicId = 0; c_clusterNodes.clear(); - - Uint32 hbDBAPI = 500; - setHbApiDelay(hbDBAPI); - c_connectedNodes.set(getOwnNodeId()); c_stopReq.senderRef = 0; /** @@ -43,6 +39,27 @@ void Qmgr::initData() ndbrequire((Uint32)NodeInfo::DB == 0); ndbrequire((Uint32)NodeInfo::API == 1); ndbrequire((Uint32)NodeInfo::MGM == 2); + + NodeRecPtr nodePtr; + nodePtr.i = getOwnNodeId(); + ptrAss(nodePtr, nodeRec); + nodePtr.p->blockRef = reference(); + + c_connectedNodes.set(getOwnNodeId()); + setNodeInfo(getOwnNodeId()).m_version = NDB_VERSION; + + + /** + * Timeouts + */ + const ndb_mgm_configuration_iterator * p = + m_ctx.m_config.getOwnConfigIterator(); + ndbrequire(p != 0); + + Uint32 hbDBAPI = 1500; + ndb_mgm_get_int_parameter(p, CFG_DB_API_HEARTBEAT_INTERVAL, &hbDBAPI); + + setHbApiDelay(hbDBAPI); }//Qmgr::initData() void Qmgr::initRecords() @@ -113,6 +130,7 @@ Qmgr::Qmgr(Block_context& ctx) addRecSignal(GSN_DIH_RESTARTREF, &Qmgr::execDIH_RESTARTREF); addRecSignal(GSN_DIH_RESTARTCONF, &Qmgr::execDIH_RESTARTCONF); addRecSignal(GSN_NODE_VERSION_REP, &Qmgr::execNODE_VERSION_REP); + addRecSignal(GSN_START_ORD, &Qmgr::execSTART_ORD); initData(); }//Qmgr::Qmgr() diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 1fba4d62e17..23e7829481e 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -238,6 +238,38 @@ Qmgr::execREAD_CONFIG_REQ(Signal* signal) ReadConfigConf::SignalLength, JBB); } +void +Qmgr::execSTART_ORD(Signal* signal) +{ + /** + * Start timer handling + */ + signal->theData[0] = ZTIMER_HANDLING; + sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 1, JBB); + + NodeRecPtr nodePtr; + for (nodePtr.i = 1; nodePtr.i < MAX_NODES; nodePtr.i++) + { + ptrAss(nodePtr, nodeRec); + nodePtr.p->ndynamicId = 0; + if(getNodeInfo(nodePtr.i).m_type == NodeInfo::DB) + { + nodePtr.p->phase = ZINIT; + c_definedNodes.set(nodePtr.i); + } else { + nodePtr.p->phase = ZAPI_INACTIVE; + } + + setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0; + nodePtr.p->sendPrepFailReqStatus = Q_NOT_ACTIVE; + nodePtr.p->sendCommitFailReqStatus = Q_NOT_ACTIVE; + nodePtr.p->sendPresToStatus = Q_NOT_ACTIVE; + nodePtr.p->failState = NORMAL; + nodePtr.p->rcv[0] = 0; + nodePtr.p->rcv[1] = 0; + }//for +} + /* 4.2 ADD NODE MODULE*/ 
/*##########################################################################*/ @@ -298,8 +330,6 @@ void Qmgr::startphase1(Signal* signal) nodePtr.i = getOwnNodeId(); ptrAss(nodePtr, nodeRec); nodePtr.p->phase = ZSTARTING; - nodePtr.p->blockRef = reference(); - c_connectedNodes.set(nodePtr.i); signal->theData[0] = reference(); sendSignal(DBDIH_REF, GSN_DIH_RESTARTREQ, signal, 1, JBB); @@ -371,11 +401,14 @@ void Qmgr::execCONNECT_REP(Signal* signal) case ZFAIL_CLOSING: jam(); return; - case ZINIT: - ndbrequire(false); case ZAPI_ACTIVE: case ZAPI_INACTIVE: return; + case ZINIT: + ndbrequire(getNodeInfo(nodeId).m_type == NodeInfo::MGM); + break; + default: + ndbrequire(false); } if (getNodeInfo(nodeId).getType() != NodeInfo::DB) @@ -1212,12 +1245,6 @@ void Qmgr::execCM_REGREF(Signal* signal) { jam(); electionWon(signal); - - /** - * Start timer handling - */ - signal->theData[0] = ZTIMER_HANDLING; - sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 10, JBB); } return; @@ -1855,12 +1882,6 @@ Qmgr::joinedCluster(Signal* signal, NodeRecPtr nodePtr){ sendSttorryLab(signal); - /** - * Start timer handling - */ - signal->theData[0] = ZTIMER_HANDLING; - sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 10, JBB); - sendCmAckAdd(signal, getOwnNodeId(), CmAdd::CommitNew); } @@ -2094,25 +2115,6 @@ void Qmgr::findNeighbours(Signal* signal) /*---------------------------------------------------------------------------*/ void Qmgr::initData(Signal* signal) { - NodeRecPtr nodePtr; - for (nodePtr.i = 1; nodePtr.i < MAX_NODES; nodePtr.i++) { - ptrAss(nodePtr, nodeRec); - nodePtr.p->ndynamicId = 0; - if(getNodeInfo(nodePtr.i).m_type == NodeInfo::DB){ - nodePtr.p->phase = ZINIT; - c_definedNodes.set(nodePtr.i); - } else { - nodePtr.p->phase = ZAPI_INACTIVE; - } - - setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0; - nodePtr.p->sendPrepFailReqStatus = Q_NOT_ACTIVE; - nodePtr.p->sendCommitFailReqStatus = Q_NOT_ACTIVE; - nodePtr.p->sendPresToStatus = Q_NOT_ACTIVE; - nodePtr.p->failState = NORMAL; - nodePtr.p->rcv[0] = 0; - nodePtr.p->rcv[1] = 0; - }//for cfailureNr = 1; ccommitFailureNr = 1; cprepareFailureNr = 1; @@ -2146,13 +2148,11 @@ void Qmgr::initData(Signal* signal) ndbrequire(p != 0); Uint32 hbDBDB = 1500; - Uint32 hbDBAPI = 1500; Uint32 arbitTimeout = 1000; c_restartPartialTimeout = 30000; c_restartPartionedTimeout = 60000; c_restartFailureTimeout = ~0; ndb_mgm_get_int_parameter(p, CFG_DB_HEARTBEAT_INTERVAL, &hbDBDB); - ndb_mgm_get_int_parameter(p, CFG_DB_API_HEARTBEAT_INTERVAL, &hbDBAPI); ndb_mgm_get_int_parameter(p, CFG_DB_ARBIT_TIMEOUT, &arbitTimeout); ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTIAL_TIMEOUT, &c_restartPartialTimeout); @@ -2177,7 +2177,6 @@ void Qmgr::initData(Signal* signal) } setHbDelay(hbDBDB); - setHbApiDelay(hbDBAPI); setArbitTimeout(arbitTimeout); arbitRec.state = ARBIT_NULL; // start state for all nodes @@ -2204,7 +2203,6 @@ void Qmgr::initData(Signal* signal) execARBIT_CFG(signal); } - setNodeInfo(getOwnNodeId()).m_version = NDB_VERSION; }//Qmgr::initData() @@ -2237,20 +2235,22 @@ void Qmgr::timerHandlingLab(Signal* signal) hb_check_timer.reset(); } } - + if (interface_check_timer.check(TcurrentTime)) { jam(); interface_check_timer.reset(); checkStartInterface(signal); } + if (hb_api_timer.check(TcurrentTime)) + { + jam(); + hb_api_timer.reset(); + apiHbHandlingLab(signal); + } + if (cactivateApiCheck != 0) { jam(); - if (hb_api_timer.check(TcurrentTime)) { - jam(); - hb_api_timer.reset(); - apiHbHandlingLab(signal); - }//if if (clatestTransactionCheck == 0) { 
//------------------------------------------------------------- // Initialise the Transaction check timer. @@ -2367,18 +2367,21 @@ void Qmgr::apiHbHandlingLab(Signal* signal) if(type == NodeInfo::INVALID) continue; - if (TnodePtr.p->phase == ZAPI_ACTIVE){ + if (c_connectedNodes.get(nodeId)) + { jam(); setNodeInfo(TnodePtr.i).m_heartbeat_cnt++; - if(getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 2){ + if(getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 2) + { signal->theData[0] = NDB_LE_MissedHeartbeat; signal->theData[1] = nodeId; signal->theData[2] = getNodeInfo(TnodePtr.i).m_heartbeat_cnt - 1; sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); } - if (getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 4) { + if (getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 4) + { jam(); /*------------------------------------------------------------------*/ /* THE API NODE HAS NOT SENT ANY HEARTBEAT FOR THREE SECONDS. @@ -2390,8 +2393,8 @@ void Qmgr::apiHbHandlingLab(Signal* signal) signal->theData[0] = NDB_LE_DeadDueToHeartbeat; signal->theData[1] = nodeId; sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - - node_failed(signal, nodeId); + + api_failed(signal, nodeId); }//if }//if }//for @@ -2480,26 +2483,6 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo) sendSignal(DBTC_REF, GSN_API_FAILREQ, signal, 2, JBA); sendSignal(DBDICT_REF, GSN_API_FAILREQ, signal, 2, JBA); sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA); - - /**------------------------------------------------------------------------- - * THE OTHER NODE WAS AN API NODE. THE COMMUNICATION LINK IS ALREADY - * BROKEN AND THUS NO ACTION IS NEEDED TO BREAK THE CONNECTION. - * WE ONLY NEED TO SET PARAMETERS TO ENABLE A NEW CONNECTION IN A FEW - * SECONDS. - *-------------------------------------------------------------------------*/ - setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0; - setNodeInfo(failedNodePtr.i).m_version = 0; - recompute_version_info(getNodeInfo(failedNodePtr.i).m_type); - - CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0]; - - closeCom->xxxBlockRef = reference(); - closeCom->failNo = 0; - closeCom->noOfNodes = 1; - NodeBitmask::clear(closeCom->theNodes); - NodeBitmask::set(closeCom->theNodes, failedNodePtr.i); - sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal, - CloseComReqConf::SignalLength, JBA); }//Qmgr::sendApiFailReq() void Qmgr::execAPI_FAILREQ(Signal* signal) @@ -2512,20 +2495,7 @@ void Qmgr::execAPI_FAILREQ(Signal* signal) ndbrequire(getNodeInfo(failedNodePtr.i).getType() != NodeInfo::DB); - // ignore if api not active - if (failedNodePtr.p->phase != ZAPI_ACTIVE) - { - jam(); - // But send to SUMA anyway... 
- sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA); - return; - } - - signal->theData[0] = NDB_LE_Disconnected; - signal->theData[1] = failedNodePtr.i; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - - node_failed(signal, failedNodePtr.i); + api_failed(signal, signal->theData[0]); } void Qmgr::execAPI_FAILCONF(Signal* signal) @@ -2649,6 +2619,13 @@ void Qmgr::execDISCONNECT_REP(Signal* signal) ndbrequire(false); } + if (getNodeInfo(nodeId).getType() != NodeInfo::DB) + { + jam(); + api_failed(signal, nodeId); + return; + } + switch(nodePtr.p->phase){ case ZRUNNING: jam(); @@ -2685,66 +2662,109 @@ void Qmgr::node_failed(Signal* signal, Uint16 aFailedNode) failedNodePtr.i = aFailedNode; ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec); - if (getNodeInfo(failedNodePtr.i).getType() == NodeInfo::DB){ + ndbrequire(getNodeInfo(failedNodePtr.i).getType() == NodeInfo::DB); + + /**--------------------------------------------------------------------- + * THE OTHER NODE IS AN NDB NODE, WE HANDLE IT AS IF A HEARTBEAT + * FAILURE WAS DISCOVERED. + *---------------------------------------------------------------------*/ + switch(failedNodePtr.p->phase){ + case ZRUNNING: jam(); - /**--------------------------------------------------------------------- - * THE OTHER NODE IS AN NDB NODE, WE HANDLE IT AS IF A HEARTBEAT - * FAILURE WAS DISCOVERED. - *---------------------------------------------------------------------*/ - switch(failedNodePtr.p->phase){ - case ZRUNNING: - jam(); - failReportLab(signal, aFailedNode, FailRep::ZLINK_FAILURE); - return; - case ZFAIL_CLOSING: - jam(); - return; - case ZSTARTING: - c_start.reset(); - // Fall-through - default: - jam(); - /*---------------------------------------------------------------------*/ - // The other node is still not in the cluster but disconnected. - // We must restart communication in three seconds. - /*---------------------------------------------------------------------*/ - failedNodePtr.p->failState = NORMAL; - failedNodePtr.p->phase = ZFAIL_CLOSING; - setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0; - - CloseComReqConf * const closeCom = - (CloseComReqConf *)&signal->theData[0]; - - closeCom->xxxBlockRef = reference(); - closeCom->failNo = 0; - closeCom->noOfNodes = 1; - NodeBitmask::clear(closeCom->theNodes); - NodeBitmask::set(closeCom->theNodes, failedNodePtr.i); - sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal, - CloseComReqConf::SignalLength, JBA); - }//if + failReportLab(signal, aFailedNode, FailRep::ZLINK_FAILURE); return; - } - - /** - * API code - */ - jam(); - if (failedNodePtr.p->phase != ZFAIL_CLOSING){ + case ZFAIL_CLOSING: + jam(); + return; + case ZSTARTING: + c_start.reset(); + // Fall-through + default: jam(); - //------------------------------------------------------------------------- - // The API was active and has now failed. We need to initiate API failure - // handling. If the API had already failed then we can ignore this - // discovery. - //------------------------------------------------------------------------- + /*---------------------------------------------------------------------*/ + // The other node is still not in the cluster but disconnected. + // We must restart communication in three seconds. 
+ /*---------------------------------------------------------------------*/ + failedNodePtr.p->failState = NORMAL; failedNodePtr.p->phase = ZFAIL_CLOSING; - - sendApiFailReq(signal, aFailedNode); - arbitRec.code = ArbitCode::ApiFail; - handleArbitApiFail(signal, aFailedNode); + setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0; + + CloseComReqConf * const closeCom = + (CloseComReqConf *)&signal->theData[0]; + + closeCom->xxxBlockRef = reference(); + closeCom->failNo = 0; + closeCom->noOfNodes = 1; + NodeBitmask::clear(closeCom->theNodes); + NodeBitmask::set(closeCom->theNodes, failedNodePtr.i); + sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal, + CloseComReqConf::SignalLength, JBA); }//if return; -}//Qmgr::node_failed() +} + +void +Qmgr::api_failed(Signal* signal, Uint32 nodeId) +{ + NodeRecPtr failedNodePtr; + /**------------------------------------------------------------------------ + * A COMMUNICATION LINK HAS BEEN DISCONNECTED. WE MUST TAKE SOME ACTION + * DUE TO THIS. + *-----------------------------------------------------------------------*/ + failedNodePtr.i = nodeId; + ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec); + + if (failedNodePtr.p->phase == ZFAIL_CLOSING) + { + /** + * Failure handling already in progress + */ + jam(); + return; + } + + if (failedNodePtr.p->phase == ZAPI_ACTIVE) + { + jam(); + sendApiFailReq(signal, nodeId); + arbitRec.code = ArbitCode::ApiFail; + handleArbitApiFail(signal, nodeId); + } + else + { + /** + * Always inform SUMA + */ + jam(); + signal->theData[0] = nodeId; + signal->theData[1] = QMGR_REF; + sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA); + failedNodePtr.p->failState = NORMAL; + } + + failedNodePtr.p->phase = ZFAIL_CLOSING; + setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0; + setNodeInfo(failedNodePtr.i).m_version = 0; + recompute_version_info(getNodeInfo(failedNodePtr.i).m_type); + + CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0]; + closeCom->xxxBlockRef = reference(); + closeCom->failNo = 0; + closeCom->noOfNodes = 1; + NodeBitmask::clear(closeCom->theNodes); + NodeBitmask::set(closeCom->theNodes, failedNodePtr.i); + sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal, + CloseComReqConf::SignalLength, JBA); + + if (getNodeInfo(failedNodePtr.i).getType() == NodeInfo::MGM) + { + /** + * Allow MGM do reconnect "directly" + */ + jam(); + setNodeInfo(failedNodePtr.i).m_heartbeat_cnt = 3; + } +} /**-------------------------------------------------------------------------- * AN API NODE IS REGISTERING. 
IF FOR THE FIRST TIME WE WILL ENABLE @@ -4963,43 +4983,39 @@ Qmgr::execDUMP_STATE_ORD(Signal* signal) c_start.m_president_candidate_gci); infoEvent("ctoStatus = %d\n", ctoStatus); for(Uint32 i = 1; iphase){ - case ZINIT: - sprintf(buf, "Node %d: ZINIT(%d)", i, nodePtr.p->phase); - break; - case ZSTARTING: - sprintf(buf, "Node %d: ZSTARTING(%d)", i, nodePtr.p->phase); - break; - case ZRUNNING: - sprintf(buf, "Node %d: ZRUNNING(%d)", i, nodePtr.p->phase); - break; - case ZPREPARE_FAIL: - sprintf(buf, "Node %d: ZPREPARE_FAIL(%d)", i, nodePtr.p->phase); - break; - case ZFAIL_CLOSING: - sprintf(buf, "Node %d: ZFAIL_CLOSING(%d)", i, nodePtr.p->phase); - break; - case ZAPI_INACTIVE: - sprintf(buf, "Node %d: ZAPI_INACTIVE(%d)", i, nodePtr.p->phase); - break; - case ZAPI_ACTIVE: - sprintf(buf, "Node %d: ZAPI_ACTIVE(%d)", i, nodePtr.p->phase); - break; - default: - sprintf(buf, "Node %d: (%d)", i, nodePtr.p->phase); - break; - } - infoEvent(buf); + NodeRecPtr nodePtr; + nodePtr.i = i; + ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec); + char buf[100]; + switch(nodePtr.p->phase){ + case ZINIT: + sprintf(buf, "Node %d: ZINIT(%d)", i, nodePtr.p->phase); + break; + case ZSTARTING: + sprintf(buf, "Node %d: ZSTARTING(%d)", i, nodePtr.p->phase); + break; + case ZRUNNING: + sprintf(buf, "Node %d: ZRUNNING(%d)", i, nodePtr.p->phase); + break; + case ZPREPARE_FAIL: + sprintf(buf, "Node %d: ZPREPARE_FAIL(%d)", i, nodePtr.p->phase); + break; + case ZFAIL_CLOSING: + sprintf(buf, "Node %d: ZFAIL_CLOSING(%d)", i, nodePtr.p->phase); + break; + case ZAPI_INACTIVE: + sprintf(buf, "Node %d: ZAPI_INACTIVE(%d)", i, nodePtr.p->phase); + break; + case ZAPI_ACTIVE: + sprintf(buf, "Node %d: ZAPI_ACTIVE(%d)", i, nodePtr.p->phase); + break; + default: + sprintf(buf, "Node %d: (%d)", i, nodePtr.p->phase); + break; } + infoEvent(buf); } - default: - ; - }//switch + } #ifdef ERROR_INSERT if (signal->theData[0] == 935 && signal->getLength() == 2) -- cgit v1.2.1 From e7501cd9bc9480c1f181a51d2e968e9bb3050c1c Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Nov 2007 13:13:37 +0300 Subject: Fix for bug #32080: Excessive warnings on Solaris: setrlimit could not change the size of core files. Suppress the 'setrlimit could not change the size of the core files' warning in mysql-test-run. We do not want core files on some of the PushBuild hosts, and PushBuild itself does not set --core-files, so that warning is expected. mysql-test/lib/mtr_report.pl: Suppress the 'setrlimit could not change the size of the core files' warning in mysql-test-run. We do not want core files on some of the PushBuild machines, and PushBuild itself does not set --core-files, so that warning is expected. 
--- mysql-test/lib/mtr_report.pl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mysql-test/lib/mtr_report.pl b/mysql-test/lib/mtr_report.pl index dcd7d1ec842..53854a6e3b9 100644 --- a/mysql-test/lib/mtr_report.pl +++ b/mysql-test/lib/mtr_report.pl @@ -346,7 +346,10 @@ sub mtr_report_stats ($) { # BUG#29839 - lowercase_table3.test: Cannot find table test/T1 # from the internal data dictiona - /Cannot find table test\/BUG29839 from the internal data dictionary/ + /Cannot find table test\/BUG29839 from the internal data dictionary/ or + # BUG#32080 - Excessive warnings on Solaris: setrlimit could not + # change the size of core files + /setrlimit could not change the size of core files to 'infinity'/ ) { next; # Skip these lines -- cgit v1.2.1 From 8a06cfdcab0f16830ba9f25bb6c042f59d13b235 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Nov 2007 12:13:10 +0100 Subject: ndb - bug#32652 release subscriber storage/ndb/src/kernel/blocks/suma/Suma.cpp: release subscriber --- storage/ndb/src/kernel/blocks/suma/Suma.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index 94df9a2b32e..8e6c0cd7ee7 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -2400,6 +2400,7 @@ Suma::execSUB_START_REQ(Signal* signal){ { jam(); + c_subscriberPool.release(subbPtr); sendSubStartRef(signal, SubStartRef::PartiallyConnected); return; } -- cgit v1.2.1 From 339e8f5d6354d350ea6a5eb13ff8faead65dca49 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Nov 2007 13:40:53 +0200 Subject: merge 5.0-opt -> 5.1-opt --- mysql-test/r/subselect.result | 101 ++++++++++++++++++++++++++++++++++++++++++ sql/opt_range.cc | 2 +- sql/sql_select.cc | 2 +- 3 files changed, 103 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index fe7664f9b9c..54d33f18a53 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -4139,6 +4139,107 @@ SELECT (SELECT SUM(t1.a) FROM t2 WHERE a=1) FROM t1; (SELECT SUM(t1.a) FROM t2 WHERE a=1) 3 DROP TABLE t1,t2; +create table t1(a int,b int,key(a),key(b)); +insert into t1(a,b) values (1,2),(2,1),(2,3),(3,4),(5,4),(5,5), +(6,7),(7,4),(5,3); +select sum(a),a from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 +)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +sum(a) a +select sum(a),a from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( 
+select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +ERROR HY000: Thread stack overrun detected +explain select sum(a),a from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 +)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 index a a 5 NULL 9 Using where; Using index +2 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +3 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +4 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +5 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +6 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +7 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +8 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +9 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +10 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +11 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +12 SUBQUERY t1 range a a 5 NULL 1 Using where; Using temporary; Using filesort +13 SUBQUERY t1 index NULL a 5 NULL 9 Using index +explain select sum(a),a from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 
where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +ERROR HY000: Thread stack overrun detected +drop table t1; CREATE TABLE t1 (a1 INT, a2 INT); CREATE TABLE t2 (b1 INT, b2 INT); INSERT INTO t1 VALUES (100, 200); diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 10280168f07..a56fd9aad64 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -2162,7 +2162,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, if (!keys_to_use.is_clear_all()) { #ifndef EMBEDDED_LIBRARY // Avoid compiler warning - char buff[STACK_BUFF_ALLOC]; + uchar buff[STACK_BUFF_ALLOC]; #endif MEM_ROOT alloc; SEL_TREE *tree= NULL; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index bec8d987276..f379542f8d7 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -2367,7 +2367,7 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select, int error; DBUG_ENTER("get_quick_record_count"); #ifndef EMBEDDED_LIBRARY // Avoid compiler warning - char buff[STACK_BUFF_ALLOC]; + uchar buff[STACK_BUFF_ALLOC]; #endif if (check_stack_overrun(thd, STACK_MIN_SIZE, buff)) DBUG_RETURN(0); // Fatal error flag is set -- cgit v1.2.1 From 6b3fed49ea438ebb4e685510aa8f13f584ac0945 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Nov 2007 16:27:05 +0400 Subject: Bug#32178 server crash when select from i_s and concurrent partition management The crash happens because we change share->partition_info where 'share' is global struct (it affects other threads which use the same 'share'). It causes discrepancy between 'share' and handler data. The fix: Move share->partition_info update into WFRM_INSTALL_SHADOW part which is protected by OPEN_lock. sql/sql_partition.cc: fast_end_partition: added close_thread_tables() for the case when error occures fast_alter_partition_table: added close_thread_tables() for the case when error occures sql/sql_table.cc: The crash happens because we change share->partition_info where 'share' is global struct. It causes discrepancy between 'share' and handler data. The fix: Move share->partition_info update into WFRM_INSTALL_SHADOW part which is protected by OPEN_lock. 
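Illustration only, not part of the patch: the fix above follows the general rule that data shared between threads (here the partition_info string inside the TABLE_SHARE) may only be modified while the mutex that serializes access to it (LOCK_open) is held. Below is a minimal, self-contained C sketch of that pattern; the types and function are hypothetical stand-ins, not the real server code.

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct shared_share {               /* stand-in for TABLE_SHARE            */
      char  *partition_info;            /* read concurrently by other threads  */
      size_t partition_info_len;
    };

    static pthread_mutex_t share_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Prepare the new value first, then install it under the lock, so no
       other thread ever observes a half-updated share. */
    static int install_partition_info(struct shared_share *share,
                                      const char *syntax, size_t len)
    {
      char *copy = (char*) malloc(len + 1);   /* work done outside the lock   */
      if (copy == NULL)
        return 1;
      memcpy(copy, syntax, len);
      copy[len] = '\0';

      pthread_mutex_lock(&share_lock);        /* shared state touched only here */
      free(share->partition_info);
      share->partition_info     = copy;
      share->partition_info_len = len;
      pthread_mutex_unlock(&share_lock);
      return 0;
    }

The patch applies the same idea: generating the partition syntax string can happen early, but copying it into the shared TABLE_SHARE is deferred to the WFRM_INSTALL_SHADOW phase, which runs under LOCK_open.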
--- sql/sql_partition.cc | 14 +++++++++----- sql/sql_table.cc | 52 ++++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 47 insertions(+), 19 deletions(-) diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index a06ad0a4612..1f365ac991b 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -3984,6 +3984,7 @@ static int fast_end_partition(THD *thd, ulonglong copied, DBUG_RETURN(FALSE); } table->file->print_error(error, MYF(0)); + close_thread_tables(thd); DBUG_RETURN(TRUE); } @@ -6106,7 +6107,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, (error= table->file->repair_partitions(thd)))) { table->file->print_error(error, MYF(0)); - DBUG_RETURN(TRUE); + goto err; } } else if (fast_alter_partition & HA_PARTITION_ONE_PHASE) @@ -6153,7 +6154,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, if (mysql_write_frm(lpt, WFRM_WRITE_SHADOW | WFRM_PACK_FRM) || mysql_change_partitions(lpt)) { - DBUG_RETURN(TRUE); + goto err; } } else if (alter_info->flags == ALTER_DROP_PARTITION) @@ -6246,7 +6247,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, (release_name_lock(lpt), FALSE)) { handle_alter_part_error(lpt, not_completed, TRUE, frm_install); - DBUG_RETURN(TRUE); + goto err; } } else if ((alter_info->flags & ALTER_ADD_PARTITION) && @@ -6315,7 +6316,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, (release_name_lock(lpt), FALSE)) { handle_alter_part_error(lpt, not_completed, FALSE, frm_install); - DBUG_RETURN(TRUE); + goto err; } } else @@ -6408,7 +6409,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, (release_name_lock(lpt), FALSE)) { handle_alter_part_error(lpt, not_completed, FALSE, frm_install); - DBUG_RETURN(TRUE); + goto err; } } /* @@ -6418,6 +6419,9 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, DBUG_RETURN(fast_end_partition(thd, lpt->copied, lpt->deleted, table, table_list, FALSE, NULL, written_bin_log)); +err: + close_thread_tables(thd); + DBUG_RETURN(TRUE); } #endif diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 6f0864cb1cf..bf15968f09b 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1248,6 +1248,10 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) char shadow_path[FN_REFLEN+1]; char shadow_frm_name[FN_REFLEN+1]; char frm_name[FN_REFLEN+1]; +#ifdef WITH_PARTITION_STORAGE_ENGINE + char *part_syntax_buf; + uint syntax_len; +#endif DBUG_ENTER("mysql_write_frm"); /* @@ -1271,12 +1275,8 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) #ifdef WITH_PARTITION_STORAGE_ENGINE { partition_info *part_info= lpt->table->part_info; - char *part_syntax_buf; - uint syntax_len; - if (part_info) { - TABLE_SHARE *share= lpt->table->s; if (!(part_syntax_buf= generate_partition_syntax(part_info, &syntax_len, TRUE, TRUE))) @@ -1284,16 +1284,7 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) DBUG_RETURN(TRUE); } part_info->part_info_string= part_syntax_buf; - share->partition_info_len= part_info->part_info_len= syntax_len; - if (share->partition_info_buffer_size < syntax_len + 1) - { - share->partition_info_buffer_size= syntax_len+1; - if (!(share->partition_info= - (char*) alloc_root(&share->mem_root, syntax_len+1))) - DBUG_RETURN(TRUE); - - } - memcpy((char*) share->partition_info, part_syntax_buf, syntax_len + 1); + part_info->part_info_len= syntax_len; } } #endif @@ -1371,7 +1362,40 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) #endif { error= 1; + goto err; + } +#ifdef WITH_PARTITION_STORAGE_ENGINE + if (part_info) 
+ { + TABLE_SHARE *share= lpt->table->s; + char *tmp_part_syntax_str; + if (!(part_syntax_buf= generate_partition_syntax(part_info, + &syntax_len, + TRUE, TRUE))) + { + error= 1; + goto err; + } + if (share->partition_info_buffer_size < syntax_len + 1) + { + share->partition_info_buffer_size= syntax_len+1; + if (!(tmp_part_syntax_str= (char*) strmake_root(&share->mem_root, + part_syntax_buf, + syntax_len))) + { + error= 1; + goto err; + } + share->partition_info= tmp_part_syntax_str; + } + else + memcpy((char*) share->partition_info, part_syntax_buf, syntax_len + 1); + share->partition_info_len= part_info->part_info_len= syntax_len; + part_info->part_info_string= part_syntax_buf; } +#endif + +err: VOID(pthread_mutex_unlock(&LOCK_open)); #ifdef WITH_PARTITION_STORAGE_ENGINE deactivate_ddl_log_entry(part_info->frm_log_entry->entry_pos); -- cgit v1.2.1 From 9a34c80e117872b6e564879e0d6cb6dafaa6568a Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Nov 2007 18:21:24 +0400 Subject: Bug#30152 MySQLD crash duing alter table causes DROP DATABASE to FAIL due to temp file disable decoding of table name if the table is internal temporary table mysql-test/r/drop.result: test result mysql-test/t/drop.test: test case sql/sql_db.cc: check is the name is internal tmp table name sql/sql_table.cc: disable decoding of table name if the table is internal temporary table sql/table.h: added flag which is true when table name is the name of internal temporary table --- mysql-test/r/drop.result | 5 +++++ mysql-test/t/drop.test | 13 +++++++++++++ sql/sql_db.cc | 1 + sql/sql_table.cc | 5 +++-- sql/table.h | 1 + 5 files changed, 23 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/drop.result b/mysql-test/r/drop.result index ff11905aa34..4626dd580f7 100644 --- a/mysql-test/r/drop.result +++ b/mysql-test/r/drop.result @@ -85,3 +85,8 @@ select 1; 1 unlock tables; End of 5.0 tests +create database mysql_test; +create table mysql_test.t1(f1 int); +create table mysql_test.`#sql-347f_7` (f1 int); +drop database mysql_test; +End of 5.1 tests diff --git a/mysql-test/t/drop.test b/mysql-test/t/drop.test index a1451773e90..a79044436eb 100644 --- a/mysql-test/t/drop.test +++ b/mysql-test/t/drop.test @@ -122,3 +122,16 @@ disconnect addconroot2; connection default; --echo End of 5.0 tests + +# +# Bug#30152 MySQLD crash duing alter table causes DROP DATABASE to FAIL due to temp file +# +create database mysql_test; +create table mysql_test.t1(f1 int); +create table mysql_test.`#sql-347f_7` (f1 int); +create table mysql_test.`#sql-347f_8` (f1 int); +drop table mysql_test.`#sql-347f_8`; +copy_file $MYSQLTEST_VARDIR/master-data/mysql_test/t1.frm $MYSQLTEST_VARDIR/master-data/mysql_test/#sql-347f_6.frm; +drop database mysql_test; + +--echo End of 5.1 tests diff --git a/sql/sql_db.cc b/sql/sql_db.cc index abbf2131957..88902e65a42 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -1111,6 +1111,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, VOID(filename_to_tablename(file->name, table_list->table_name, strlen(file->name) + 1)); table_list->alias= table_list->table_name; // If lower_case_table_names=2 + table_list->internal_tmp_table= is_prefix(file->name, tmp_file_prefix); /* Link into list */ (*tot_list_next)= table_list; tot_list_next= &table_list->next_local; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index bf15968f09b..75c97d8edd4 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1663,8 +1663,9 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, } alias= 
(lower_case_table_names == 2) ? table->alias : table->table_name; /* remove .frm file and engine files */ - path_length= build_table_filename(path, sizeof(path), - db, alias, reg_ext, 0); + path_length= build_table_filename(path, sizeof(path), db, alias, reg_ext, + table->internal_tmp_table ? + FN_IS_TMP : 0); } if (drop_temporary || (table_type == NULL && diff --git a/sql/table.h b/sql/table.h index 2bbd71b70c6..440f3a3d5cf 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1081,6 +1081,7 @@ struct TABLE_LIST ... SELECT implementation). */ bool create; + bool internal_tmp_table; /* View creation context. */ -- cgit v1.2.1 From e4dc9a8e6e13405859afda2d472e4cc4b6dc169c Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 24 Nov 2007 14:57:09 +0400 Subject: merging fix mysql-test/r/gis.result: result fixed mysql-test/t/gis.test: test fixed --- mysql-test/r/gis.result | 2 +- mysql-test/t/gis.test | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 9c7f2bb4beb..4334a34c3ff 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -742,7 +742,7 @@ select geomfromtext(col9,col89) as a from t1; a NULL DROP TABLE t1; -create table t1(col1 geometry not null,col15 geometrycollection not +create table t1(col1 geometry default null,col15 geometrycollection not null,spatial index(col15),index(col1(15)))engine=myisam; insert into t1 set col15 = GeomFromText('POINT(6 5)'); insert into t1 set col15 = GeomFromText('POINT(6 5)'); diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index 6d7ec046c4c..d578b5c9955 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -443,7 +443,7 @@ DROP TABLE t1; # Bug #30284 spatial key corruption # -create table t1(col1 geometry not null,col15 geometrycollection not +create table t1(col1 geometry default null,col15 geometrycollection not null,spatial index(col15),index(col1(15)))engine=myisam; insert into t1 set col15 = GeomFromText('POINT(6 5)'); insert into t1 set col15 = GeomFromText('POINT(6 5)'); -- cgit v1.2.1 From aab5779bcab9aa3c18eb556a4222e879e26cdd07 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 26 Nov 2007 12:56:31 +0400 Subject: Bug#30152 MySQLD crash duing alter table causes DROP DATABASE to FAIL due to temp file(addon) result fix mysql-test/r/drop.result: result fix --- mysql-test/r/drop.result | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mysql-test/r/drop.result b/mysql-test/r/drop.result index 4626dd580f7..71d6fcc7cd0 100644 --- a/mysql-test/r/drop.result +++ b/mysql-test/r/drop.result @@ -88,5 +88,7 @@ End of 5.0 tests create database mysql_test; create table mysql_test.t1(f1 int); create table mysql_test.`#sql-347f_7` (f1 int); +create table mysql_test.`#sql-347f_8` (f1 int); +drop table mysql_test.`#sql-347f_8`; drop database mysql_test; End of 5.1 tests -- cgit v1.2.1 From f3f9855d133060c5279349edac49546748ceae57 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 26 Nov 2007 13:36:24 +0200 Subject: Bug #32036: EXISTS within a WHERE clause with a UNION crashes MySQL 5.122 There was a difference in how UNIONs are handled on top level and when in sub-query. Because the rules for sub-queries were syntactically allowing cases that are not currently supported by the server we had crashes (this bug) or wrong results (bug 32051). Fixed by making the syntax rules for UNIONs match the ones at top level. These rules however do not support nesting UNIONs, e.g. 
(SELECT a FROM t1 UNION ALL SELECT b FROM t2) UNION (SELECT c FROM t3 UNION ALL SELECT d FROM t4) Supports for statements with nested UNIONs will be added in a future version. mysql-test/r/subselect.result: Bug #32036: test case mysql-test/t/subselect.test: Bug #32036: test case sql/sql_yacc.yy: Bug #32036: Make the syntax rules for UNIONs in subqueries the same as for top level UNIONs. --- mysql-test/r/subselect.result | 35 +++++++++++++++++++++++++++-------- mysql-test/t/subselect.test | 31 ++++++++++++++++++++++++++++++- sql/sql_yacc.yy | 41 ++++++++++++++--------------------------- 3 files changed, 71 insertions(+), 36 deletions(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index bfacfc86eef..352519badcd 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -3558,22 +3558,19 @@ SELECT sql_no_cache * FROM t1 WHERE NOT EXISTS (SELECT i FROM t1) UNION (SELECT i FROM t1) ); -i +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'UNION +(SELECT i FROM t1) +)' at line 3 SELECT * FROM t1 WHERE NOT EXISTS (((SELECT i FROM t1) UNION (SELECT i FROM t1))); -i +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'UNION (SELECT i FROM t1)))' at line 2 explain select ((select t11.i from t1 t11) union (select t12.i from t1 t12)) from t1; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'union (select t12.i from t1 t12)) from t1' at line 1 explain select * from t1 where not exists ((select t11.i from t1 t11) union (select t12.i from t1 t12)); -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not found -2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used -3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table -4 UNION t12 system NULL NULL NULL NULL 0 const row not found -NULL UNION RESULT ALL NULL NULL NULL NULL NULL +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'union (select t12.i from t1 t12))' at line 2 DROP TABLE t1; CREATE TABLE t1 (a VARCHAR(250), b INT auto_increment, PRIMARY KEY (b)); insert into t1 (a) values (FLOOR(rand() * 100)); @@ -4150,4 +4147,26 @@ SELECT ((a1,a2) IN (SELECT * FROM t2 WHERE b2 > 0)) IS NULL FROM t1; 0 0 DROP TABLE t1, t2; +CREATE TABLE t1 (a INT); +CREATE TABLE t2 (a INT); +INSERT INTO t1 VALUES (1),(2); +INSERT INTO t2 VALUES (1),(2); +SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a)); +2 +2 +2 +EXPLAIN EXTENDED +SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1003 select 2 AS `2` from `test`.`t1` where exists(select 1 AS `1` from `test`.`t2` where (`test`.`t1`.`a` = `test`.`t2`.`a`)) +EXPLAIN EXTENDED +SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a) UNION +(SELECT 1 FROM t2 WHERE t1.a = t2.a)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL 
server version for the right syntax to use near 'UNION +(SELECT 1 FROM t2 WHERE t1.a = t2.a))' at line 2 +DROP TABLE t1,t2; End of 5.0 tests. diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index b5279331a5f..43e8c2532bf 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -2448,12 +2448,16 @@ DROP TABLE t1, t2; CREATE TABLE t1 (i INT); (SELECT i FROM t1) UNION (SELECT i FROM t1); +#TODO:not supported +--error ER_PARSE_ERROR SELECT sql_no_cache * FROM t1 WHERE NOT EXISTS ( (SELECT i FROM t1) UNION (SELECT i FROM t1) ); +#TODO:not supported +--error ER_PARSE_ERROR SELECT * FROM t1 WHERE NOT EXISTS (((SELECT i FROM t1) UNION (SELECT i FROM t1))); @@ -2461,7 +2465,9 @@ WHERE NOT EXISTS (((SELECT i FROM t1) UNION (SELECT i FROM t1))); --error 1064 explain select ((select t11.i from t1 t11) union (select t12.i from t1 t12)) from t1; -#supported + +#TODO:not supported +--error ER_PARSE_ERROR explain select * from t1 where not exists ((select t11.i from t1 t11) union (select t12.i from t1 t12)); @@ -3002,4 +3008,27 @@ INSERT INTO t2 VALUES (103, 203); SELECT ((a1,a2) IN (SELECT * FROM t2 WHERE b2 > 0)) IS NULL FROM t1; DROP TABLE t1, t2; +# +# Bug #32036: EXISTS within a WHERE clause with a UNION crashes MySQL 5.122 +# + +CREATE TABLE t1 (a INT); +CREATE TABLE t2 (a INT); + +INSERT INTO t1 VALUES (1),(2); +INSERT INTO t2 VALUES (1),(2); + +SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a)); +EXPLAIN EXTENDED +SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a)); + + +#TODO:not supported +--error ER_PARSE_ERROR +EXPLAIN EXTENDED +SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a) UNION + (SELECT 1 FROM t2 WHERE t1.a = t2.a)); + +DROP TABLE t1,t2; + --echo End of 5.0 tests. diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 3401bf739b3..2e2a9b180d0 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1137,7 +1137,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type internal_variable_name -%type subselect subselect_init +%type subselect take_first_select get_select_lex %type comp_op @@ -9422,35 +9422,22 @@ union_option: | ALL { $$=0; } ; +take_first_select: /* empty */ + { + $$= Lex->current_select->master_unit()->first_select(); + }; + subselect: - SELECT_SYM subselect_start subselect_init subselect_end + SELECT_SYM subselect_start select_init2 take_first_select + subselect_end { - $$= $3; + $$= $4; } - | '(' subselect_start subselect ')' - { - THD *thd= YYTHD; - /* - note that a local variable can't be used for - $3 as it's used in local variable construction - and some compilers can't guarnatee the order - in which the local variables are initialized. - */ - List_iterator it($3->item_list); - Item *item; - /* - we must fill the items list for the "derived table". - */ - while ((item= it++)) - add_item_to_list(thd, item); - } - union_clause subselect_end { $$= $3; }; - -subselect_init: - select_init2 - { - $$= Lex->current_select->master_unit()->first_select(); - }; + | '(' subselect_start select_paren take_first_select + subselect_end ')' + { + $$= $4; + }; subselect_start: { -- cgit v1.2.1 From 7257ed0e103abe0550c256b1b2cc46c4c918755f Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 26 Nov 2007 16:25:18 +0400 Subject: Bug #31900 Wrong confusing comment in mysql_com.h header file. comment fixed as we truly need const_item to be 1 to mark constant function include/mysql_com.h: Bug #31900 Wrong confusing comment in mysql_com.h header file. 
comment fixed --- include/mysql_com.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/include/mysql_com.h b/include/mysql_com.h index 56c7f7d2ab5..016df41476b 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -325,11 +325,11 @@ typedef struct st_udf_args typedef struct st_udf_init { - my_bool maybe_null; /* 1 if function can return NULL */ - unsigned int decimals; /* for real functions */ - unsigned long max_length; /* For string functions */ - char *ptr; /* free pointer for function data */ - my_bool const_item; /* 0 if result is independent of arguments */ + my_bool maybe_null; /* 1 if function can return NULL */ + unsigned int decimals; /* for real functions */ + unsigned long max_length; /* For string functions */ + char *ptr; /* free pointer for function data */ + my_bool const_item; /* 1 if function always returns the same value */ } UDF_INIT; /* Constants when using compression */ -- cgit v1.2.1 From 185b9efe8c243f28d24d30a79165c83df6eb6d12 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 26 Nov 2007 16:06:46 +0200 Subject: merge of 5.0-opt to 5.1-opt --- mysql-test/r/subselect.result | 53 +++++++++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 12 deletions(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index b1aee11ad7b..ecfc069aa45 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -77,10 +77,9 @@ ERROR HY000: Incorrect usage of PROCEDURE and subquery SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 1)); ERROR HY000: Incorrect parameters to procedure 'ANALYSE' SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL; -a +ERROR 42S22: Unknown column 'a' in 'field list' SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NOT NULL; -a -1 +ERROR 42S22: Unknown column 'a' in 'field list' SELECT (SELECT 1,2,3) = ROW(1,2,3); (SELECT 1,2,3) = ROW(1,2,3) 1 @@ -2304,24 +2303,27 @@ drop table t1,t2; CREATE TABLE t1 ( a int, b int ); CREATE TABLE t2 ( c int, d int ); INSERT INTO t1 VALUES (1,2), (2,3), (3,4); -SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc); +SELECT a AS abc, b FROM t1 outr WHERE b = +(SELECT MIN(b) FROM t1 WHERE a=outr.a); abc b 1 2 2 3 3 4 -INSERT INTO t2 SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc); +INSERT INTO t2 SELECT a AS abc, b FROM t1 outr WHERE b = +(SELECT MIN(b) FROM t1 WHERE a=outr.a); select * from t2; c d 1 2 2 3 3 4 -CREATE TABLE t3 SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc); +CREATE TABLE t3 SELECT a AS abc, b FROM t1 outr WHERE b = +(SELECT MIN(b) FROM t1 WHERE a=outr.a); select * from t3; abc b 1 2 2 3 3 4 -prepare stmt1 from "INSERT INTO t2 SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc);"; +prepare stmt1 from "INSERT INTO t2 SELECT a AS abc, b FROM t1 outr WHERE b = (SELECT MIN(b) FROM t1 WHERE a=outr.a);"; execute stmt1; deallocate prepare stmt1; select * from t2; @@ -2333,7 +2335,7 @@ c d 2 3 3 4 drop table t3; -prepare stmt1 from "CREATE TABLE t3 SELECT a AS abc, b FROM t1 WHERE b = (SELECT MIN(b) FROM t1 WHERE a=abc);"; +prepare stmt1 from "CREATE TABLE t3 SELECT a AS abc, b FROM t1 outr WHERE b = (SELECT MIN(b) FROM t1 WHERE a=outr.a);"; execute stmt1; select * from t3; abc b @@ -2515,7 +2517,9 @@ INSERT INTO t1 VALUES ('ASM','American Samoa','Oceania','Polynesia',199.00,0,680 INSERT INTO t1 VALUES ('ATF','French Southern territories','Antarctica','Antarctica',7780.00,0,0,NULL,0.00,NULL,'Terres 
australes françaises','Nonmetropolitan Territory of France','Jacques Chirac',NULL,'TF'); INSERT INTO t1 VALUES ('UMI','United States Minor Outlying Islands','Oceania','Micronesia/Caribbean',16.00,0,0,NULL,0.00,NULL,'United States Minor Outlying Islands','Dependent Territory of the US','George W. Bush',NULL,'UM'); /*!40000 ALTER TABLE t1 ENABLE KEYS */; -SELECT DISTINCT Continent AS c FROM t1 WHERE Code <> SOME ( SELECT Code FROM t1 WHERE Continent = c AND Population < 200); +SELECT DISTINCT Continent AS c FROM t1 outr WHERE +Code <> SOME ( SELECT Code FROM t1 WHERE Continent = outr.Continent AND +Population < 200); c Oceania drop table t1; @@ -4328,6 +4332,31 @@ id select_type table type possible_keys key key_len ref rows Extra SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500); a b DROP TABLE t1,t2; +CREATE TABLE t1(a INT, b INT); +INSERT INTO t1 VALUES (1,1), (1,2), (2,3), (2,4); +EXPLAIN +SELECT a AS out_a, MIN(b) FROM t1 +WHERE b > (SELECT MIN(b) FROM t1 WHERE a = out_a) +GROUP BY a; +ERROR 42S22: Unknown column 'out_a' in 'where clause' +SELECT a AS out_a, MIN(b) FROM t1 +WHERE b > (SELECT MIN(b) FROM t1 WHERE a = out_a) +GROUP BY a; +ERROR 42S22: Unknown column 'out_a' in 'where clause' +EXPLAIN +SELECT a AS out_a, MIN(b) FROM t1 t1_outer +WHERE b > (SELECT MIN(b) FROM t1 WHERE a = t1_outer.a) +GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1_outer ALL NULL NULL NULL NULL 4 Using where; Using temporary; Using filesort +2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 4 Using where +SELECT a AS out_a, MIN(b) FROM t1 t1_outer +WHERE b > (SELECT MIN(b) FROM t1 WHERE a = t1_outer.a) +GROUP BY a; +out_a MIN(b) +1 2 +2 4 +DROP TABLE t1; CREATE TABLE t1 (a INT); CREATE TABLE t2 (a INT); INSERT INTO t1 VALUES (1),(2); @@ -4338,9 +4367,9 @@ SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a)); 2 EXPLAIN EXTENDED SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a)); -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where -2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 Using where Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1003 select 2 AS `2` from `test`.`t1` where exists(select 1 AS `1` from `test`.`t2` where (`test`.`t1`.`a` = `test`.`t2`.`a`)) -- cgit v1.2.1 From 1836b736000595b3b63c03b1a6db01003b01e395 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 26 Nov 2007 18:36:05 +0300 Subject: 5.0 version of the fix for bug #9481: mysql_insert_id() returns 0 after insert ... select. The 5.0 manual page for mysql_insert_id() does not mention anything about INSERT ... SELECT, though its current behavior is incosistent with what the manual says about the plain INSERT. Fixed by changing the AUTO_INCREMENT and mysql_insert_id() handling logic in INSERT ... SELECT to be consistent with the INSERT behavior, the manual, and the changes in 5.1 introduced by WL3146: - mysql_insert_id() now returns the first automatically generated AUTO_INCREMENT value that was successfully inserted by INSERT ... SELECT - if an INSERT ... SELECT statement is executed, and no automatically generated value is successfully inserted, mysql_insert_id() now returns the ID of the last inserted row. 
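For illustration, a short sketch of what the new behaviour looks like from the C API. It is not part of the patch (the patch's own coverage is the test_mysql_insert_id() case added to mysql_client_test.c below); the table name, values and helper function here are made up, and the expected results follow the two rules stated above.

    #include <stdio.h>
    #include <mysql.h>

    /* `mysql` is assumed to be an already connected handle. */
    static void show_insert_select_ids(MYSQL *mysql)
    {
      mysql_query(mysql, "CREATE TABLE t (id INT NOT NULL PRIMARY KEY "
                         "AUTO_INCREMENT, v INT)");

      /* Two ids (1 and 2) are auto-generated; mysql_insert_id() now reports
         the first successfully generated value, i.e. 1. */
      mysql_query(mysql, "INSERT INTO t(v) SELECT 10 UNION SELECT 20");
      printf("%llu\n", (unsigned long long) mysql_insert_id(mysql));

      /* Nothing is auto-generated; mysql_insert_id() now reports the id of
         the last inserted row (100) instead of 0. */
      mysql_query(mysql, "INSERT INTO t(id, v) SELECT 100, 30");
      printf("%llu\n", (unsigned long long) mysql_insert_id(mysql));

      mysql_query(mysql, "DROP TABLE t");
    }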
sql/sql_class.h: Replaced last_insert_id with autoinc_value_of_last_inserted_row to be consistent with 5.1 code. sql/sql_insert.cc: Revised the AUTO_INCREMENT and mysql_insert_id() handling logic in INSERT ... SELECT to be consistent with INSERT behavior, the manual, and changes in 5.1 introduced by WL3146: - mysql_insert_id() now returns the first automatically generated AUTO_INCREMENT value that was successfully inserted; - if an INSERT ... SELECT statement is executed, and no automatically generated value is successfully inserted, mysql_insert_id() now returns the ID of the last inserted row. tests/mysql_client_test.c: Backported the test cases related to INSERT ... SELECT and mysql_insert_id() from WL3146 patch to 5.0. --- sql/sql_class.h | 3 +- sql/sql_insert.cc | 48 ++++++++++-- tests/mysql_client_test.c | 182 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 224 insertions(+), 9 deletions(-) diff --git a/sql/sql_class.h b/sql/sql_class.h index 93a9d4d6da2..5a5607469d9 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2048,7 +2048,8 @@ class select_insert :public select_result_interceptor { TABLE_LIST *table_list; TABLE *table; List *fields; - ulonglong last_insert_id; + ulonglong autoinc_value_of_last_inserted_row; // not autogenerated + ulonglong autoinc_value_of_first_inserted_row; // autogenerated COPY_INFO info; bool insert_into_view; bool is_bulk_insert_mode; diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 770bbd1349d..70f73f90be4 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -2644,7 +2644,8 @@ select_insert::select_insert(TABLE_LIST *table_list_par, TABLE *table_par, enum_duplicates duplic, bool ignore_check_option_errors) :table_list(table_list_par), table(table_par), fields(fields_par), - last_insert_id(0), + autoinc_value_of_last_inserted_row(0), + autoinc_value_of_first_inserted_row(0), insert_into_view(table_list_par && table_list_par->view != 0), is_bulk_insert_mode(FALSE) { @@ -2901,15 +2902,25 @@ bool select_insert::send_data(List &values) } if (table->next_number_field) { + /* + If no value has been autogenerated so far, we need to remember the + value we just saw, we may need to send it to client in the end. + */ + if (!thd->insert_id_used) + autoinc_value_of_last_inserted_row= table->next_number_field->val_int(); /* Clear auto-increment field for the next record, if triggers are used we will clear it twice, but this should be cheap. */ table->next_number_field->reset(); - if (!last_insert_id && thd->insert_id_used) - last_insert_id= thd->last_insert_id; + if (!autoinc_value_of_last_inserted_row && thd->insert_id_used) + autoinc_value_of_last_inserted_row= thd->last_insert_id; } } + + if (thd->insert_id_used && !autoinc_value_of_first_inserted_row) + autoinc_value_of_first_inserted_row= thd->last_insert_id; + DBUG_RETURN(error); } @@ -2938,6 +2949,7 @@ bool select_insert::send_eof() { int error, error2; bool changed, transactional_table= table->file->has_transactions(); + ulonglong id; DBUG_ENTER("select_insert::send_eof"); error= (!thd->prelocked_mode) ? table->file->end_bulk_insert():0; @@ -2959,8 +2971,17 @@ bool select_insert::send_eof() DBUG_ASSERT(transactional_table || !changed || thd->transaction.stmt.modified_non_trans_table); - if (last_insert_id) - thd->insert_id(info.copied ? 
last_insert_id : 0); // For binary log + // For binary log + if (autoinc_value_of_last_inserted_row) + { + if (info.copied) + thd->insert_id(autoinc_value_of_last_inserted_row); + else + { + autoinc_value_of_first_inserted_row= 0; + thd->insert_id(0); + } + } /* Write to binlog before commiting transaction */ if (mysql_bin_log.is_open()) { @@ -2987,7 +3008,9 @@ bool select_insert::send_eof() thd->row_count_func= info.copied + info.deleted + ((thd->client_capabilities & CLIENT_FOUND_ROWS) ? info.touched : info.updated); - ::send_ok(thd, (ulong) thd->row_count_func, last_insert_id, buff); + id= autoinc_value_of_first_inserted_row > 0 ? + autoinc_value_of_first_inserted_row : thd->last_insert_id; + ::send_ok(thd, (ulong) thd->row_count_func, id, buff); DBUG_RETURN(0); } @@ -3016,8 +3039,17 @@ void select_insert::abort() if ((changed= info.copied || info.deleted || info.updated) && !transactional_table) { - if (last_insert_id) - thd->insert_id(last_insert_id); // For binary log + // For binary log + if (autoinc_value_of_last_inserted_row) + { + if (info.copied) + thd->insert_id(autoinc_value_of_last_inserted_row); + else + { + autoinc_value_of_first_inserted_row= 0; + thd->insert_id(0); + } + } if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 65e18da97f1..5b619886fba 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -15199,6 +15199,187 @@ static void test_bug14169() } +/* + Test that mysql_insert_id() behaves as documented in our manual +*/ + +static void test_mysql_insert_id() +{ + my_ulonglong res; + int rc; + + myheader("test_mysql_insert_id"); + + rc= mysql_query(mysql, "drop table if exists t1"); + myquery(rc); + /* table without auto_increment column */ + rc= mysql_query(mysql, "create table t1 (f1 int, f2 varchar(255), key(f1))"); + myquery(rc); + rc= mysql_query(mysql, "insert into t1 values (1,'a')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "insert into t1 values (null,'b')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "insert into t1 select 5,'c'"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "insert into t1 select null,'d'"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "insert into t1 values (null,last_insert_id(300))"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 300); + rc= mysql_query(mysql, "insert into t1 select null,last_insert_id(400)"); + myquery(rc); + res= mysql_insert_id(mysql); + /* + Behaviour change: old code used to return 0; but 400 is consistent + with INSERT VALUES, and the manual's section of mysql_insert_id() does not + say INSERT SELECT should be different. 
+ */ + DIE_UNLESS(res == 400); + + /* table with auto_increment column */ + rc= mysql_query(mysql, "create table t2 (f1 int not null primary key auto_increment, f2 varchar(255))"); + myquery(rc); + rc= mysql_query(mysql, "insert into t2 values (1,'a')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 1); + /* this should not influence next INSERT if it doesn't have auto_inc */ + rc= mysql_query(mysql, "insert into t1 values (10,'e')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + + rc= mysql_query(mysql, "insert into t2 values (null,'b')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 2); + rc= mysql_query(mysql, "insert into t2 select 5,'c'"); + myquery(rc); + res= mysql_insert_id(mysql); + /* + Manual says that for multirow insert this should have been 5, but does not + say for INSERT SELECT. This is a behaviour change: old code used to return + 0. We try to be consistent with INSERT VALUES. + */ + DIE_UNLESS(res == 5); + rc= mysql_query(mysql, "insert into t2 select null,'d'"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 6); + /* with more than one row */ + rc= mysql_query(mysql, "insert into t2 values (10,'a'),(11,'b')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 11); + rc= mysql_query(mysql, "insert into t2 select 12,'a' union select 13,'b'"); + myquery(rc); + res= mysql_insert_id(mysql); + /* + Manual says that for multirow insert this should have been 13, but does + not say for INSERT SELECT. This is a behaviour change: old code used to + return 0. We try to be consistent with INSERT VALUES. + */ + DIE_UNLESS(res == 13); + rc= mysql_query(mysql, "insert into t2 values (null,'a'),(null,'b')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 14); + rc= mysql_query(mysql, "insert into t2 select null,'a' union select null,'b'"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 16); + rc= mysql_query(mysql, "insert into t2 select 12,'a' union select 13,'b'"); + myquery_r(rc); + rc= mysql_query(mysql, "insert ignore into t2 select 12,'a' union select 13,'b'"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "insert into t2 values (12,'a'),(13,'b')"); + myquery_r(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "insert ignore into t2 values (12,'a'),(13,'b')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + /* mixing autogenerated and explicit values */ + rc= mysql_query(mysql, "insert into t2 values (null,'e'),(12,'a'),(13,'b')"); + myquery_r(rc); + rc= mysql_query(mysql, "insert into t2 values (null,'e'),(12,'a'),(13,'b'),(25,'g')"); + myquery_r(rc); + rc= mysql_query(mysql, "insert into t2 values (null,last_insert_id(300))"); + myquery(rc); + res= mysql_insert_id(mysql); + /* + according to the manual, this might be 20 or 300, but it looks like + auto_increment column takes priority over last_insert_id(). 
+ */ + DIE_UNLESS(res == 20); + /* If first autogenerated number fails and 2nd works: */ + rc= mysql_query(mysql, "drop table t2"); + myquery(rc); + rc= mysql_query(mysql, "create table t2 (f1 int not null primary key " + "auto_increment, f2 varchar(255), unique (f2))"); + myquery(rc); + rc= mysql_query(mysql, "insert into t2 values (null,'e')"); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 1); + rc= mysql_query(mysql, "insert ignore into t2 values (null,'e'),(null,'a'),(null,'e')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 2); + /* If autogenerated fails and explicit works: */ + rc= mysql_query(mysql, "insert ignore into t2 values (null,'e'),(12,'c'),(null,'d')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 3); + /* UPDATE may update mysql_insert_id() if it uses LAST_INSERT_ID(#) */ + rc= mysql_query(mysql, "update t2 set f1=14 where f1=12"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "update t2 set f1=NULL where f1=14"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + rc= mysql_query(mysql, "update t2 set f2=last_insert_id(372) where f1=0"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 372); + /* check that LAST_INSERT_ID() does not update mysql_insert_id(): */ + rc= mysql_query(mysql, "insert into t2 values (null,'g')"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 15); + rc= mysql_query(mysql, "update t2 set f2=(@li:=last_insert_id()) where f1=15"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 0); + /* + Behaviour change: now if ON DUPLICATE KEY UPDATE updates a row, + mysql_insert_id() returns the id of the row, instead of not being + affected. + */ + rc= mysql_query(mysql, "insert into t2 values (null,@li) on duplicate key " + "update f2=concat('we updated ',f2)"); + myquery(rc); + res= mysql_insert_id(mysql); + DIE_UNLESS(res == 15); + + rc= mysql_query(mysql, "drop table t1,t2"); + myquery(rc); +} + + /* Bug#20152: mysql_stmt_execute() writes to MYSQL_TYPE_DATE buffer */ @@ -16237,6 +16418,7 @@ static struct my_tests_st my_tests[]= { { "test_bug17667", test_bug17667 }, { "test_bug19671", test_bug19671 }, { "test_bug15752", test_bug15752 }, + { "test_mysql_insert_id", test_mysql_insert_id }, { "test_bug21206", test_bug21206 }, { "test_bug21726", test_bug21726 }, { "test_bug15518", test_bug15518 }, -- cgit v1.2.1 From 67bf39f241bb1742dfa221218ff3d50842f58490 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 26 Nov 2007 18:58:54 +0300 Subject: Fix for bug #28837: MyISAM storage engine error (134) doing delete with self-join When doing DELETE with self-join on a MyISAM or MERGE table, it could happen that a record being retrieved in join_read_next_same() has already been deleted by previous iterations. That caused the engine's index_next_same() method to fail with HA_ERR_RECORD_DELETED error and the whole DELETE query to be aborted with an error. Fixed by suppressing the HA_ERR_RECORD_DELETED error in hy_myisam::index_next_same() and ha_myisammrg::index_next_same(). Since HA_ERR_RECORD_DELETED can only be returned by MyISAM, there is no point in filtering this error in the SQL layer. mysql-test/r/merge.result: Added a test case for bug #28837. mysql-test/r/myisam.result: Added a test case for bug #28837. mysql-test/t/merge.test: Added a test case for bug #28837. mysql-test/t/myisam.test: Added a test case for bug #28837. 
sql/ha_myisam.cc: Skip HA_ERR_RECORD_DELETED silently when calling mi_rnext_same(). sql/ha_myisammrg.cc: Skip HA_ERR_RECORD_DELETED silently when calling mi_rnext_same(). --- mysql-test/r/merge.result | 37 +++++++++++++++++++++++++++++++++++++ mysql-test/r/myisam.result | 22 ++++++++++++++++++++++ mysql-test/t/merge.test | 22 ++++++++++++++++++++++ mysql-test/t/myisam.test | 17 +++++++++++++++++ sql/ha_myisam.cc | 8 ++++++-- sql/ha_myisammrg.cc | 8 ++++++-- 6 files changed, 110 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/merge.result b/mysql-test/r/merge.result index 42669eeb66f..7e8a0df6908 100644 --- a/mysql-test/r/merge.result +++ b/mysql-test/r/merge.result @@ -876,4 +876,41 @@ CHECK TABLE tm1; Table Op Msg_type Msg_text test.tm1 check status OK DROP TABLE tm1, t1, t2; +CREATE TABLE t1 (id INT NOT NULL, ref INT NOT NULL, INDEX (id)) ENGINE=MyISAM; +CREATE TABLE t2 LIKE t1; +INSERT INTO t2 (id, ref) VALUES (1,3), (2,1), (3,2), (4,5), (4,4); +INSERT INTO t1 SELECT * FROM t2; +INSERT INTO t1 SELECT * FROM t2; +CREATE TABLE t3 (id INT NOT NULL, ref INT NOT NULL, INDEX (id)) ENGINE=MERGE +UNION(t1); +SELECT * FROM t3 AS a INNER JOIN t3 AS b USING (id) WHERE a.ref < b.ref; +id ref ref +4 4 5 +4 4 5 +4 4 5 +4 4 5 +SELECT * FROM t3; +id ref +1 3 +2 1 +3 2 +4 5 +4 4 +1 3 +2 1 +3 2 +4 5 +4 4 +DELETE FROM a USING t3 AS a INNER JOIN t3 AS b USING (id) WHERE a.ref < b.ref; +SELECT * FROM t3; +id ref +1 3 +2 1 +3 2 +4 5 +1 3 +2 1 +3 2 +4 5 +DROP TABLE t1, t2, t3; End of 5.0 tests diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result index 7fc29cd13ca..79a9891b15c 100644 --- a/mysql-test/r/myisam.result +++ b/mysql-test/r/myisam.result @@ -1806,4 +1806,26 @@ SELECT a FROM t1 FORCE INDEX (inx) WHERE a=1; a 1 DROP TABLE t1; +CREATE TABLE t1 (id int NOT NULL, ref int NOT NULL, INDEX (id)) ENGINE=MyISAM; +CREATE TABLE t2 LIKE t1; +INSERT INTO t2 (id, ref) VALUES (1,3), (2,1), (3,2), (4,5), (4,4); +INSERT INTO t1 SELECT * FROM t2; +SELECT * FROM t1 AS a INNER JOIN t1 AS b USING (id) WHERE a.ref < b.ref; +id ref ref +4 4 5 +SELECT * FROM t1; +id ref +1 3 +2 1 +3 2 +4 5 +4 4 +DELETE FROM a USING t1 AS a INNER JOIN t1 AS b USING (id) WHERE a.ref < b.ref; +SELECT * FROM t1; +id ref +1 3 +2 1 +3 2 +4 5 +DROP TABLE t1, t2; End of 5.0 tests diff --git a/mysql-test/t/merge.test b/mysql-test/t/merge.test index c3e5cef5e63..7e64daf076a 100644 --- a/mysql-test/t/merge.test +++ b/mysql-test/t/merge.test @@ -507,4 +507,26 @@ SELECT * FROM tm1; CHECK TABLE tm1; DROP TABLE tm1, t1, t2; +# +# Bug #28837: MyISAM storage engine error (134) doing delete with self-join +# + +CREATE TABLE t1 (id INT NOT NULL, ref INT NOT NULL, INDEX (id)) ENGINE=MyISAM; +CREATE TABLE t2 LIKE t1; + +INSERT INTO t2 (id, ref) VALUES (1,3), (2,1), (3,2), (4,5), (4,4); +INSERT INTO t1 SELECT * FROM t2; +INSERT INTO t1 SELECT * FROM t2; + +CREATE TABLE t3 (id INT NOT NULL, ref INT NOT NULL, INDEX (id)) ENGINE=MERGE + UNION(t1); + +SELECT * FROM t3 AS a INNER JOIN t3 AS b USING (id) WHERE a.ref < b.ref; +SELECT * FROM t3; +DELETE FROM a USING t3 AS a INNER JOIN t3 AS b USING (id) WHERE a.ref < b.ref; +SELECT * FROM t3; + +DROP TABLE t1, t2, t3; + + --echo End of 5.0 tests diff --git a/mysql-test/t/myisam.test b/mysql-test/t/myisam.test index d5f403616c8..ab00af4cc1a 100644 --- a/mysql-test/t/myisam.test +++ b/mysql-test/t/myisam.test @@ -1161,4 +1161,21 @@ ALTER TABLE t1 ENABLE KEYS; SELECT a FROM t1 FORCE INDEX (inx) WHERE a=1; DROP TABLE t1; +# +# Bug#28837: MyISAM storage engine error (134) doing delete with self-join 
+# + +CREATE TABLE t1 (id int NOT NULL, ref int NOT NULL, INDEX (id)) ENGINE=MyISAM; +CREATE TABLE t2 LIKE t1; + +INSERT INTO t2 (id, ref) VALUES (1,3), (2,1), (3,2), (4,5), (4,4); +INSERT INTO t1 SELECT * FROM t2; + +SELECT * FROM t1 AS a INNER JOIN t1 AS b USING (id) WHERE a.ref < b.ref; +SELECT * FROM t1; +DELETE FROM a USING t1 AS a INNER JOIN t1 AS b USING (id) WHERE a.ref < b.ref; +SELECT * FROM t1; + +DROP TABLE t1, t2; + --echo End of 5.0 tests diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 92fa9e405e1..86f04672676 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -1602,10 +1602,14 @@ int ha_myisam::index_next_same(byte * buf, const byte *key __attribute__((unused)), uint length __attribute__((unused))) { + int error; DBUG_ASSERT(inited==INDEX); statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); - int error=mi_rnext_same(file,buf); + &LOCK_status); + do + { + error= mi_rnext_same(file,buf); + } while (error == HA_ERR_RECORD_DELETED); table->status=error ? STATUS_NOT_FOUND: 0; return error; } diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc index 60aa4bd6adc..78492d2843d 100644 --- a/sql/ha_myisammrg.cc +++ b/sql/ha_myisammrg.cc @@ -294,9 +294,13 @@ int ha_myisammrg::index_next_same(byte * buf, const byte *key __attribute__((unused)), uint length __attribute__((unused))) { + int error; statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); - int error=myrg_rnext_same(file,buf); + &LOCK_status); + do + { + error= myrg_rnext_same(file,buf); + } while (error == HA_ERR_RECORD_DELETED); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -- cgit v1.2.1 From 3058e0257315eeb183a1df10376cd9f4df3061fb Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 26 Nov 2007 22:21:46 +0300 Subject: Many files: Test case for the bug#31048 is moved to the subselect_notembedded tests as the embedded server isn't affected by this bug. mysql-test/r/subselect.result: Test case for the bug#31048 is moved to the subselect_notembedded tests as the embedded server isn't affected by this bug. mysql-test/r/subselect_notembedded.result: Test case for the bug#31048 is moved to the subselect_notembedded tests as the embedded server isn't affected by this bug. mysql-test/t/subselect.test: Test case for the bug#31048 is moved to the subselect_notembedded tests as the embedded server isn't affected by this bug. mysql-test/t/subselect_notembedded.test: Test case for the bug#31048 is moved to the subselect_notembedded tests as the embedded server isn't affected by this bug. 
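(For context: bug#31048 concerns deeply nested subqueries; with the fix the statement is rejected with error 1436, "Thread stack overrun detected", which is what the relocated test checks for. Per the commit message above, the embedded server is not affected, so the test now lives in the *_notembedded suite, which only runs against the stand-alone server.) A simplified, hypothetical sketch of how such a stack guard can work in principle — invented names, not the server's actual implementation:

/*
  Hypothetical stack guard: estimate how much of the configured thread
  stack is already in use from the address of a local variable, and let
  the caller raise the "overrun" error before recursing any deeper.
*/
#include <cstddef>
#include <cstdint>

static std::uintptr_t thread_stack_bottom;            /* set at thread start */
static std::size_t    thread_stack_size= 192 * 1024;  /* e.g. --thread_stack */

void record_stack_bottom()
{
  char anchor;
  thread_stack_bottom= reinterpret_cast<std::uintptr_t>(&anchor);
}

bool stack_would_overrun(std::size_t margin)
{
  char here;
  std::uintptr_t p= reinterpret_cast<std::uintptr_t>(&here);
  std::size_t used= (p > thread_stack_bottom)
                    ? (std::size_t) (p - thread_stack_bottom)
                    : (std::size_t) (thread_stack_bottom - p);
  return used + margin >= thread_stack_size;   /* caller reports error 1436 */
}

Each level of subquery nesting performs such a check with a safety margin, which is why the deeply nested (but still parseable) statements in the test below fail cleanly instead of corrupting the stack.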
--- mysql-test/r/subselect.result | 101 ------------------------------ mysql-test/r/subselect_notembedded.result | 101 ++++++++++++++++++++++++++++++ mysql-test/t/subselect.test | 94 --------------------------- mysql-test/t/subselect_notembedded.test | 96 ++++++++++++++++++++++++++++ 4 files changed, 197 insertions(+), 195 deletions(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index ecfc069aa45..bf320e585d6 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -4140,107 +4140,6 @@ SELECT (SELECT SUM(t1.a) FROM t2 WHERE a=1) FROM t1; (SELECT SUM(t1.a) FROM t2 WHERE a=1) 3 DROP TABLE t1,t2; -create table t1(a int,b int,key(a),key(b)); -insert into t1(a,b) values (1,2),(2,1),(2,3),(3,4),(5,4),(5,5), -(6,7),(7,4),(5,3); -select sum(a),a from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 -)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1) -group by a; -sum(a) a -select sum(a),a from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1) -group by a; -ERROR HY000: Thread stack overrun detected -explain select sum(a),a from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select 
sum(a) from t1 where a> ( select sum(a) from t1 -)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1) -group by a; -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 index a a 5 NULL 9 Using where; Using index -2 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort -3 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort -4 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort -5 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort -6 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort -7 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort -8 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort -9 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort -10 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort -11 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort -12 SUBQUERY t1 range a a 5 NULL 1 Using where; Using temporary; Using filesort -13 SUBQUERY t1 index NULL a 5 NULL 9 Using index -explain select sum(a),a from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( -select sum(a) from t1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1 -)group by b limit 1)group by b limit 1)group by b limit 1) -group by a; -ERROR HY000: Thread stack overrun detected -drop table t1; CREATE TABLE t1 (a1 INT, a2 INT); CREATE TABLE t2 (b1 INT, b2 INT); INSERT INTO t1 VALUES (100, 200); diff --git a/mysql-test/r/subselect_notembedded.result b/mysql-test/r/subselect_notembedded.result index 44ae055425e..90aadcae398 100644 --- a/mysql-test/r/subselect_notembedded.result +++ b/mysql-test/r/subselect_notembedded.result @@ -1,3 +1,104 @@ purge master logs before (select adddate(current_timestamp(), interval -4 day)); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'select adddate(current_timestamp(), interval -4 day))' at line 1 purge master 
logs before adddate(current_timestamp(), interval -4 day); +create table t1(a int,b int,key(a),key(b)); +insert into t1(a,b) values (1,2),(2,1),(2,3),(3,4),(5,4),(5,5), +(6,7),(7,4),(5,3); +select sum(a),a from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 +)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +sum(a) a +select sum(a),a from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +ERROR HY000: Thread stack overrun detected +explain select sum(a),a from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 +)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 index a a 5 NULL 9 Using where; Using index +2 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +3 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +4 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +5 
SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +6 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +7 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +8 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +9 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +10 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +11 SUBQUERY t1 index a b 5 NULL 9 Using where; Using filesort +12 SUBQUERY t1 range a a 5 NULL 1 Using where; Using temporary; Using filesort +13 SUBQUERY t1 index NULL a 5 NULL 9 Using index +explain select sum(a),a from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( +select sum(a) from t1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1 +)group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +ERROR HY000: Thread stack overrun detected +drop table t1; diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index 3af2ec1891d..077e00a4c6e 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -3000,100 +3000,6 @@ SELECT (SELECT SUM(t1.a) FROM t2 WHERE a!=0) FROM t1; SELECT (SELECT SUM(t1.a) FROM t2 WHERE a=1) FROM t1; DROP TABLE t1,t2; -# -# Bug31048: Many nested subqueries may cause server crash. 
-# -create table t1(a int,b int,key(a),key(b)); -insert into t1(a,b) values (1,2),(2,1),(2,3),(3,4),(5,4),(5,5), - (6,7),(7,4),(5,3); -# test for the stack overflow bug -select sum(a),a from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 - )group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1) -group by a; ---replace_regex /overrun.*$/overrun detected/ ---error 1436 -select sum(a),a from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1) -group by a; -# test for the memory consumption & subquery slowness bug -explain select sum(a),a from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 - )group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1) -group by a; ---replace_regex /overrun.*$/overrun detected/ ---error 1436 -explain select sum(a),a from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from 
t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( - select sum(a) from t1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1 - )group by b limit 1)group by b limit 1)group by b limit 1) -group by a; -drop table t1; - # # Bug #31884: Assertion + crash in subquery in the SELECT clause. # diff --git a/mysql-test/t/subselect_notembedded.test b/mysql-test/t/subselect_notembedded.test index c112272e8ad..040c90452b9 100644 --- a/mysql-test/t/subselect_notembedded.test +++ b/mysql-test/t/subselect_notembedded.test @@ -7,3 +7,99 @@ --error 1064 purge master logs before (select adddate(current_timestamp(), interval -4 day)); purge master logs before adddate(current_timestamp(), interval -4 day); + +# +# Bug31048: Many nested subqueries may cause server crash. 
+# +create table t1(a int,b int,key(a),key(b)); +insert into t1(a,b) values (1,2),(2,1),(2,3),(3,4),(5,4),(5,5), + (6,7),(7,4),(5,3); +# test for the stack overflow bug +select sum(a),a from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 + )group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +--replace_regex /overrun.*$/overrun detected/ +--error 1436 +select sum(a),a from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +# test for the memory consumption & subquery slowness bug +explain select sum(a),a from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 + )group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +--replace_regex /overrun.*$/overrun detected/ +--error 1436 +explain select sum(a),a from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from 
t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 where a> ( select sum(a) from t1 where a> ( + select sum(a) from t1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1 + )group by b limit 1)group by b limit 1)group by b limit 1) +group by a; +drop table t1; + + -- cgit v1.2.1 From d068dd1aabf29d7080a040469cf6ca58996f6fea Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Nov 2007 19:14:48 +0400 Subject: Fixed bug #32403: query causes a crash due to stack and memory corruptions. The right pointer field of the SEL_ARG structure was not initialized in the constructor and sometimes that led to server crashes. There is no testcase because the bug occurs only when uninitialized memory has particular values, which can't be re-created in the test suite. sql/opt_range.cc: Fixed bug #32403. The eq_tree function requires that SEL_ARG::left and SEL_ARG::right are equal to null pointer if SEL_ARG type is MAYBE_KEY, but SEL_ARG::right was not initialized and contained garbage. --- sql/opt_range.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 969777d4792..58ce6c8da7a 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -250,6 +250,9 @@ public: Field *field; char *min_value,*max_value; // Pointer to range + /* + eq_tree() requires that left == right == 0 if the type is MAYBE_KEY. + */ SEL_ARG *left,*right; /* R-B tree children */ SEL_ARG *next,*prev; /* Links for bi-directional interval list */ SEL_ARG *parent; /* R-B tree parent */ @@ -265,7 +268,7 @@ public: SEL_ARG(Field *field, uint8 part, char *min_value, char *max_value, uint8 min_flag, uint8 max_flag, uint8 maybe_flag); SEL_ARG(enum Type type_arg) - :min_flag(0),elements(1),use_count(1),left(0),next_key_part(0), + :min_flag(0),elements(1),use_count(1),left(0),right(0),next_key_part(0), color(BLACK), type(type_arg) {} inline bool is_same(SEL_ARG *arg) -- cgit v1.2.1 From 63b65169534c97f0c225859d2d6d49f3cee2bc10 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Nov 2007 17:16:52 +0200 Subject: Bug #30355: Incorrect ordering of UDF results There's currently no way of knowing the determinicity of an UDF. 
And the optimizer and the sequence() UDFs were making wrong assumptions about what the is_const member means. Plus there was no implementation of update_system_tables() causing the optimizer to overwrite the information returned by the _init function. Fixed by equating the assumptions about the semantics of is_const and providing a implementation of update_used_tables(). Added a TODO item for the UDF API change needed to make a better implementation. include/mysql_com.h: Bug #30355: comment added mysql-test/r/udf.result: Bug #30355: test case mysql-test/t/udf.test: Bug #30355: test case sql/item_func.cc: Bug #30355: keep const_item_cache and used_tables_cache in sync sql/item_func.h: Bug #30355: - a better implementation of update_used_tables() - keep const_item_cache and used_tables_cache in sync sql/udf_example.c: Bug #30355: Wrong value for const_item fixed. --- include/mysql_com.h | 4 ++++ mysql-test/r/udf.result | 27 ++++++++++++++++++++++++++ mysql-test/t/udf.test | 21 +++++++++++++++++++++ sql/item_func.cc | 6 ++++++ sql/item_func.h | 50 +++++++++++++++++++++++++++++++++++++++++++++++++ sql/udf_example.c | 10 ++++------ 6 files changed, 112 insertions(+), 6 deletions(-) diff --git a/include/mysql_com.h b/include/mysql_com.h index 889579e3622..fc03d98194b 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -393,6 +393,10 @@ typedef struct st_udf_init char *ptr; /* free pointer for function data */ my_bool const_item; /* 0 if result is independent of arguments */ } UDF_INIT; +/* + TODO: add a notion for determinism of the UDF. + See Item_udf_func::update_used_tables () +*/ /* Constants when using compression */ #define NET_HEADER_SIZE 4 /* standard header size */ diff --git a/mysql-test/r/udf.result b/mysql-test/r/udf.result index e6797796ea0..a79be1c3189 100644 --- a/mysql-test/r/udf.result +++ b/mysql-test/r/udf.result @@ -327,4 +327,31 @@ DROP FUNCTION check_const_len; DROP PROCEDURE check_const_len_sp; DROP TRIGGER check_const_len_trigger; DROP TABLE const_len_bug; +CREATE FUNCTION sequence RETURNS INTEGER SONAME "UDF_EXAMPLE_LIB"; +CREATE TABLE t1 (a INT); +CREATE TABLE t2 (a INT PRIMARY KEY); +INSERT INTO t1 VALUES (4),(3),(2),(1); +INSERT INTO t2 SELECT * FROM t1; +SELECT sequence() AS seq, a FROM t1 ORDER BY seq ASC; +seq a +1 4 +2 3 +3 2 +4 1 +SELECT sequence() AS seq, a FROM t1 ORDER BY seq DESC; +seq a +4 1 +3 2 +2 3 +1 4 +SELECT * FROM t1 WHERE a = sequence(); +a +SELECT * FROM t2 WHERE a = sequence(); +a +1 +2 +3 +4 +DROP FUNCTION sequence; +DROP TABLE t1,t2; End of 5.0 tests. diff --git a/mysql-test/t/udf.test b/mysql-test/t/udf.test index 22b8ed10a49..648494b3df9 100644 --- a/mysql-test/t/udf.test +++ b/mysql-test/t/udf.test @@ -362,4 +362,25 @@ DROP PROCEDURE check_const_len_sp; DROP TRIGGER check_const_len_trigger; DROP TABLE const_len_bug; + +# +# Bug #30355: Incorrect ordering of UDF results +# + +--replace_result $UDF_EXAMPLE_LIB UDF_EXAMPLE_LIB +eval CREATE FUNCTION sequence RETURNS INTEGER SONAME "$UDF_EXAMPLE_LIB"; +CREATE TABLE t1 (a INT); +CREATE TABLE t2 (a INT PRIMARY KEY); +INSERT INTO t1 VALUES (4),(3),(2),(1); +INSERT INTO t2 SELECT * FROM t1; + +SELECT sequence() AS seq, a FROM t1 ORDER BY seq ASC; +SELECT sequence() AS seq, a FROM t1 ORDER BY seq DESC; + +SELECT * FROM t1 WHERE a = sequence(); +SELECT * FROM t2 WHERE a = sequence(); + +DROP FUNCTION sequence; +DROP TABLE t1,t2; + --echo End of 5.0 tests. 
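For readers unfamiliar with the UDF interface discussed in this commit: the xxx_init() function is currently the only place a UDF can describe itself, and UDF_INIT::const_item is the only relevant flag. Below is a minimal, hypothetical integer UDF (invented name, not part of this patch) that, like sequence(), returns a different value on every call and therefore must leave const_item at 0 — a sketch assuming the usual UDF build setup (my_global.h, my_sys.h, mysql.h):

my_bool counter_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
  if (args->arg_count != 0)
  {
    strcpy(message, "counter() takes no arguments");
    return 1;
  }
  if (!(initid->ptr= (char*) malloc(sizeof(longlong))))
  {
    strcpy(message, "counter(): out of memory");
    return 1;
  }
  bzero(initid->ptr, sizeof(longlong));
  /*
    Non-deterministic: the result differs between calls even with identical
    arguments, so const_item must be 0.  As the commit message notes, there
    is currently no way to additionally declare whether the UDF is
    deterministic, which is what the TODO in the following diff is about.
  */
  initid->const_item= 0;
  return 0;
}

longlong counter(UDF_INIT *initid, UDF_ARGS *args __attribute__((unused)),
                 char *is_null __attribute__((unused)),
                 char *error __attribute__((unused)))
{
  return ++*((longlong*) initid->ptr);   /* 1, 2, 3, ... per evaluation */
}

void counter_deinit(UDF_INIT *initid)
{
  free(initid->ptr);
}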
diff --git a/sql/item_func.cc b/sql/item_func.cc index 264d9265bca..0309c1a17cb 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2968,6 +2968,12 @@ udf_handler::fix_fields(THD *thd, Item_result_field *func, func->max_length=min(initid.max_length,MAX_BLOB_WIDTH); func->maybe_null=initid.maybe_null; const_item_cache=initid.const_item; + /* + Keep used_tables_cache in sync with const_item_cache. + See the comment in Item_udf_func::update_used tables. + */ + if (!const_item_cache && !used_tables_cache) + used_tables_cache= RAND_TABLE_BIT; func->decimals=min(initid.decimals,NOT_FIXED_DEC); } initialized=1; diff --git a/sql/item_func.h b/sql/item_func.h index a31294c0395..734b215ddc0 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1016,6 +1016,56 @@ public: fixed= 1; return res; } + void update_used_tables() + { + /* + TODO: Make a member in UDF_INIT and return if a UDF is deterministic or + not. + Currently UDF_INIT has a member (const_item) that is an in/out + parameter to the init() call. + The code in udf_handler::fix_fields also duplicates the arguments + handling code in Item_func::fix_fields(). + + The lack of information if a UDF is deterministic makes writing + a correct update_used_tables() for UDFs impossible. + One solution to this would be : + - Add a is_deterministic member of UDF_INIT + - (optionally) deprecate the const_item member of UDF_INIT + - Take away the duplicate code from udf_handler::fix_fields() and + make Item_udf_func call Item_func::fix_fields() to process its + arguments as for any other function. + - Store the deterministic flag returned by _init into the + udf_handler. + - Don't implement Item_udf_func::fix_fields, implement + Item_udf_func::fix_length_and_dec() instead (similar to non-UDF + functions). + - Override Item_func::update_used_tables to call + Item_func::update_used_tables() and add a RAND_TABLE_BIT to the + result of Item_func::update_used_tables() if the UDF is + non-deterministic. + - (optionally) rename RAND_TABLE_BIT to NONDETERMINISTIC_BIT to + better describe its usage. + + The above would require a change of the UDF API. + Until that change is done here's how the current code works: + We call Item_func::update_used_tables() only when we know that + the function depends on real non-const tables and is deterministic. + This can be done only because we know that the optimizer will + call update_used_tables() only when there's possibly a new const + table. So update_used_tables() can only make a Item_func more + constant than it is currently. + That's why we don't need to do anything if a function is guaranteed + to return non-constant (it's non-deterministic) or is already a + const. 
+ */ + if ((used_tables_cache & ~PSEUDO_TABLE_BITS) && + !(used_tables_cache & RAND_TABLE_BIT)) + { + Item_func::update_used_tables(); + if (!const_item_cache && !used_tables_cache) + used_tables_cache= RAND_TABLE_BIT; + } + } void cleanup(); Item_result result_type () const { return udf.result_type(); } table_map not_null_tables() const { return 0; } diff --git a/sql/udf_example.c b/sql/udf_example.c index df3a69755ad..4ca6133da03 100644 --- a/sql/udf_example.c +++ b/sql/udf_example.c @@ -648,13 +648,11 @@ my_bool sequence_init(UDF_INIT *initid, UDF_ARGS *args, char *message) return 1; } bzero(initid->ptr,sizeof(longlong)); - /* - Fool MySQL to think that this function is a constant - This will ensure that MySQL only evalutes the function - when the rows are sent to the client and not before any ORDER BY - clauses + /* + sequence() is a non-deterministic function : it has different value + even if called with the same arguments. */ - initid->const_item=1; + initid->const_item=0; return 0; } -- cgit v1.2.1 From 7c0db983ad8eb8e1d37cf8468bbfd32de87a14ad Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Nov 2007 18:24:24 +0100 Subject: Bug#32707: misdimensioned buffer in protocol layer Miscalculation in protocol-layer: size buffer correctly so even really long error messages cannot overflow our buffer. sql/protocol.cc: size buffer correctly so really long error messages cannot overflow it. --- sql/protocol.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sql/protocol.cc b/sql/protocol.cc index 7c7dfaf7bef..f7a34fde94a 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -56,7 +56,10 @@ void send_error(THD *thd, uint sql_errno, const char *err) { #ifndef EMBEDDED_LIBRARY uint length; - char buff[MYSQL_ERRMSG_SIZE+2], *pos; + /* + buff[]: sql_errno:2 + ('#':1 + SQLSTATE_LENGTH:5) + MYSQL_ERRMSG_SIZE:512 + */ + char buff[2+1+SQLSTATE_LENGTH+MYSQL_ERRMSG_SIZE], *pos; #endif const char *orig_err= err; NET *net= &thd->net; -- cgit v1.2.1 From b5c0cf454dd20f3512d74984c7fc1b77c2e7b737 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Nov 2007 03:02:05 +0300 Subject: BUG#32772: partition crash 1: enum column The bug was that for ordered index scans, ha_partition::index_init() did not put index columns into table->read_set if the underlying storage engine did not have HA_PARTIAL_COLUMN_READ flag. This was causing assertion failure when handle_ordered_index_scan() tried to sort the records according to index order. Fixed by making ha_partition::index_init() put index columns into table->read_set for all ordered scans. mysql-test/r/partition.result: BUG#32772: partition crash 1: enum column - Testcase mysql-test/t/partition.test: BUG#32772: partition crash 1: enum column - Testcase sql/ha_partition.cc: BUG#32772: partition crash 1: enum column - Make ha_partition::index_init() include index columns in the read_set whenever an ordered scan is initialized, no matter if HA_PARTIAL_COLUMN_READ is set or not. 
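As a rough illustration of what "include index columns in the read_set" means in practice: before the ordered scan starts, every field of the chosen key must be marked in the table's read bitmap so that the rows returned by the individual partitions carry the key values needed for merge-sorting. A hypothetical helper showing the idea (not the exact ha_partition code; assumes the server-internal headers, e.g. mysql_priv.h):

/*
  Mark all columns of key 'inx' in table->read_set so an ordered
  (merge-sorted) scan can compare rows coming from different partitions.
*/
static void include_index_columns(TABLE *table, uint inx)
{
  KEY *key_info= table->key_info + inx;
  for (uint i= 0; i < key_info->key_parts; i++)
    bitmap_set_bit(table->read_set,
                   key_info->key_part[i].field->field_index);
}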
--- mysql-test/r/partition.result | 13 +++++++++++++ mysql-test/t/partition.test | 20 ++++++++++++++++++++ sql/ha_partition.cc | 15 +++++++++------ 3 files changed, 42 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index d646226daf8..be7d7bb158e 100644 --- a/mysql-test/r/partition.result +++ b/mysql-test/r/partition.result @@ -1290,4 +1290,17 @@ create table t1 partition by key(s1) partitions 3; insert into t1 values (null,null); drop table t1; +create table t1 ( +c0 int, +c1 bigint, +c2 set('sweet'), +key (c2,c1,c0), +key(c0) +) engine=myisam partition by hash (month(c0)) partitions 5; +insert ignore into t1 set c0 = -6502262, c1 = 3992917, c2 = 35019; +insert ignore into t1 set c0 = 241221, c1 = -6862346, c2 = 56644; +select c1 from t1 group by (select c0 from t1 limit 1); +c1 +-6862346 +drop table t1; End of 5.1 tests diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test index 65e78a5e740..e86d7c8dfb5 100644 --- a/mysql-test/t/partition.test +++ b/mysql-test/t/partition.test @@ -1531,4 +1531,24 @@ while ($cnt) --enable_query_log drop table t1; + +# +# BUG#32272: partition crash 1: enum column +# +create table t1 ( + c0 int, + c1 bigint, + c2 set('sweet'), + key (c2,c1,c0), + key(c0) +) engine=myisam partition by hash (month(c0)) partitions 5; + +--disable_warnings +insert ignore into t1 set c0 = -6502262, c1 = 3992917, c2 = 35019; +insert ignore into t1 set c0 = 241221, c1 = -6862346, c2 = 56644; +--enable_warnings +# This must not fail assert: +select c1 from t1 group by (select c0 from t1 limit 1); +drop table t1; + --echo End of 5.1 tests diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 423415ce4ae..7234222b79f 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3413,14 +3413,17 @@ int ha_partition::index_init(uint inx, bool sorted) */ if (m_lock_type == F_WRLCK) bitmap_union(table->read_set, &m_part_info->full_part_field_set); - else if (sorted && m_table_flags & HA_PARTIAL_COLUMN_READ) + else if (sorted) { /* - An ordered scan is requested and necessary fields aren't in read_set. - This may happen e.g. with SELECT COUNT(*) FROM t1. We must ensure - that all fields of current key are included into read_set, as - partitioning requires them for sorting - (see ha_partition::handle_ordered_index_scan). + An ordered scan is requested. We must make sure all fields of the + used index are in the read set, as partitioning requires them for + sorting (see ha_partition::handle_ordered_index_scan). + + The SQL layer may request an ordered index scan without having index + fields in the read set when + - it needs to do an ordered scan over an index prefix. + - it evaluates ORDER BY with SELECT COUNT(*) FROM t1. TODO: handle COUNT(*) queries via unordered scan. 
*/ -- cgit v1.2.1 From b9d8e34925d1be4b6c1c8950217d41456a02c542 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Nov 2007 15:37:00 +0400 Subject: Bug#25629 CREATE TABLE LIKE does not work with INFORMATION_SCHEMA added new func mysql_create_like_schema_frm() which creates frm file based on I_S table mysql-test/r/create.result: test result mysql-test/t/create.test: test case sql/sql_table.cc: added new func mysql_create_like_schema_frm() which creates frm file based on I_S table --- mysql-test/r/create.result | 38 +++++++++++++++++++++++++++++ mysql-test/t/create.test | 13 ++++++++++ sql/sql_table.cc | 59 +++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 109 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index ca3b344af53..9e7fef5e3f1 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -1705,4 +1705,42 @@ create table t1 as select 1; create table t2 as select f1() from t1; drop table t1,t2; drop function f1; +create table t1 like information_schema.processlist; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `ID` bigint(4) NOT NULL DEFAULT '0', + `USER` varchar(16) NOT NULL DEFAULT '', + `HOST` varchar(64) NOT NULL DEFAULT '', + `DB` varchar(64) DEFAULT NULL, + `COMMAND` varchar(16) NOT NULL DEFAULT '', + `TIME` bigint(7) NOT NULL DEFAULT '0', + `STATE` varchar(64) DEFAULT NULL, + `INFO` longtext +) ENGINE=MyISAM DEFAULT CHARSET=utf8 +drop table t1; +create temporary table t1 like information_schema.processlist; +show create table t1; +Table Create Table +t1 CREATE TEMPORARY TABLE `t1` ( + `ID` bigint(4) NOT NULL DEFAULT '0', + `USER` varchar(16) NOT NULL DEFAULT '', + `HOST` varchar(64) NOT NULL DEFAULT '', + `DB` varchar(64) DEFAULT NULL, + `COMMAND` varchar(16) NOT NULL DEFAULT '', + `TIME` bigint(7) NOT NULL DEFAULT '0', + `STATE` varchar(64) DEFAULT NULL, + `INFO` longtext +) ENGINE=MyISAM DEFAULT CHARSET=utf8 +drop table t1; +create table t1 like information_schema.character_sets; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `CHARACTER_SET_NAME` varchar(64) NOT NULL DEFAULT '', + `DEFAULT_COLLATE_NAME` varchar(64) NOT NULL DEFAULT '', + `DESCRIPTION` varchar(60) NOT NULL DEFAULT '', + `MAXLEN` bigint(3) NOT NULL DEFAULT '0' +) ENGINE=MEMORY DEFAULT CHARSET=utf8 +drop table t1; End of 5.1 tests diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test index 023e55ea418..06ad25313ce 100644 --- a/mysql-test/t/create.test +++ b/mysql-test/t/create.test @@ -1328,4 +1328,17 @@ create table t2 as select f1() from t1; drop table t1,t2; drop function f1; +# +# Bug#25629 CREATE TABLE LIKE does not work with INFORMATION_SCHEMA +# +create table t1 like information_schema.processlist; +show create table t1; +drop table t1; +create temporary table t1 like information_schema.processlist; +show create table t1; +drop table t1; +create table t1 like information_schema.character_sets; +show create table t1; +drop table t1; + --echo End of 5.1 tests diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 75c97d8edd4..f179d8bea1e 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -4565,6 +4565,55 @@ bool mysql_preload_keys(THD* thd, TABLE_LIST* tables) } + +/** + @brief Create frm file based on I_S table + + @param[in] thd thread handler + @param[in] schema_table I_S table + @param[in] dst_path path where frm should be created + @param[in] create_info Create info + + @return Operation status + @retval 0 success + @retval 1 error +*/ + + +bool 
mysql_create_like_schema_frm(THD* thd, TABLE_LIST* schema_table, + char *dst_path, HA_CREATE_INFO *create_info) +{ + HA_CREATE_INFO local_create_info; + Alter_info alter_info; + bool tmp_table= (create_info->options & HA_LEX_CREATE_TMP_TABLE); + uint keys= schema_table->table->s->keys; + uint db_options= 0; + DBUG_ENTER("mysql_create_like_schema_frm"); + + bzero((char*) &local_create_info, sizeof(local_create_info)); + local_create_info.db_type= schema_table->table->s->db_type(); + local_create_info.row_type= schema_table->table->s->row_type; + local_create_info.default_table_charset=default_charset_info; + alter_info.flags= (ALTER_CHANGE_COLUMN | ALTER_RECREATE); + schema_table->table->use_all_columns(); + if (mysql_prepare_alter_table(thd, schema_table->table, + &local_create_info, &alter_info)) + DBUG_RETURN(1); + if (mysql_prepare_create_table(thd, &local_create_info, &alter_info, + tmp_table, &db_options, + schema_table->table->file, + &schema_table->table->s->key_info, &keys, 0)) + DBUG_RETURN(1); + local_create_info.max_rows= 0; + if (mysql_create_frm(thd, dst_path, NullS, NullS, + &local_create_info, alter_info.create_list, + keys, schema_table->table->s->key_info, + schema_table->table->file)) + DBUG_RETURN(1); + DBUG_RETURN(0); +} + + /* Create a table identical to the specified table @@ -4668,7 +4717,15 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table, during the call to ha_create_table(). See bug #28614 for more info. */ VOID(pthread_mutex_lock(&LOCK_open)); - if (my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE))) + if (src_table->schema_table) + { + if (mysql_create_like_schema_frm(thd, src_table, dst_path, create_info)) + { + VOID(pthread_mutex_unlock(&LOCK_open)); + goto err; + } + } + else if (my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE))) { if (my_errno == ENOENT) my_error(ER_BAD_DB_ERROR,MYF(0),db); -- cgit v1.2.1 From 1d062682f5098ea5c30b9b3fef844195790a095e Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 29 Nov 2007 10:37:07 +0400 Subject: Bug #32624 Error with multi queries in MySQL embedded server 5.1.22. server status wasn't properly sent to the client after the error by the embedded server. Wasn't noticed before as one usually stopped retrieving results after he gets an error. libmysqld/lib_sql.cc: Bug #32624 Error with multi queries in MySQL embedded server 5.1.22. server status transferred to the client after errors sql/protocol.cc: Bug #32624 Error with multi queries in MySQL embedded server 5.1.22. set server status before net_send_error_packet() call as this function sends it to the client in the embedded server tests/mysql_client_test.c: Bug #32624 Error with multi queries in MySQL embedded server 5.1.22. 
testcase added --- libmysqld/lib_sql.cc | 2 ++ sql/protocol.cc | 5 +++-- tests/mysql_client_test.c | 14 ++++++++++++++ 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 7ac663480c8..ce692169a5f 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -73,6 +73,7 @@ void embedded_get_error(MYSQL *mysql, MYSQL_DATA *data) net->last_errno= ei->last_errno; strmake(net->last_error, ei->info, sizeof(net->last_error)); memcpy(net->sqlstate, ei->sqlstate, sizeof(net->sqlstate)); + mysql->server_status= ei->server_status; my_free((gptr) data, MYF(0)); } @@ -1027,6 +1028,7 @@ void net_send_error_packet(THD *thd, uint sql_errno, const char *err) ei->last_errno= sql_errno; strmake(ei->info, err, sizeof(ei->info)-1); strmov(ei->sqlstate, mysql_errno_to_sqlstate(sql_errno)); + ei->server_status= thd->server_status; thd->cur_data= 0; } diff --git a/sql/protocol.cc b/sql/protocol.cc index 2bdbe83eea1..ac562a9f5ab 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -110,13 +110,14 @@ void net_send_error(THD *thd, uint sql_errno, const char *err) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, sql_errno, err); } + /* Abort multi-result sets */ + thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; + net_send_error_packet(thd, sql_errno, err); thd->is_fatal_error=0; // Error message is given thd->net.report_error= 0; - /* Abort multi-result sets */ - thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; DBUG_VOID_RETURN; } diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 29935a4924d..33e7d66cb04 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -5643,6 +5643,20 @@ DROP TABLE IF EXISTS test_multi_tab"; (void) my_process_result_set(result); mysql_free_result(result); + /* + Check if errors in one of the queries handled properly. + */ + rc= mysql_query(mysql_local, "select 1; select * from not_existing_table"); + myquery(rc); + result= mysql_store_result(mysql_local); + mysql_free_result(result); + + rc= mysql_next_result(mysql_local); + DIE_UNLESS(rc > 0); + + rc= mysql_next_result(mysql_local); + DIE_UNLESS(rc < 0); + mysql_close(mysql_local); } -- cgit v1.2.1 From 23e402bf4595c6310bef2d7e6a3bcfefb4cb8173 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 29 Nov 2007 14:52:36 +0400 Subject: Bug #29085 A small double precision number becomes zero. Denormalized DOUBLE-s can't be properly handled by old MIPS processors. So we need to enable specific mode for them so IRIX will do use software round to handle such numbers. sql/mysqld.cc: Bug #29085 A small double precision number becomes zero. 
reset_floating_point_exeption() renamed as set_proper_floating_point_mode() #ifdef __sgi code added to enable denormalized DOUBLE-s on IRIX --- sql/mysqld.cc | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 51332053df6..62105e0093a 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -193,7 +193,7 @@ typedef fp_except fp_except_t; this on freebsd */ -inline void reset_floating_point_exceptions() +inline void set_proper_floating_point_mode() { /* Don't fall for overflow, underflow,divide-by-zero or loss of precision */ #if defined(__i386__) @@ -204,8 +204,22 @@ inline void reset_floating_point_exceptions() FP_X_IMP)); #endif } +#elif defined(__sgi) +/* for IRIX to use set_fpc_csr() */ +#include + +inline void set_proper_floating_point_mode() +{ + /* Enable denormalized DOUBLE values support for IRIX */ + { + union fpc_csr n; + n.fc_word = get_fpc_csr(); + n.fc_struct.flush = 0; + set_fpc_csr(n.fc_word); + } +} #else -#define reset_floating_point_exceptions() +#define set_proper_floating_point_mode() #endif /* __FreeBSD__ && HAVE_IEEEFP_H */ } /* cplusplus */ @@ -2876,7 +2890,7 @@ static int init_server_components() query_cache_init(); query_cache_resize(query_cache_size); randominit(&sql_rand,(ulong) start_time,(ulong) start_time/2); - reset_floating_point_exceptions(); + set_proper_floating_point_mode(); init_thr_lock(); #ifdef HAVE_REPLICATION init_slave_list(); -- cgit v1.2.1 From 89a208850a714d5653207cffa2d654e86063a7e7 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 30 Nov 2007 06:32:04 +0100 Subject: Bug#31177: Server variables can't be set to their current values Default values of variables were not subject to upper/lower bounds and step, while setting variables was. Bounds and step are also applied to defaults now; defaults are corrected quietly, values given by the user are corrected, and a correction-warning is thrown as needed. Lastly, very large values could wrap around, starting from 0 again. They are bounded at the maximum value for the respective data-type now if no lower maximum is specified in the variable's definition. client/mysql.cc: correct maxima in options array client/mysqltest.c: adjust minimum for "sleep" option so default value is no longer out of bounds. include/m_string.h: ullstr() - the unsigned brother of llstr() include/my_getopt.h: Flag if we bounded the value (that is, correct anything aside from making value a multiple of block-size) mysql-test/r/delayed.result: We throw a warning now when we adjust out of range parameters. mysql-test/r/index_merge.result: We throw a warning now when we adjust out of range parameters. mysql-test/r/innodb.result: We throw a warning now when we adjust out of range parameters. mysql-test/r/innodb_mysql.result: We throw a warning now when we adjust out of range parameters. mysql-test/r/key_cache.result: We throw a warning now when we adjust out of range parameters. mysql-test/r/packet.result: We throw a warning now when we adjust out of range parameters. mysql-test/r/ps.result: We throw a warning now when we adjust out of range parameters. mysql-test/r/subselect.result: We throw a warning now when we adjust out of range parameters. mysql-test/r/type_bit.result: We throw a warning now when we adjust out of range parameters. mysql-test/r/type_bit_innodb.result: We throw a warning now when we adjust out of range parameters. 
mysql-test/r/variables.result: correct results: bounds and step apply to variables' default values, too mysql-test/t/variables.test: correct results: bounds and step apply to variables' default values, too mysys/my_getopt.c: - apply bounds/step to default values of variables (based on work by serg) - print complaints about incorrect values for variables (truncation etc., by requestion of consulting) - if no lower maximum is specified in variable definition, bound unsigned values at their maximum to prevent wrap-around - some calls to error_reporter had a \n, some didn't. remove \n from calls, let reporter-function handle it, so the default reporter behaves like that in mysqld sql/mysql_priv.h: correct RANGE_ALLOC_BLOCK_SIZE (cleared with monty) sql/mysqld.cc: correct maxima to correct data-type. correct minima where higher than default. correct range-alloc-block-size. correct inno variables so GET_* corresponds to actual variable's type. sql/set_var.cc: When the new value for a variable is out of bounds, we'll send the client a warning (but not if the value was simply not a multiple of 'blocksize'). sys_var_thd_ulong had this, sys_var_long_ptr_global didn't; broken out and streamlined to avoid duplication of code. strings/llstr.c: ullstr() - the unsigned brother of llstr() --- client/mysql.cc | 13 +-- client/mysqltest.c | 2 +- include/m_string.h | 1 + include/my_getopt.h | 3 +- mysql-test/r/delayed.result | 16 ++++ mysql-test/r/index_merge.result | 2 + mysql-test/r/innodb.result | 4 + mysql-test/r/innodb_mysql.result | 2 + mysql-test/r/key_cache.result | 2 + mysql-test/r/packet.result | 8 ++ mysql-test/r/ps.result | 2 + mysql-test/r/subselect.result | 4 + mysql-test/r/type_bit.result | 2 + mysql-test/r/type_bit_innodb.result | 2 + mysql-test/r/variables.result | 30 ++++++- mysql-test/t/variables.test | 2 +- mysys/my_getopt.c | 175 ++++++++++++++++++++++++++---------- sql/mysql_priv.h | 2 +- sql/mysqld.cc | 80 +++++++++-------- sql/set_var.cc | 70 ++++++++++----- strings/llstr.c | 6 ++ 21 files changed, 307 insertions(+), 121 deletions(-) diff --git a/client/mysql.cc b/client/mysql.cc index aa34c69b945..05516183c9d 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -737,9 +737,9 @@ static struct my_option my_long_options[] = 0, 1}, {"max_allowed_packet", OPT_MAX_ALLOWED_PACKET, "Max packet length to send to, or receive from server", - (gptr*) &opt_max_allowed_packet, (gptr*) &opt_max_allowed_packet, 0, GET_ULONG, - REQUIRED_ARG, 16 *1024L*1024L, 4096, (longlong) 2*1024L*1024L*1024L, - MALLOC_OVERHEAD, 1024, 0}, + (gptr*) &opt_max_allowed_packet, (gptr*) &opt_max_allowed_packet, 0, + GET_ULONG, REQUIRED_ARG, 16 *1024L*1024L, 4096, + (longlong) 2*1024L*1024L*1024L, MALLOC_OVERHEAD, 1024, 0}, {"net_buffer_length", OPT_NET_BUFFER_LENGTH, "Buffer for TCP/IP and socket communication", (gptr*) &opt_net_buffer_length, (gptr*) &opt_net_buffer_length, 0, GET_ULONG, @@ -747,12 +747,13 @@ static struct my_option my_long_options[] = {"select_limit", OPT_SELECT_LIMIT, "Automatic limit for SELECT when using --safe-updates", (gptr*) &select_limit, - (gptr*) &select_limit, 0, GET_ULONG, REQUIRED_ARG, 1000L, 1, ~0L, 0, 1, 0}, + (gptr*) &select_limit, 0, GET_ULONG, REQUIRED_ARG, 1000L, 1, ULONG_MAX, + 0, 1, 0}, {"max_join_size", OPT_MAX_JOIN_SIZE, "Automatic limit for rows in a join when using --safe-updates", (gptr*) &max_join_size, - (gptr*) &max_join_size, 0, GET_ULONG, REQUIRED_ARG, 1000000L, 1, ~0L, 0, 1, - 0}, + (gptr*) &max_join_size, 0, GET_ULONG, REQUIRED_ARG, 1000000L, 1, ULONG_MAX, + 0, 1, 0}, 
{"secure-auth", OPT_SECURE_AUTH, "Refuse client connecting to server if it" " uses old (pre-4.1.1) protocol", (gptr*) &opt_secure_auth, (gptr*) &opt_secure_auth, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, diff --git a/client/mysqltest.c b/client/mysqltest.c index eae3b05f61a..4dbf1b11323 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -4986,7 +4986,7 @@ static struct my_option my_long_options[] = "Don't use the memory allocation checking.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"sleep", 'T', "Sleep always this many seconds on sleep commands.", - (gptr*) &opt_sleep, (gptr*) &opt_sleep, 0, GET_INT, REQUIRED_ARG, -1, 0, 0, + (gptr*) &opt_sleep, (gptr*) &opt_sleep, 0, GET_INT, REQUIRED_ARG, -1, -1, 0, 0, 0, 0}, {"socket", 'S', "Socket file to use for connection.", (gptr*) &unix_sock, (gptr*) &unix_sock, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, diff --git a/include/m_string.h b/include/m_string.h index 981111b8718..c26d0fb9260 100644 --- a/include/m_string.h +++ b/include/m_string.h @@ -219,6 +219,7 @@ double my_strtod(const char *str, char **end, int *error); double my_atof(const char *nptr); extern char *llstr(longlong value,char *buff); +extern char *ullstr(longlong value,char *buff); #ifndef HAVE_STRTOUL extern long strtol(const char *str, char **ptr, int base); extern ulong strtoul(const char *str, char **ptr, int base); diff --git a/include/my_getopt.h b/include/my_getopt.h index dcd6ad9d79b..f5688a37231 100644 --- a/include/my_getopt.h +++ b/include/my_getopt.h @@ -67,7 +67,8 @@ extern void my_print_variables(const struct my_option *options); extern void my_getopt_register_get_addr(gptr* (*func_addr)(const char *, uint, const struct my_option *)); -ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp); +ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp, + bool *fixed); my_bool getopt_compare_strings(const char *s, const char *t, uint length); C_MODE_END diff --git a/mysql-test/r/delayed.result b/mysql-test/r/delayed.result index b37679847be..3b2a5027af5 100644 --- a/mysql-test/r/delayed.result +++ b/mysql-test/r/delayed.result @@ -109,12 +109,20 @@ c1 DROP TABLE t1; SET @@auto_increment_offset= @bug20627_old_auto_increment_offset; +Warnings: +Warning 1292 Truncated incorrect auto-increment-offset value: '0' SET @@auto_increment_increment= @bug20627_old_auto_increment_increment; +Warnings: +Warning 1292 Truncated incorrect auto-increment-increment value: '0' SET @@session.auto_increment_offset= @bug20627_old_session_auto_increment_offset; +Warnings: +Warning 1292 Truncated incorrect auto-increment-offset value: '0' SET @@session.auto_increment_increment= @bug20627_old_session_auto_increment_increment; +Warnings: +Warning 1292 Truncated incorrect auto-increment-increment value: '0' SET @bug20830_old_auto_increment_offset= @@auto_increment_offset= 2; SET @bug20830_old_auto_increment_increment= @@ -237,12 +245,20 @@ SUM(c1) DROP TABLE t1; SET @@auto_increment_offset= @bug20830_old_auto_increment_offset; +Warnings: +Warning 1292 Truncated incorrect auto-increment-offset value: '0' SET @@auto_increment_increment= @bug20830_old_auto_increment_increment; +Warnings: +Warning 1292 Truncated incorrect auto-increment-increment value: '0' SET @@session.auto_increment_offset= @bug20830_old_session_auto_increment_offset; +Warnings: +Warning 1292 Truncated incorrect auto-increment-offset value: '0' SET @@session.auto_increment_increment= @bug20830_old_session_auto_increment_increment; +Warnings: +Warning 1292 Truncated incorrect 
auto-increment-increment value: '0' CREATE TABLE t1(a BIT); INSERT DELAYED INTO t1 VALUES(1); FLUSH TABLE t1; diff --git a/mysql-test/r/index_merge.result b/mysql-test/r/index_merge.result index 15aa636d740..3a152fb2327 100644 --- a/mysql-test/r/index_merge.result +++ b/mysql-test/r/index_merge.result @@ -340,6 +340,8 @@ create table t4 (a int); insert into t4 values (1),(4),(3); set @save_join_buffer_size=@@join_buffer_size; set join_buffer_size= 4000; +Warnings: +Warning 1292 Truncated incorrect join_buffer_size value: '4000' explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) from t0 as A force index(i1,i2), t0 as B force index (i1,i2) where (A.key1 < 500000 or A.key2 < 3) diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result index 6082a30bce3..d27bf8df2fa 100644 --- a/mysql-test/r/innodb.result +++ b/mysql-test/r/innodb.result @@ -1833,6 +1833,8 @@ show variables like "innodb_thread_concurrency"; Variable_name Value innodb_thread_concurrency 8 set global innodb_thread_concurrency=1001; +Warnings: +Warning 1292 Truncated incorrect innodb_thread_concurrency value: '1001' show variables like "innodb_thread_concurrency"; Variable_name Value innodb_thread_concurrency 1000 @@ -1852,6 +1854,8 @@ show variables like "innodb_concurrency_tickets"; Variable_name Value innodb_concurrency_tickets 1000 set global innodb_concurrency_tickets=0; +Warnings: +Warning 1292 Truncated incorrect innodb_concurrency_tickets value: '0' show variables like "innodb_concurrency_tickets"; Variable_name Value innodb_concurrency_tickets 1 diff --git a/mysql-test/r/innodb_mysql.result b/mysql-test/r/innodb_mysql.result index d5f014b6840..b073e4bd6ce 100644 --- a/mysql-test/r/innodb_mysql.result +++ b/mysql-test/r/innodb_mysql.result @@ -712,6 +712,8 @@ INSERT INTO t1(b,c) SELECT b,c FROM t2; UPDATE t2 SET c='2007-01-03'; INSERT INTO t1(b,c) SELECT b,c FROM t2; set @@sort_buffer_size=8192; +Warnings: +Warning 1292 Truncated incorrect sort_buffer_size value: '8192' SELECT COUNT(*) FROM t1; COUNT(*) 3072 diff --git a/mysql-test/r/key_cache.result b/mysql-test/r/key_cache.result index 08d8059f61b..9ada5dc0784 100644 --- a/mysql-test/r/key_cache.result +++ b/mysql-test/r/key_cache.result @@ -276,6 +276,8 @@ Variable_name Value Key_blocks_unused KEY_BLOCKS_UNUSED set global keycache2.key_buffer_size=0; set global keycache3.key_buffer_size=100; +Warnings: +Warning 1292 Truncated incorrect key_buffer_size value: '100' set global keycache3.key_buffer_size=0; create table t1 (mytext text, FULLTEXT (mytext)); insert t1 values ('aaabbb'); diff --git a/mysql-test/r/packet.result b/mysql-test/r/packet.result index dfb5595e02d..df0d9ff9adc 100644 --- a/mysql-test/r/packet.result +++ b/mysql-test/r/packet.result @@ -1,7 +1,15 @@ set global max_allowed_packet=100; +Warnings: +Warning 1292 Truncated incorrect max_allowed_packet value: '100' set max_allowed_packet=100; +Warnings: +Warning 1292 Truncated incorrect max_allowed_packet value: '100' set global net_buffer_length=100; +Warnings: +Warning 1292 Truncated incorrect net_buffer_length value: '100' set net_buffer_length=100; +Warnings: +Warning 1292 Truncated incorrect net_buffer_length value: '100' SELECT 
length("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") as len; len 1024 diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result index 4db588e5cac..648d6468c0a 100644 --- a/mysql-test/r/ps.result +++ b/mysql-test/r/ps.result @@ -806,6 +806,8 @@ select @@max_prepared_stmt_count; @@max_prepared_stmt_count 0 set global max_prepared_stmt_count=10000000000000000; +Warnings: +Warning 1292 Truncated incorrect max_prepared_stmt_count value: '10000000000000000' select @@max_prepared_stmt_count; @@max_prepared_stmt_count 1048576 diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index be99bdb1afc..42381e2768e 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -3666,6 +3666,8 @@ CREATE TABLE t1 (a int, b int auto_increment, PRIMARY KEY (b)); CREATE TABLE t2 (x int auto_increment, y int, z int, PRIMARY KEY (x), FOREIGN KEY (y) REFERENCES t1 (b)); SET SESSION sort_buffer_size = 32 * 1024; +Warnings: +Warning 1292 Truncated incorrect sort_buffer_size value: '32768' SELECT SQL_NO_CACHE COUNT(*) FROM (SELECT a, b, (SELECT x FROM t2 WHERE y=b ORDER BY z DESC LIMIT 1) c FROM t1) t; @@ -4101,6 +4103,8 @@ INSERT INTO `t1` VALUES ('asdf','2007-02-08 01:11:26'); INSERT INTO `t2` VALUES ('abcdefghijk'); INSERT INTO `t2` VALUES ('asdf'); SET session sort_buffer_size=8192; +Warnings: +Warning 1292 Truncated incorrect sort_buffer_size value: '8192' SELECT (SELECT 1 FROM t1 WHERE t1.a=t2.a ORDER BY t1.b LIMIT 1) AS d1 FROM t2; d1 1 diff --git a/mysql-test/r/type_bit.result b/mysql-test/r/type_bit.result index 4c1b80c2fd5..0ee38426fbf 100644 --- a/mysql-test/r/type_bit.result +++ b/mysql-test/r/type_bit.result @@ -269,6 +269,8 @@ a+0 b+0 56 379 68 454 set @@max_length_for_sort_data=0; +Warnings: +Warning 1292 Truncated incorrect max_length_for_sort_data value: '0' select a+0, b+0 from t1 where a > 40 and a < 70 order by 2; a+0 b+0 57 135 diff --git a/mysql-test/r/type_bit_innodb.result b/mysql-test/r/type_bit_innodb.result index c4506231f27..d0cf15e59a5 100644 --- a/mysql-test/r/type_bit_innodb.result +++ b/mysql-test/r/type_bit_innodb.result @@ -269,6 +269,8 @@ a+0 b+0 56 379 68 454 set @@max_length_for_sort_data=0; +Warnings: +Warning 1292 Truncated incorrect max_length_for_sort_data value: '0' select a+0, b+0 from t1 where a > 40 and a < 70 order by 2; a+0 b+0 57 135 diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index 47cd96b90b7..9c360ef4ab3 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -214,6 +214,8 @@ net_read_timeout 600 
net_retry_count 10 net_write_timeout 500 set net_buffer_length=1; +Warnings: +Warning 1292 Truncated incorrect net_buffer_length value: '1' show variables like 'net_buffer_length'; Variable_name Value net_buffer_length 1024 @@ -238,7 +240,7 @@ show variables like '%alloc%'; Variable_name Value query_alloc_block_size 8192 query_prealloc_size 8192 -range_alloc_block_size 2048 +range_alloc_block_size 4096 transaction_alloc_block_size 8192 transaction_prealloc_size 4096 set @@range_alloc_block_size=1024*16; @@ -263,7 +265,7 @@ show variables like '%alloc%'; Variable_name Value query_alloc_block_size 8192 query_prealloc_size 8192 -range_alloc_block_size 2048 +range_alloc_block_size 4096 transaction_alloc_block_size 8192 transaction_prealloc_size 4096 SELECT @@version LIKE 'non-existent'; @@ -321,6 +323,8 @@ select @@autocommit, @@big_tables; @@autocommit @@big_tables 1 1 set global binlog_cache_size=100; +Warnings: +Warning 1292 Truncated incorrect binlog_cache_size value: '100' set bulk_insert_buffer_size=100; set character set cp1251_koi8; set character set default; @@ -349,17 +353,27 @@ set global flush_time=100; set insert_id=1; set interactive_timeout=100; set join_buffer_size=100; +Warnings: +Warning 1292 Truncated incorrect join_buffer_size value: '100' set last_insert_id=1; set global local_infile=1; set long_query_time=100; set low_priority_updates=1; set max_allowed_packet=100; +Warnings: +Warning 1292 Truncated incorrect max_allowed_packet value: '100' set global max_binlog_cache_size=100; +Warnings: +Warning 1292 Truncated incorrect max_binlog_cache_size value: '100' set global max_binlog_size=100; +Warnings: +Warning 1292 Truncated incorrect max_binlog_size value: '100' set global max_connect_errors=100; set global max_connections=100; set global max_delayed_threads=100; set max_heap_table_size=100; +Warnings: +Warning 1292 Truncated incorrect max_heap_table_size value: '100' set max_join_size=100; set max_sort_length=100; set max_tmp_tables=100; @@ -370,17 +384,25 @@ select @@max_user_connections; set global max_write_lock_count=100; set myisam_sort_buffer_size=100; set net_buffer_length=100; +Warnings: +Warning 1292 Truncated incorrect net_buffer_length value: '100' set net_read_timeout=100; set net_write_timeout=100; set global query_cache_limit=100; set global query_cache_size=100; set global query_cache_type=demand; set read_buffer_size=100; +Warnings: +Warning 1292 Truncated incorrect read_buffer_size value: '100' set read_rnd_buffer_size=100; +Warnings: +Warning 1292 Truncated incorrect read_rnd_buffer_size value: '100' set global rpl_recovery_rank=100; set global server_id=100; set global slow_launch_time=100; set sort_buffer_size=100; +Warnings: +Warning 1292 Truncated incorrect sort_buffer_size value: '100' set @@max_sp_recursion_depth=10; select @@max_sp_recursion_depth; @@max_sp_recursion_depth @@ -420,6 +442,8 @@ set storage_engine=myisam; set global thread_cache_size=100; set timestamp=1, timestamp=default; set tmp_table_size=100; +Warnings: +Warning 1292 Truncated incorrect tmp_table_size value: '100' set tx_isolation="READ-COMMITTED"; set wait_timeout=100; set log_warnings=1; @@ -570,6 +594,8 @@ SHOW VARIABLES LIKE 'MYISAM_DATA_POINTER_SIZE'; Variable_name Value myisam_data_pointer_size 7 SET GLOBAL table_cache=-1; +Warnings: +Warning 1292 Truncated incorrect table_cache value: '0' SHOW VARIABLES LIKE 'table_cache'; Variable_name Value table_cache 1 diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index a352665379e..be7e7c2b413 100644 --- 
a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -126,7 +126,7 @@ set GLOBAL query_cache_size=100000; set GLOBAL myisam_max_sort_file_size=2000000; show global variables like 'myisam_max_sort_file_size'; set GLOBAL myisam_max_sort_file_size=default; ---replace_result 2147483647 FILE_SIZE 9223372036854775807 FILE_SIZE +--replace_result 2147482624 FILE_SIZE 2146435072 FILE_SIZE show variables like 'myisam_max_sort_file_size'; set global net_retry_count=10, session net_retry_count=10; diff --git a/mysys/my_getopt.c b/mysys/my_getopt.c index 71630e1b4c2..f41e8166876 100644 --- a/mysys/my_getopt.c +++ b/mysys/my_getopt.c @@ -31,6 +31,7 @@ my_bool getopt_compare_strings(const char *s, const char *t, uint length); static longlong getopt_ll(char *arg, const struct my_option *optp, int *err); +static longlong getopt_ll_limit_value(longlong, const struct my_option *); static ulonglong getopt_ull(char *arg, const struct my_option *optp, int *err); static void init_variables(const struct my_option *options); @@ -70,6 +71,7 @@ static void default_reporter(enum loglevel level, fprintf(stderr, "%s", "Info: "); vfprintf(stderr, format, args); va_end(args); + fputc('\n', stderr); fflush(stderr); } @@ -133,7 +135,7 @@ int handle_options(int *argc, char ***argv, { if (my_getopt_print_errors) my_getopt_error_reporter(ERROR_LEVEL, - "%s: Option '-O' requires an argument\n", + "%s: Option '-O' requires an argument", my_progname); return EXIT_ARGUMENT_REQUIRED; } @@ -151,7 +153,7 @@ int handle_options(int *argc, char ***argv, { if (my_getopt_print_errors) my_getopt_error_reporter(ERROR_LEVEL, - "%s: Option '--set-variable' requires an argument\n", + "%s: Option '--set-variable' requires an argument", my_progname); return EXIT_ARGUMENT_REQUIRED; } @@ -165,7 +167,7 @@ int handle_options(int *argc, char ***argv, { if (my_getopt_print_errors) my_getopt_error_reporter(ERROR_LEVEL, - "%s: Option '--set-variable' requires an argument\n", + "%s: Option '--set-variable' requires an argument", my_progname); return EXIT_ARGUMENT_REQUIRED; } @@ -228,7 +230,7 @@ int handle_options(int *argc, char ***argv, { if (my_getopt_print_errors) my_getopt_error_reporter(ERROR_LEVEL, - "%s: ambiguous option '--%s-%s' (--%s-%s)\n", + "%s: ambiguous option '--%s-%s' (--%s-%s)", my_progname, special_opt_prefix[i], cur_arg, special_opt_prefix[i], prev_found); @@ -265,7 +267,7 @@ int handle_options(int *argc, char ***argv, if (my_getopt_print_errors) my_getopt_error_reporter(option_is_loose ? WARNING_LEVEL : ERROR_LEVEL, - "%s: unknown variable '%s'\n", + "%s: unknown variable '%s'", my_progname, cur_arg); if (!option_is_loose) return EXIT_UNKNOWN_VARIABLE; @@ -273,9 +275,9 @@ int handle_options(int *argc, char ***argv, else { if (my_getopt_print_errors) - my_getopt_error_reporter(option_is_loose ? + my_getopt_error_reporter(option_is_loose ? 
WARNING_LEVEL : ERROR_LEVEL, - "%s: unknown option '--%s'\n", + "%s: unknown option '--%s'", my_progname, cur_arg); if (!option_is_loose) return EXIT_UNKNOWN_OPTION; @@ -293,7 +295,7 @@ int handle_options(int *argc, char ***argv, { if (my_getopt_print_errors) my_getopt_error_reporter(ERROR_LEVEL, - "%s: variable prefix '%s' is not unique\n", + "%s: variable prefix '%s' is not unique", my_progname, opt_str); return EXIT_VAR_PREFIX_NOT_UNIQUE; } @@ -301,7 +303,7 @@ int handle_options(int *argc, char ***argv, { if (my_getopt_print_errors) my_getopt_error_reporter(ERROR_LEVEL, - "%s: ambiguous option '--%s' (%s, %s)\n", + "%s: ambiguous option '--%s' (%s, %s)", my_progname, opt_str, prev_found, optp->name); return EXIT_AMBIGUOUS_OPTION; @@ -324,7 +326,7 @@ int handle_options(int *argc, char ***argv, { if (my_getopt_print_errors) my_getopt_error_reporter(ERROR_LEVEL, - "%s: option '%s' cannot take an argument\n", + "%s: option '%s' cannot take an argument", my_progname, optp->name); return EXIT_NO_ARGUMENT_ALLOWED; } @@ -337,7 +339,7 @@ int handle_options(int *argc, char ***argv, { if (my_getopt_print_errors) my_getopt_error_reporter(ERROR_LEVEL, - "%s: option '--%s' cannot take an argument\n", + "%s: option '--%s' cannot take an argument", my_progname, optp->name); return EXIT_NO_ARGUMENT_ALLOWED; } @@ -359,7 +361,7 @@ int handle_options(int *argc, char ***argv, { my_getopt_error_reporter(WARNING_LEVEL, "%s: ignoring option '--%s' due to \ -invalid value '%s'\n", +invalid value '%s'", my_progname, optp->name, optend); continue; } @@ -390,7 +392,7 @@ invalid value '%s'\n", { if (my_getopt_print_errors) my_getopt_error_reporter(ERROR_LEVEL, - "%s: option '--%s' requires an argument\n", + "%s: option '--%s' requires an argument", my_progname, optp->name); return EXIT_ARGUMENT_REQUIRED; } @@ -450,7 +452,7 @@ invalid value '%s'\n", { if (my_getopt_print_errors) my_getopt_error_reporter(ERROR_LEVEL, - "%s: option '-%c' requires an argument\n", + "%s: option '-%c' requires an argument", my_progname, optp->id); return EXIT_ARGUMENT_REQUIRED; } @@ -463,7 +465,7 @@ invalid value '%s'\n", set_maximum_value))) { my_getopt_error_reporter(ERROR_LEVEL, - "%s: Error while setting value '%s' to '%s'\n", + "%s: Error while setting value '%s' to '%s'", my_progname, argument, optp->name); return error; } @@ -475,7 +477,7 @@ invalid value '%s'\n", { if (my_getopt_print_errors) my_getopt_error_reporter(ERROR_LEVEL, - "%s: unknown option '-%c'\n", + "%s: unknown option '-%c'", my_progname, *optend); return EXIT_UNKNOWN_OPTION; } @@ -486,13 +488,13 @@ invalid value '%s'\n", if ((error= setval(optp, value, argument, set_maximum_value))) { my_getopt_error_reporter(ERROR_LEVEL, - "%s: Error while setting value '%s' to '%s'\n", + "%s: Error while setting value '%s' to '%s'", my_progname, argument, optp->name); return error; } get_one_option(optp->id, optp, argument); - (*argc)--; /* option handled (short or long), decrease argument count */ + (*argc)--; /* option handled (short or long), decrease argument count */ } else /* non-option found */ (*argv)[argvpos++]= cur_arg; @@ -575,13 +577,17 @@ static int setval(const struct my_option *opts, gptr *value, char *argument, *((my_bool*) result_pos)= (my_bool) atoi(argument) != 0; break; case GET_INT: - case GET_UINT: /* fall through */ *((int*) result_pos)= (int) getopt_ll(argument, opts, &err); break; + case GET_UINT: + *((uint*) result_pos)= (uint) getopt_ull(argument, opts, &err); + break; case GET_LONG: - case GET_ULONG: /* fall through */ *((long*) result_pos)= (long) 
getopt_ll(argument, opts, &err); break; + case GET_ULONG: + *((long*) result_pos)= (long) getopt_ull(argument, opts, &err); + break; case GET_LL: *((longlong*) result_pos)= getopt_ll(argument, opts, &err); break; @@ -733,23 +739,46 @@ static longlong eval_num_suffix (char *argument, int *error, char *option_name) static longlong getopt_ll(char *arg, const struct my_option *optp, int *err) { - longlong num; + longlong num=eval_num_suffix(arg, err, (char*) optp->name); + return getopt_ll_limit_value(num, optp); +} + +/* + function: getopt_ll_limit_value + + Applies min/max/block_size to a numeric value of an option. + Returns "fixed" value. +*/ + +static longlong getopt_ll_limit_value(longlong num, + const struct my_option *optp) +{ + longlong old= num; + bool trunc= FALSE; + char buf1[255], buf2[255]; ulonglong block_size= (optp->block_size ? (ulonglong) optp->block_size : 1L); - - num= eval_num_suffix(arg, err, (char*) optp->name); - if (num > 0 && (ulonglong) num > (ulonglong) optp->max_value && + + if (num > 0 && ((ulonglong) num > (ulonglong) optp->max_value) && optp->max_value) /* if max value is not set -> no upper limit */ { - char buf[22]; - my_getopt_error_reporter(WARNING_LEVEL, - "Truncated incorrect %s value: '%s'", - optp->name, llstr(num, buf)); - num= (ulonglong) optp->max_value; + trunc= TRUE; } + num= ((num - optp->sub_size) / block_size); num= (longlong) (num * block_size); - return max(num, optp->min_value); + + if (num < optp->min_value) + { + num= optp->min_value; + trunc= TRUE; + } + + if (trunc) + my_getopt_error_reporter(WARNING_LEVEL, + "option '%s': signed value %s adjusted to %s", + optp->name, llstr(old, buf1), llstr(num, buf2)); + return num; } /* @@ -761,25 +790,67 @@ static longlong getopt_ll(char *arg, const struct my_option *optp, int *err) static ulonglong getopt_ull(char *arg, const struct my_option *optp, int *err) { - ulonglong num; - - num= eval_num_suffix(arg, err, (char*) optp->name); - return getopt_ull_limit_value(num, optp); + ulonglong num= eval_num_suffix(arg, err, (char*) optp->name); + return getopt_ull_limit_value(num, optp, NULL); } -ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp) +ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp, + bool *fix) { + bool adjusted= FALSE; + ulonglong old= num, mod; + char buf1[255], buf2[255]; + if ((ulonglong) num > (ulonglong) optp->max_value && optp->max_value) /* if max value is not set -> no upper limit */ + { num= (ulonglong) optp->max_value; + adjusted= TRUE; + } + + switch ((optp->var_type & GET_TYPE_MASK)) { + case GET_UINT: + if (num > (ulonglong) UINT_MAX) + { + num= ((ulonglong) UINT_MAX); + adjusted= TRUE; + } + break; + case GET_ULONG: +#if SIZEOF_LONG < SIZEOF_LONG_LONG + if (num > (ulonglong) ULONG_MAX) + { + num= ((ulonglong) ULONG_MAX); + adjusted= TRUE; + } +#endif + break; + default: + DBUG_ASSERT((optp->var_type & GET_TYPE_MASK) == GET_ULL); + break; + } + if (optp->block_size > 1) { num/= (ulonglong) optp->block_size; num*= (ulonglong) optp->block_size; } + if (num < (ulonglong) optp->min_value) + { num= (ulonglong) optp->min_value; + adjusted= TRUE; + } + + if (adjusted) + my_getopt_error_reporter(WARNING_LEVEL, + "option '%s': unsigned value %s adjusted to %s", + optp->name, ullstr(old, buf1), ullstr(num, buf2)); + + if (fix) + *fix= adjusted; + return num; } @@ -789,38 +860,43 @@ ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp) SYNOPSIS init_one_value() - option Option to initialize - value Pointer to 
variable + optp Option to initialize + value Pointer to variable */ -static void init_one_value(const struct my_option *option, gptr *variable, +static void init_one_value(const struct my_option *optp, gptr *variable, longlong value) { - switch ((option->var_type & GET_TYPE_MASK)) { + DBUG_ENTER("init_one_value"); + + switch ((optp->var_type & GET_TYPE_MASK)) { case GET_BOOL: *((my_bool*) variable)= (my_bool) value; break; case GET_INT: - *((int*) variable)= (int) value; + *((int*) variable)= (int) getopt_ll_limit_value(value, optp); break; case GET_UINT: - *((uint*) variable)= (uint) value; + *((uint*) variable)= (uint) getopt_ull_limit_value(value, optp, NULL); break; case GET_LONG: - *((long*) variable)= (long) value; + *((long*) variable)= (long) getopt_ll_limit_value(value, optp); break; case GET_ULONG: - *((ulong*) variable)= (ulong) value; + *((ulong*) variable)= (ulong) getopt_ull_limit_value(value, optp, NULL); break; case GET_LL: - *((longlong*) variable)= (longlong) value; + *((longlong*) variable)= (longlong) getopt_ll_limit_value(value, optp); break; case GET_ULL: - *((ulonglong*) variable)= (ulonglong) value; + *((ulonglong*) variable)= (ulonglong) getopt_ull_limit_value(value, optp, + NULL); break; default: /* dummy default to avoid compiler warnings */ break; } + + DBUG_VOID_RETURN; } @@ -839,9 +915,11 @@ static void init_one_value(const struct my_option *option, gptr *variable, static void init_variables(const struct my_option *options) { + DBUG_ENTER("init_variables"); for (; options->name; options++) { gptr *variable; + DBUG_PRINT("options", ("name: '%s'", options->name)); /* We must set u_max_value first as for some variables options->u_max_value == options->value and in this case we want to @@ -855,6 +933,7 @@ static void init_variables(const struct my_option *options) (variable= (*getopt_get_addr)("", 0, options))) init_one_value(options, variable, options->def_value); } + DBUG_VOID_RETURN; } @@ -957,8 +1036,8 @@ void my_print_variables(const struct my_option *options) (*getopt_get_addr)("", 0, optp) : optp->value); if (value) { - printf("%s", optp->name); - length= (uint) strlen(optp->name); + printf("%s ", optp->name); + length= (uint) strlen(optp->name)+1; for (; length < name_space; length++) putchar(' '); switch ((optp->var_type & GET_TYPE_MASK)) { @@ -977,7 +1056,7 @@ void my_print_variables(const struct my_option *options) printf("%d\n", *((uint*) value)); break; case GET_LONG: - printf("%lu\n", *((long*) value)); + printf("%ld\n", *((long*) value)); break; case GET_ULONG: printf("%lu\n", *((ulong*) value)); diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 47396748da6..4334dedfe4e 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -186,7 +186,7 @@ MY_LOCALE *my_locale_by_number(uint number); #define QUERY_ALLOC_PREALLOC_SIZE 8192 #define TRANS_ALLOC_BLOCK_SIZE 4096 #define TRANS_ALLOC_PREALLOC_SIZE 4096 -#define RANGE_ALLOC_BLOCK_SIZE 2048 +#define RANGE_ALLOC_BLOCK_SIZE 4096 #define ACL_ALLOC_BLOCK_SIZE 1024 #define UDF_ALLOC_BLOCK_SIZE 1024 #define TABLE_ALLOC_BLOCK_SIZE 1024 diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 4c459d34a55..3900f74da7e 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -4958,7 +4958,7 @@ Disable with --skip-bdb (will save memory).", {"concurrent-insert", OPT_CONCURRENT_INSERT, "Use concurrent insert with MyISAM. 
Disable with --concurrent-insert=0", (gptr*) &myisam_concurrent_insert, (gptr*) &myisam_concurrent_insert, - 0, GET_LONG, OPT_ARG, 1, 0, 2, 0, 0, 0}, + 0, GET_ULONG, OPT_ARG, 1, 0, 2, 0, 0, 0}, {"console", OPT_CONSOLE, "Write error output on screen; Don't remove the console window on windows.", (gptr*) &opt_console, (gptr*) &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -5127,7 +5127,7 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite, {"innodb_max_purge_lag", OPT_INNODB_MAX_PURGE_LAG, "Desired maximum length of the purge queue (0 = no limit)", (gptr*) &srv_max_purge_lag, - (gptr*) &srv_max_purge_lag, 0, GET_LONG, REQUIRED_ARG, 0, 0, ~0L, + (gptr*) &srv_max_purge_lag, 0, GET_ULONG, REQUIRED_ARG, 0, 0, ULONG_MAX, 0, 1L, 0}, {"innodb_rollback_on_timeout", OPT_INNODB_ROLLBACK_ON_TIMEOUT, "Roll back the complete transaction on lock wait timeout, for 4.x compatibility (disabled by default)", @@ -5237,7 +5237,8 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite, #ifdef HAVE_MMAP {"log-tc-size", OPT_LOG_TC_SIZE, "Size of transaction coordinator log.", (gptr*) &opt_tc_log_size, (gptr*) &opt_tc_log_size, 0, GET_ULONG, - REQUIRED_ARG, TC_LOG_MIN_SIZE, TC_LOG_MIN_SIZE, ~0L, 0, TC_LOG_PAGE_SIZE, 0}, + REQUIRED_ARG, TC_LOG_MIN_SIZE, TC_LOG_MIN_SIZE, ULONG_MAX, 0, + TC_LOG_PAGE_SIZE, 0}, #endif {"log-update", OPT_UPDATE_LOG, "The update log is deprecated since version 5.0, is replaced by the binary \ @@ -5660,8 +5661,8 @@ log and this option does nothing anymore.", NO_ARG, 0, 0, 0, 0, 0, 0}, {"warnings", 'W', "Deprecated; use --log-warnings instead.", (gptr*) &global_system_variables.log_warnings, - (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, ~0L, - 0, 0, 0}, + (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, + 1, 0, ULONG_MAX, 0, 0, 0}, { "back_log", OPT_BACK_LOG, "The number of outstanding connection requests MySQL can have. This comes into play when the main MySQL thread gets very many connection requests in a very short time.", (gptr*) &back_log, (gptr*) &back_log, 0, GET_ULONG, @@ -5670,29 +5671,29 @@ log and this option does nothing anymore.", { "bdb_cache_size", OPT_BDB_CACHE_SIZE, "The buffer that is allocated to cache index and rows for BDB tables.", (gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULONG, - REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (long) ~0, 0, IO_SIZE, 0}, + REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, ULONG_MAX, 0, IO_SIZE, 0}, /* QQ: The following should be removed soon! 
(bdb_max_lock preferred) */ {"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.", (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG, - REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0}, + REQUIRED_ARG, 10000, 0, ULONG_MAX, 0, 1, 0}, {"bdb_log_buffer_size", OPT_BDB_LOG_BUFFER_SIZE, "The buffer that is allocated to cache index and rows for BDB tables.", (gptr*) &berkeley_log_buffer_size, (gptr*) &berkeley_log_buffer_size, 0, - GET_ULONG, REQUIRED_ARG, 0, 256*1024L, ~0L, 0, 1024, 0}, + GET_ULONG, REQUIRED_ARG, 0, 256*1024L, ULONG_MAX, 0, 1024, 0}, {"bdb_max_lock", OPT_BDB_MAX_LOCK, "The maximum number of locks you can have active on a BDB table.", (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG, - REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0}, + REQUIRED_ARG, 10000, 0, ULONG_MAX, 0, 1, 0}, #endif /* HAVE_BERKELEY_DB */ {"binlog_cache_size", OPT_BINLOG_CACHE_SIZE, "The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.", (gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG, - REQUIRED_ARG, 32*1024L, IO_SIZE, ~0L, 0, IO_SIZE, 0}, + REQUIRED_ARG, 32*1024L, IO_SIZE, ULONG_MAX, 0, IO_SIZE, 0}, {"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE, "Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread!", (gptr*) &global_system_variables.bulk_insert_buff_size, (gptr*) &max_system_variables.bulk_insert_buff_size, - 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ~0L, 0, 1, 0}, + 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ULONG_MAX, 0, 1, 0}, {"connect_timeout", OPT_CONNECT_TIMEOUT, "The number of seconds the mysqld server is waiting for a connect packet before responding with 'Bad handshake'.", (gptr*) &connect_timeout, (gptr*) &connect_timeout, @@ -5715,7 +5716,7 @@ log and this option does nothing anymore.", {"delayed_insert_limit", OPT_DELAYED_INSERT_LIMIT, "After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.", (gptr*) &delayed_insert_limit, (gptr*) &delayed_insert_limit, 0, GET_ULONG, - REQUIRED_ARG, DELAYED_LIMIT, 1, ~0L, 0, 1, 0}, + REQUIRED_ARG, DELAYED_LIMIT, 1, ULONG_MAX, 0, 1, 0}, {"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT, "How long a INSERT DELAYED thread should wait for INSERT statements before terminating.", (gptr*) &delayed_insert_timeout, (gptr*) &delayed_insert_timeout, 0, @@ -5723,7 +5724,7 @@ log and this option does nothing anymore.", { "delayed_queue_size", OPT_DELAYED_QUEUE_SIZE, "What size queue (in rows) should be allocated for handling INSERT DELAYED. 
If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.", (gptr*) &delayed_queue_size, (gptr*) &delayed_queue_size, 0, GET_ULONG, - REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, ~0L, 0, 1, 0}, + REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, ULONG_MAX, 0, 1, 0}, {"div_precision_increment", OPT_DIV_PRECINCREMENT, "Precision of the result of '/' operator will be increased on that value.", (gptr*) &global_system_variables.div_precincrement, @@ -5760,16 +5761,16 @@ log and this option does nothing anymore.", (gptr*) &ft_stopword_file, (gptr*) &ft_stopword_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "group_concat_max_len", OPT_GROUP_CONCAT_MAX_LEN, - "The maximum length of the result of function group_concat.", + "The maximum length of the result of function group_concat.", (gptr*) &global_system_variables.group_concat_max_len, (gptr*) &max_system_variables.group_concat_max_len, 0, GET_ULONG, - REQUIRED_ARG, 1024, 4, (long) ~0, 0, 1, 0}, + REQUIRED_ARG, 1024, 4, ULONG_MAX, 0, 1, 0}, #ifdef HAVE_INNOBASE_DB {"innodb_additional_mem_pool_size", OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE, "Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.", (gptr*) &innobase_additional_mem_pool_size, (gptr*) &innobase_additional_mem_pool_size, 0, GET_LONG, REQUIRED_ARG, - 1*1024*1024L, 512*1024L, ~0L, 0, 1024, 0}, + 1*1024*1024L, 512*1024L, LONG_MAX, 0, 1024, 0}, {"innodb_autoextend_increment", OPT_INNODB_AUTOEXTEND_INCREMENT, "Data file autoextend increment in megabytes", (gptr*) &srv_auto_extend_increment, @@ -5793,7 +5794,7 @@ log and this option does nothing anymore.", SQL query after it has once got the ticket", (gptr*) &srv_n_free_tickets_to_enter, (gptr*) &srv_n_free_tickets_to_enter, - 0, GET_LONG, REQUIRED_ARG, 500L, 1L, ~0L, 0, 1L, 0}, + 0, GET_ULONG, REQUIRED_ARG, 500L, 1L, ULONG_MAX, 0, 1L, 0}, {"innodb_file_io_threads", OPT_INNODB_FILE_IO_THREADS, "Number of file I/O threads in InnoDB.", (gptr*) &innobase_file_io_threads, (gptr*) &innobase_file_io_threads, 0, GET_LONG, REQUIRED_ARG, 4, 4, 64, 0, @@ -5809,7 +5810,7 @@ log and this option does nothing anymore.", {"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE, "The size of the buffer which InnoDB uses to write log to the log files on disk.", (gptr*) &innobase_log_buffer_size, (gptr*) &innobase_log_buffer_size, 0, - GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, ~0L, 0, 1024, 0}, + GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, LONG_MAX, 0, 1024, 0}, {"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE, "Size of each log file in a log group.", (gptr*) &innobase_log_file_size, (gptr*) &innobase_log_file_size, 0, @@ -5827,24 +5828,24 @@ log and this option does nothing anymore.", {"innodb_open_files", OPT_INNODB_OPEN_FILES, "How many files at the maximum InnoDB keeps open at the same time.", (gptr*) &innobase_open_files, (gptr*) &innobase_open_files, 0, - GET_LONG, REQUIRED_ARG, 300L, 10L, ~0L, 0, 1L, 0}, + GET_LONG, REQUIRED_ARG, 300L, 10L, LONG_MAX, 0, 1L, 0}, {"innodb_sync_spin_loops", OPT_INNODB_SYNC_SPIN_LOOPS, "Count of spin-loop rounds in InnoDB mutexes", (gptr*) &srv_n_spin_wait_rounds, (gptr*) &srv_n_spin_wait_rounds, - 0, GET_LONG, REQUIRED_ARG, 20L, 0L, ~0L, 0, 1L, 0}, + 0, GET_ULONG, REQUIRED_ARG, 20L, 0L, ULONG_MAX, 0, 1L, 0}, {"innodb_thread_concurrency", OPT_INNODB_THREAD_CONCURRENCY, "Helps in performance tuning in heavily concurrent environments. " "Sets the maximum number of threads allowed inside InnoDB. 
Value 0" " will disable the thread throttling.", (gptr*) &srv_thread_concurrency, (gptr*) &srv_thread_concurrency, - 0, GET_LONG, REQUIRED_ARG, 8, 0, 1000, 0, 1, 0}, + 0, GET_ULONG, REQUIRED_ARG, 8, 0, 1000, 0, 1, 0}, {"innodb_thread_sleep_delay", OPT_INNODB_THREAD_SLEEP_DELAY, "Time of innodb thread sleeping before joining InnoDB queue (usec). Value 0" " disable a sleep", (gptr*) &srv_thread_sleep_delay, (gptr*) &srv_thread_sleep_delay, - 0, GET_LONG, REQUIRED_ARG, 10000L, 0L, ~0L, 0, 1L, 0}, + 0, GET_ULONG, REQUIRED_ARG, 10000L, 0L, ULONG_MAX, 0, 1L, 0}, #endif /* HAVE_INNOBASE_DB */ {"interactive_timeout", OPT_INTERACTIVE_TIMEOUT, "The number of seconds the server waits for activity on an interactive connection before closing it.", @@ -5874,7 +5875,7 @@ log and this option does nothing anymore.", (gptr*) &dflt_key_cache_var.param_age_threshold, (gptr*) 0, 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, - 300, 100, ~0L, 0, 100, 0}, + 300, 100, ULONG_MAX, 0, 100, 0}, {"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE, "The default size of key cache blocks", (gptr*) &dflt_key_cache_var.param_block_size, @@ -5910,7 +5911,7 @@ log and this option does nothing anymore.", {"max_binlog_cache_size", OPT_MAX_BINLOG_CACHE_SIZE, "Can be used to restrict the total size used to cache a multi-transaction query.", (gptr*) &max_binlog_cache_size, (gptr*) &max_binlog_cache_size, 0, - GET_ULONG, REQUIRED_ARG, ~0L, IO_SIZE, ~0L, 0, IO_SIZE, 0}, + GET_ULONG, REQUIRED_ARG, ULONG_MAX, IO_SIZE, ULONG_MAX, 0, IO_SIZE, 0}, {"max_binlog_size", OPT_MAX_BINLOG_SIZE, "Binary log will be rotated automatically when the size exceeds this \ value. Will also apply to relay logs if max_relay_log_size is 0. \ @@ -5920,7 +5921,7 @@ The minimum value for this variable is 4096.", {"max_connect_errors", OPT_MAX_CONNECT_ERRORS, "If there is more than this number of interrupted connections from a host this host will be blocked from further connections.", (gptr*) &max_connect_errors, (gptr*) &max_connect_errors, 0, GET_ULONG, - REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, ~0L, 0, 1, 0}, + REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, ULONG_MAX, 0, 1, 0}, {"max_connections", OPT_MAX_CONNECTIONS, "The number of simultaneous clients allowed.", (gptr*) &max_connections, (gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 100, 1, 16384, 0, 1, @@ -5963,7 +5964,7 @@ The minimum value for this variable is 4096.", "Limit assumed max number of seeks when looking up rows based on a key", (gptr*) &global_system_variables.max_seeks_for_key, (gptr*) &max_system_variables.max_seeks_for_key, 0, GET_ULONG, - REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0 }, + REQUIRED_ARG, ULONG_MAX, 1, ULONG_MAX, 0, 1, 0 }, {"max_sort_length", OPT_MAX_SORT_LENGTH, "The number of bytes to use when sorting BLOB or TEXT values (only the first max_sort_length bytes of each value are used; the rest are ignored).", (gptr*) &global_system_variables.max_sort_length, @@ -5978,20 +5979,20 @@ The minimum value for this variable is 4096.", "Maximum number of temporary tables a client can keep open at a time.", (gptr*) &global_system_variables.max_tmp_tables, (gptr*) &max_system_variables.max_tmp_tables, 0, GET_ULONG, - REQUIRED_ARG, 32, 1, ~0L, 0, 1, 0}, + REQUIRED_ARG, 32, 1, ULONG_MAX, 0, 1, 0}, {"max_user_connections", OPT_MAX_USER_CONNECTIONS, "The maximum number of active connections for a single user (0 = no limit).", (gptr*) &max_user_connections, (gptr*) &max_user_connections, 0, GET_UINT, - REQUIRED_ARG, 0, 1, ~0, 0, 1, 0}, + REQUIRED_ARG, 0, 0, (uint) ~0, 0, 1, 0}, {"max_write_lock_count", 
OPT_MAX_WRITE_LOCK_COUNT, "After this many write locks, allow some read locks to run in between.", (gptr*) &max_write_lock_count, (gptr*) &max_write_lock_count, 0, GET_ULONG, - REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0}, + REQUIRED_ARG, ULONG_MAX, 1, ULONG_MAX, 0, 1, 0}, {"multi_range_count", OPT_MULTI_RANGE_COUNT, "Number of key ranges to request at once.", (gptr*) &global_system_variables.multi_range_count, (gptr*) &max_system_variables.multi_range_count, 0, - GET_ULONG, REQUIRED_ARG, 256, 1, ~0L, 0, 1, 0}, + GET_ULONG, REQUIRED_ARG, 256, 1, ULONG_MAX, 0, 1, 0}, {"myisam_block_size", OPT_MYISAM_BLOCK_SIZE, "Block size to be used for MyISAM index pages.", (gptr*) &opt_myisam_block_size, @@ -6019,7 +6020,7 @@ The minimum value for this variable is 4096.", "Number of threads to use when repairing MyISAM tables. The value of 1 disables parallel repair.", (gptr*) &global_system_variables.myisam_repair_threads, (gptr*) &max_system_variables.myisam_repair_threads, 0, - GET_ULONG, REQUIRED_ARG, 1, 1, ~0L, 0, 1, 0}, + GET_ULONG, REQUIRED_ARG, 1, 1, ULONG_MAX, 0, 1, 0}, {"myisam_sort_buffer_size", OPT_MYISAM_SORT_BUFFER_SIZE, "The buffer that is allocated when sorting the index when doing a REPAIR or when creating indexes with CREATE INDEX or ALTER TABLE.", (gptr*) &global_system_variables.myisam_sort_buff_size, @@ -6045,7 +6046,7 @@ The minimum value for this variable is 4096.", "If a read on a communication port is interrupted, retry this many times before giving up.", (gptr*) &global_system_variables.net_retry_count, (gptr*) &max_system_variables.net_retry_count,0, - GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ~0L, 0, 1, 0}, + GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ULONG_MAX, 0, 1, 0}, {"net_write_timeout", OPT_NET_WRITE_TIMEOUT, "Number of seconds to wait for a block to be written to a connection before aborting the write.", (gptr*) &global_system_variables.net_write_timeout, @@ -6074,7 +6075,7 @@ The minimum value for this variable is 4096.", "Allocation block size for query parsing and execution", (gptr*) &global_system_variables.query_alloc_block_size, (gptr*) &max_system_variables.query_alloc_block_size, 0, GET_ULONG, - REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0}, + REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ULONG_MAX, 0, 1024, 0}, #ifdef HAVE_QUERY_CACHE {"query_cache_limit", OPT_QUERY_CACHE_LIMIT, "Don't cache results that are bigger than this.", @@ -6107,12 +6108,13 @@ The minimum value for this variable is 4096.", (gptr*) &global_system_variables.query_prealloc_size, (gptr*) &max_system_variables.query_prealloc_size, 0, GET_ULONG, REQUIRED_ARG, QUERY_ALLOC_PREALLOC_SIZE, QUERY_ALLOC_PREALLOC_SIZE, - ~0L, 0, 1024, 0}, + ULONG_MAX, 0, 1024, 0}, {"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE, "Allocation block size for storing ranges during optimization", (gptr*) &global_system_variables.range_alloc_block_size, (gptr*) &max_system_variables.range_alloc_block_size, 0, GET_ULONG, - REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, 4096, ~0L, 0, 1024, 0}, + REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, RANGE_ALLOC_BLOCK_SIZE, ULONG_MAX, + 0, 1024, 0}, {"read_buffer_size", OPT_RECORD_BUFFER, "Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value.", (gptr*) &global_system_variables.read_buff_size, @@ -6182,7 +6184,7 @@ The minimum value for this variable is 4096.", "Synchronously flush binary log to disk after every #th event. 
" "Use 0 (default) to disable synchronous flushing.", (gptr*) &sync_binlog_period, (gptr*) &sync_binlog_period, 0, GET_ULONG, - REQUIRED_ARG, 0, 0, ~0L, 0, 1, 0}, + REQUIRED_ARG, 0, 0, ULONG_MAX, 0, 1, 0}, {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default.", (gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, @@ -6206,7 +6208,7 @@ The minimum value for this variable is 4096.", {"thread_stack", OPT_THREAD_STACK, "The stack size for each thread.", (gptr*) &thread_stack, (gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK, - 1024L*128L, ~0L, 0, 1024, 0}, + 1024L*128L, ULONG_MAX, 0, 1024, 0}, { "time_format", OPT_TIME_FORMAT, "The TIME format (for future).", (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME], @@ -6222,12 +6224,12 @@ The minimum value for this variable is 4096.", "Allocation block size for various transaction-related structures", (gptr*) &global_system_variables.trans_alloc_block_size, (gptr*) &max_system_variables.trans_alloc_block_size, 0, GET_ULONG, - REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0}, + REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ULONG_MAX, 0, 1024, 0}, {"transaction_prealloc_size", OPT_TRANS_PREALLOC_SIZE, "Persistent buffer for various transaction-related structures", (gptr*) &global_system_variables.trans_prealloc_size, (gptr*) &max_system_variables.trans_prealloc_size, 0, GET_ULONG, - REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0}, + REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ULONG_MAX, 0, 1024, 0}, {"updatable_views_with_limit", OPT_UPDATABLE_VIEWS_WITH_LIMIT, "1 = YES = Don't issue an error message (warning only) if a VIEW without presence of a key of the underlying table is used in queries with a LIMIT clause for updating. 
0 = NO = Prohibit update of a VIEW, which does not contain a key of the underlying table and the query uses a LIMIT clause (usually get from GUI tools).", (gptr*) &global_system_variables.updatable_views_with_limit, diff --git a/sql/set_var.cc b/sql/set_var.cc index 066b879fa6b..84b3f92c1ca 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -113,6 +113,9 @@ static int check_max_delayed_threads(THD *thd, set_var *var); static void fix_thd_mem_root(THD *thd, enum_var_type type); static void fix_trans_mem_root(THD *thd, enum_var_type type); static void fix_server_id(THD *thd, enum_var_type type); +static ulonglong fix_unsigned(THD *thd, ulonglong num, + const struct my_option *option_limits); +static void throw_bounds_warning(THD *thd, const char *name, ulonglong num); static KEY_CACHE *create_key_cache(const char *name, uint length); void fix_sql_mode_var(THD *thd, enum_var_type type); static byte *get_error_count(THD *thd); @@ -1448,6 +1451,27 @@ static void fix_server_id(THD *thd, enum_var_type type) } +static void throw_bounds_warning(THD *thd, const char *name, ulonglong num) +{ + char buf[22]; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), name, + ullstr(num, buf)); +} + +static ulonglong fix_unsigned(THD *thd, ulonglong num, + const struct my_option *option_limits) +{ + bool fixed= FALSE; + ulonglong out= getopt_ull_limit_value(num, option_limits, &fixed); + + if (fixed) + throw_bounds_warning(thd, option_limits->name, num); + return out; +} + + sys_var_long_ptr:: sys_var_long_ptr(const char *name_arg, ulong *value_ptr_arg, sys_after_update_func after_update_arg) @@ -1468,9 +1492,20 @@ bool sys_var_long_ptr_global::update(THD *thd, set_var *var) ulonglong tmp= var->save_result.ulonglong_value; pthread_mutex_lock(guard); if (option_limits) - *value= (ulong) getopt_ull_limit_value(tmp, option_limits); + *value= (ulong) fix_unsigned(thd, tmp, option_limits); else + { +#if SIZEOF_LONG < SIZEOF_LONG_LONG + /* Avoid overflows on 32 bit systems */ + if (tmp > (ulonglong) ~(ulong) 0) + { + tmp= ((ulonglong) ~(ulong) 0); + throw_bounds_warning(thd, name, var->save_result.ulonglong_value); + } +#endif *value= (ulong) tmp; + } + pthread_mutex_unlock(guard); return 0; } @@ -1489,7 +1524,7 @@ bool sys_var_ulonglong_ptr::update(THD *thd, set_var *var) ulonglong tmp= var->save_result.ulonglong_value; pthread_mutex_lock(&LOCK_global_system_variables); if (option_limits) - *value= (ulonglong) getopt_ull_limit_value(tmp, option_limits); + *value= (ulonglong) fix_unsigned(thd, tmp, option_limits); else *value= (ulonglong) tmp; pthread_mutex_unlock(&LOCK_global_system_variables); @@ -1539,38 +1574,29 @@ bool sys_var_thd_ulong::check(THD *thd, set_var *var) bool sys_var_thd_ulong::update(THD *thd, set_var *var) { ulonglong tmp= var->save_result.ulonglong_value; - char buf[22]; - bool truncated= false; /* Don't use bigger value than given with --maximum-variable-name=.. 
*/ if ((ulong) tmp > max_system_variables.*offset) { - truncated= true; - llstr(tmp, buf); + throw_bounds_warning(thd, name, tmp); tmp= max_system_variables.*offset; } -#if SIZEOF_LONG == 4 - /* Avoid overflows on 32 bit systems */ - if (tmp > (ulonglong) ~(ulong) 0) + if (option_limits) + tmp= (ulong) fix_unsigned(thd, tmp, option_limits); +#if SIZEOF_LONG < SIZEOF_LONG_LONG + else if (tmp > (ulonglong) ~(ulong) 0) { - truncated= true; - llstr(tmp, buf); tmp= ((ulonglong) ~(ulong) 0); + throw_bounds_warning(thd, name, var->save_result.ulonglong_value); } #endif - if (truncated) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - ER_TRUNCATED_WRONG_VALUE, - ER(ER_TRUNCATED_WRONG_VALUE), name, - buf); - if (option_limits) - tmp= (ulong) getopt_ull_limit_value(tmp, option_limits); if (var->type == OPT_GLOBAL) global_system_variables.*offset= (ulong) tmp; else thd->variables.*offset= (ulong) tmp; + return 0; } @@ -1605,7 +1631,7 @@ bool sys_var_thd_ha_rows::update(THD *thd, set_var *var) tmp= max_system_variables.*offset; if (option_limits) - tmp= (ha_rows) getopt_ull_limit_value(tmp, option_limits); + tmp= (ha_rows) fix_unsigned(thd, tmp, option_limits); if (var->type == OPT_GLOBAL) { /* Lock is needed to make things safe on 32 bit systems */ @@ -1649,7 +1675,7 @@ bool sys_var_thd_ulonglong::update(THD *thd, set_var *var) tmp= max_system_variables.*offset; if (option_limits) - tmp= getopt_ull_limit_value(tmp, option_limits); + tmp= fix_unsigned(thd, tmp, option_limits); if (var->type == OPT_GLOBAL) { /* Lock is needed to make things safe on 32 bit systems */ @@ -2493,7 +2519,7 @@ bool sys_var_key_buffer_size::update(THD *thd, set_var *var) } key_cache->param_buff_size= - (ulonglong) getopt_ull_limit_value(tmp, option_limits); + (ulonglong) fix_unsigned(thd, tmp, option_limits); /* If key cache didn't existed initialize it, else resize it */ key_cache->in_init= 1; @@ -2541,7 +2567,7 @@ bool sys_var_key_cache_long::update(THD *thd, set_var *var) goto end; *((ulong*) (((char*) key_cache) + offset))= - (ulong) getopt_ull_limit_value(tmp, option_limits); + (ulong) fix_unsigned(thd, tmp, option_limits); /* Don't create a new key cache if it didn't exist diff --git a/strings/llstr.c b/strings/llstr.c index 12aea63e014..643cf36a311 100644 --- a/strings/llstr.c +++ b/strings/llstr.c @@ -32,3 +32,9 @@ char *llstr(longlong value,char *buff) longlong10_to_str(value,buff,-10); return buff; } + +char *ullstr(longlong value,char *buff) +{ + longlong10_to_str(value,buff,10); + return buff; +} -- cgit v1.2.1 From cef31e05e58b43ec4f7c02569432df70c60969f5 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 30 Nov 2007 17:08:00 +0400 Subject: Bug #32374 crash with filesort when selecting from federated table and view. filesort() uses file->estimate_rows_upper_bound() call to allocate internal buffers. If this function returns a value smaller than a number of row that will be returned later in find_all_keys(), that can cause server crash. Fixed by implementing ha_federated::estimate_rows_upper_bound() to return maximum possible number of rows. Present estimation for FEDERATED always returns 0 if the linked to the VIEW. mysql-test/r/federated.result: Bug #32374 crash with filesort when selecting from federated table and view. test result mysql-test/t/federated.test: Bug #32374 crash with filesort when selecting from federated table and view. test case sql/ha_federated.cc: Bug #32374 crash with filesort when selecting from federated table and view. 
ha_federated::estimate_rows_upper_bound() implemented sql/ha_federated.h: Bug #32374 crash with filesort when selecting from federated table and view. ha_federated::estimate_rows_upper_bound() interface --- mysql-test/r/federated.result | 111 ++++++++++++++++++++++++++++++++++++++++++ mysql-test/t/federated.test | 31 ++++++++++++ sql/ha_federated.cc | 18 +++++++ sql/ha_federated.h | 1 + 4 files changed, 161 insertions(+) diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result index 2e7d0ddcea7..a005db4deac 100644 --- a/mysql-test/r/federated.result +++ b/mysql-test/r/federated.result @@ -1934,6 +1934,117 @@ select * from federated.t2; a 1 drop table federated.t1, federated.t2; +create table t1 (a varchar(256)); +drop view if exists v1; +create view v1 as select a from t1; +create table t1 +(a varchar(256)) engine=federated +connection='mysql://root@127.0.0.1:SLAVE_PORT/test/v1'; +select 1 from t1 order by a; +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +drop table t1; +drop table t1; +drop view v1; DROP TABLE IF EXISTS federated.t1; DROP DATABASE IF EXISTS federated; DROP TABLE IF EXISTS federated.t1; diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test index cc66a6ab4bc..d4f22650a32 100644 --- a/mysql-test/t/federated.test +++ b/mysql-test/t/federated.test @@ -1686,4 +1686,35 @@ insert into federated.t1 (a) values (1); select * from federated.t2; drop table federated.t1, federated.t2; +# +# Bug #32374 crash with filesort when selecting from federated table and view +# +connection slave; +create table t1 (a varchar(256)); +--disable_warnings +drop view if exists v1; +--enable_warnings +create view v1 as select a from t1; +--disable_query_log +let $n= 100; +while ($n) +{ + insert into t1 values (repeat('a',200)); + dec $n; +} +--enable_query_log + +connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval create table t1 + (a varchar(256)) engine=federated + connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/v1'; + +select 1 from t1 order by a; +drop table t1; +connection slave; +drop table t1; +drop view v1; + + source include/federated_cleanup.inc; diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index d7f2309657b..ac1e0962ffb 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -2166,6 +2166,24 @@ error: } +/* + This method is used exlusevely by filesort() to check if we + can create sorting buffers of necessary size. + If the handler returns more records that it declares + here server can just crash on filesort(). + We cannot guarantee that's not going to happen with + the FEDERATED engine, as we have records==0 always if the + client is a VIEW, and for the table the number of + records can inpredictably change during execution. + So we return maximum possible value here. 
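// Illustrative sketch, not part of the patch above: a self-contained example of
// the pattern this comment describes. When a handler cannot give a trustworthy
// upper bound (here the remote object may be a VIEW that reports records == 0),
// returning the "unknown/maximum" sentinel keeps filesort() from under-allocating
// its buffers. Every name below (the *_sketch types and HA_POS_ERROR_SKETCH) is a
// hypothetical stand-in; the real MySQL declarations are not reproduced here.
#include <limits>

typedef unsigned long long ha_rows_sketch;            /* stand-in for ha_rows */
static const ha_rows_sketch HA_POS_ERROR_SKETCH=
  std::numeric_limits<ha_rows_sketch>::max();         /* stand-in for HA_POS_ERROR */

struct handler_sketch
{
  ha_rows_sketch records;                 /* 0 when the remote object is a VIEW */
  handler_sketch() : records(0) {}
  virtual ~handler_sketch() {}
  /* Default estimate trusts the (possibly wrong) record count. */
  virtual ha_rows_sketch estimate_rows_upper_bound() { return records; }
};

struct ha_federated_sketch : handler_sketch
{
  /* Be pessimistic: a sort that sizes its buffers from this value now
     over-allocates instead of crashing when more rows arrive than declared. */
  virtual ha_rows_sketch estimate_rows_upper_bound() { return HA_POS_ERROR_SKETCH; }
};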
+*/ + +ha_rows ha_federated::estimate_rows_upper_bound() +{ + return HA_POS_ERROR; +} + + /* Initialized at each key walk (called multiple times unlike rnd_init()) */ int ha_federated::index_init(uint keynr) diff --git a/sql/ha_federated.h b/sql/ha_federated.h index dc4f976c578..349c596ae5a 100644 --- a/sql/ha_federated.h +++ b/sql/ha_federated.h @@ -277,6 +277,7 @@ public: int update_row(const byte *old_data, byte *new_data); int delete_row(const byte *buf); int index_init(uint keynr); + ha_rows estimate_rows_upper_bound(); int index_read(byte *buf, const byte *key, uint key_len, enum ha_rkey_function find_flag); int index_read_idx(byte *buf, uint idx, const byte *key, -- cgit v1.2.1 From ff7e7fcb6794d243b8324c3a76359a9c5ac68710 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 30 Nov 2007 18:48:22 +0400 Subject: Bug#32775 problems with SHOW EVENTS and Information_Schema removed unnecessary privilege checks for I_S schema mysql-test/r/information_schema.result: test result mysql-test/t/information_schema.test: test case sql/events.cc: There is no events in I_S so we don't need to execute check_access here. sql/sql_parse.cc: removed unnecessary check --- mysql-test/r/information_schema.result | 7 +++++++ mysql-test/t/information_schema.test | 8 ++++++++ sql/events.cc | 4 ++-- sql/sql_parse.cc | 4 ---- 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 4df3b56da86..a2c337e00df 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -1611,4 +1611,11 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE tables ALL NULL NULL NULL NULL NULL NULL Skip_open_table; Scanned all databases Warnings: Note 1003 select 1 AS `1` from `information_schema`.`tables` +use information_schema; +show events; +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation +show events from information_schema; +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation +show events where Db= 'information_schema'; +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation End of 5.1 tests. diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test index 2dd19a94758..0d53c404fa9 100644 --- a/mysql-test/t/information_schema.test +++ b/mysql-test/t/information_schema.test @@ -1238,4 +1238,12 @@ select * from `information_schema`.`VIEWS` where `TABLE_NAME` = NULL; # explain extended select 1 from information_schema.tables; +# +# Bug#32775 problems with SHOW EVENTS and Information_Schema +# +use information_schema; +show events; +show events from information_schema; +show events where Db= 'information_schema'; + --echo End of 5.1 tests. 
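The sql/events.cc hunk below relies on plain &&-short-circuiting: the schema-name test runs first, so the privilege check is skipped entirely whenever SHOW EVENTS targets information_schema. What follows is a minimal stand-alone sketch of that ordering, not MySQL source; is_schema_db_sketch() and check_access_sketch() are hypothetical stand-ins for the real is_schema_db() and check_access().

#include <cctype>

static bool is_schema_db_sketch(const char *db)
{
  const char *is_name= "information_schema";
  for (; *db && *is_name; db++, is_name++)
    if (tolower((unsigned char) *db) != *is_name)
      return false;
  return *db == '\0' && *is_name == '\0';
}

static bool check_access_sketch(const char *)
{
  return true;      /* pretend the EVENT privilege is missing ("access denied") */
}

static bool fill_events_rejected(const char *db)
{
  /* Schema test first: for information_schema the privilege check never runs. */
  return !is_schema_db_sketch(db) && check_access_sketch(db);
}

/*
  fill_events_rejected("information_schema") is false, so the (empty) event list
  comes back without the EVENT privilege -- which is what the new test expects --
  while fill_events_rejected("test") still depends on the access check.
*/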
diff --git a/sql/events.cc b/sql/events.cc index 262c62bdbc8..4579fb4d086 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -825,8 +825,8 @@ Events::fill_schema_events(THD *thd, TABLE_LIST *tables, COND * /* cond */) if (thd->lex->sql_command == SQLCOM_SHOW_EVENTS) { DBUG_ASSERT(thd->lex->select_lex.db); - if (check_access(thd, EVENT_ACL, thd->lex->select_lex.db, 0, 0, 0, - is_schema_db(thd->lex->select_lex.db))) + if (!is_schema_db(thd->lex->select_lex.db) && // There is no events in I_S + check_access(thd, EVENT_ACL, thd->lex->select_lex.db, 0, 0, 0, 0)) DBUG_RETURN(1); db= thd->lex->select_lex.db; } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 9f510f9b33c..7a777ba2bbd 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1855,10 +1855,6 @@ mysql_execute_command(THD *thd) switch (lex->sql_command) { case SQLCOM_SHOW_EVENTS: - if ((res= check_access(thd, EVENT_ACL, thd->lex->select_lex.db, 0, 0, 0, - is_schema_db(thd->lex->select_lex.db)))) - break; - /* fall through */ case SQLCOM_SHOW_STATUS_PROC: case SQLCOM_SHOW_STATUS_FUNC: res= execute_sqlcom_select(thd, all_tables); -- cgit v1.2.1 From 05fbb233d8e500da274dbd4bcffc134b23cfca13 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 30 Nov 2007 19:16:13 +0400 Subject: Bug #26921 Problem in mysql_insert_id() Embedded C API function. client library only sets mysql->insert_id when query returned no recordset. So the embedded library should behave the same way libmysqld/lib_sql.cc: Bug #26921 Problem in mysql_insert_id() Embedded C API function. only set 'affected_rows' and 'insert_id' fields when query didn't return a recordset tests/mysql_client_test.c: Bug #26921 Problem in mysql_insert_id() Embedded C API function. testcase added --- libmysqld/lib_sql.cc | 8 +++++--- mysql-test/r/bdb_notembedded.result | 35 ---------------------------------- mysql-test/t/bdb_notembedded.test | 38 ------------------------------------- tests/mysql_client_test.c | 11 +++++++++++ 4 files changed, 16 insertions(+), 76 deletions(-) delete mode 100644 mysql-test/r/bdb_notembedded.result delete mode 100644 mysql-test/t/bdb_notembedded.test diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 7ac663480c8..6aee2fd6614 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -251,9 +251,11 @@ static my_bool emb_read_query_result(MYSQL *mysql) mysql->warning_count= res->embedded_info->warning_count; mysql->server_status= res->embedded_info->server_status; mysql->field_count= res->fields; - mysql->fields= res->embedded_info->fields_list; - mysql->affected_rows= res->embedded_info->affected_rows; - mysql->insert_id= res->embedded_info->insert_id; + if (!(mysql->fields= res->embedded_info->fields_list)) + { + mysql->affected_rows= res->embedded_info->affected_rows; + mysql->insert_id= res->embedded_info->insert_id; + } mysql->net.last_errno= 0; mysql->net.last_error[0]= 0; mysql->info= 0; diff --git a/mysql-test/r/bdb_notembedded.result b/mysql-test/r/bdb_notembedded.result deleted file mode 100644 index 14cb5fad915..00000000000 --- a/mysql-test/r/bdb_notembedded.result +++ /dev/null @@ -1,35 +0,0 @@ -set autocommit=1; -reset master; -create table bug16206 (a int); -insert into bug16206 values(1); -start transaction; -insert into bug16206 values(2); -commit; -show binlog events; -Log_name Pos Event_type Server_id End_log_pos Info -f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4 -f n Query 1 n use `test`; create table bug16206 (a int) -f n Query 1 n use `test`; insert into bug16206 values(1) -f n Query 1 n use `test`; insert into 
bug16206 values(2) -drop table bug16206; -reset master; -create table bug16206 (a int) engine= bdb; -insert into bug16206 values(0); -insert into bug16206 values(1); -start transaction; -insert into bug16206 values(2); -commit; -insert into bug16206 values(3); -show binlog events; -Log_name Pos Event_type Server_id End_log_pos Info -f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4 -f n Query 1 n use `test`; create table bug16206 (a int) engine= bdb -f n Query 1 n use `test`; insert into bug16206 values(0) -f n Query 1 n use `test`; insert into bug16206 values(1) -f n Query 1 n use `test`; BEGIN -f n Query 1 n use `test`; insert into bug16206 values(2) -f n Query 1 n use `test`; COMMIT -f n Query 1 n use `test`; insert into bug16206 values(3) -drop table bug16206; -set autocommit=0; -End of 5.0 tests diff --git a/mysql-test/t/bdb_notembedded.test b/mysql-test/t/bdb_notembedded.test deleted file mode 100644 index 24e64ebbfb2..00000000000 --- a/mysql-test/t/bdb_notembedded.test +++ /dev/null @@ -1,38 +0,0 @@ --- source include/not_embedded.inc --- source include/have_bdb.inc - -# -# Bug #16206: Superfluous COMMIT event in binlog when updating BDB in autocommit mode -# -set autocommit=1; - -let $VERSION=`select version()`; - -reset master; -create table bug16206 (a int); -insert into bug16206 values(1); -start transaction; -insert into bug16206 values(2); -commit; ---replace_result $VERSION VERSION ---replace_column 1 f 2 n 5 n -show binlog events; -drop table bug16206; - -reset master; -create table bug16206 (a int) engine= bdb; -insert into bug16206 values(0); -insert into bug16206 values(1); -start transaction; -insert into bug16206 values(2); -commit; -insert into bug16206 values(3); ---replace_result $VERSION VERSION ---replace_column 1 f 2 n 5 n -show binlog events; -drop table bug16206; - -set autocommit=0; - - ---echo End of 5.0 tests diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 29935a4924d..eb255e7881c 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -15358,6 +15358,8 @@ static void test_bug21206() Test that client gets updated value of insert_id on UPDATE that uses LAST_INSERT_ID(expr). + select_query added to test for bug + #26921 Problem in mysql_insert_id() Embedded C API function */ static void test_bug21726() { @@ -15370,6 +15372,8 @@ static void test_bug21726() const char *update_query= "UPDATE t1 SET i= LAST_INSERT_ID(i + 1)"; int rc; my_ulonglong insert_id; + const char *select_query= "SELECT * FROM t1"; + MYSQL_RES *result; DBUG_ENTER("test_bug21726"); myheader("test_bug21726"); @@ -15386,6 +15390,13 @@ static void test_bug21726() insert_id= mysql_insert_id(mysql); DIE_UNLESS(insert_id == 3); + rc= mysql_query(mysql, select_query); + myquery(rc); + insert_id= mysql_insert_id(mysql); + DIE_UNLESS(insert_id == 3); + result= mysql_store_result(mysql); + mysql_free_result(result); + DBUG_VOID_RETURN; } -- cgit v1.2.1 From 6959d20472f2452f4bedd600224ca565d0d5886e Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 30 Nov 2007 22:17:11 +0400 Subject: Bug #30430 crash:./mtr --embedded-server --ps-protocol cache_innodb func_misc... PS-protocol data is stored in different format - the MYSQL_RECORDS->data contains the link to the record content, not to array of the links to the field's contents. So we have to handle it separately for embedded-server query cache. libmysqld/emb_qcache.cc: Bug #30430 crash:./mtr --embedded-server --ps-protocol cache_innodb func_misc... ps_protocol data now stored in it's particular way. 
libmysqld/emb_qcache.h: Bug #30430 crash:./mtr --embedded-server --ps-protocol cache_innodb func_misc... Querycache_stream::load_char/store_char -> load_uchar/store_uchar mysql-test/r/func_misc.result: Bug #30430 crash:./mtr --embedded-server --ps-protocol cache_innodb func_misc... test result mysql-test/t/func_misc.test: Bug #30430 crash:./mtr --embedded-server --ps-protocol cache_innodb func_misc... the test uses t2 table, so let's make sure it doesn't exist befor the start --- libmysqld/emb_qcache.cc | 103 ++++++++++++++++++++++++++++-------------- libmysqld/emb_qcache.h | 4 +- mysql-test/r/func_misc.result | 2 +- mysql-test/t/func_misc.test | 2 +- 4 files changed, 73 insertions(+), 38 deletions(-) diff --git a/libmysqld/emb_qcache.cc b/libmysqld/emb_qcache.cc index 17549bfa96b..6693627e05e 100644 --- a/libmysqld/emb_qcache.cc +++ b/libmysqld/emb_qcache.cc @@ -19,7 +19,7 @@ #include "emb_qcache.h" #include "embedded_priv.h" -void Querycache_stream::store_char(char c) +void Querycache_stream::store_uchar(uchar c) { if (data_end == cur_data) use_next_block(TRUE); @@ -142,7 +142,7 @@ void Querycache_stream::store_safe_str(const char *str, uint str_len) store_int(0); } -char Querycache_stream::load_char() +uchar Querycache_stream::load_uchar() { if (cur_data == data_end) use_next_block(FALSE); @@ -301,8 +301,8 @@ uint emb_count_querycache_size(THD *thd) *data->embedded_info->prev_ptr= NULL; // this marks the last record cur_row= data->data; n_rows= data->rows; - /* n_fields + n_rows + (field_info + strlen * n_rows) * n_fields */ - result+= (uint) (4+8 + (42 + 4*n_rows)*data->fields); + /* n_fields + n_rows + field_info * n_fields */ + result+= (uint) (4+8 + 42*data->fields); for(; field < field_end; field++) { @@ -313,13 +313,23 @@ uint emb_count_querycache_size(THD *thd) result+= field->def_length; } - for (; cur_row; cur_row=cur_row->next) + if (thd->protocol == &thd->protocol_binary) { - MYSQL_ROW col= cur_row->data; - MYSQL_ROW col_end= col + data->fields; - for (; col < col_end; col++) - if (*col) - result+= *(uint *)((*col) - sizeof(uint)); + result+= (uint) (4*n_rows); + for (; cur_row; cur_row=cur_row->next) + result+= cur_row->length; + } + else + { + result+= (uint) (4*n_rows*data->fields); + for (; cur_row; cur_row=cur_row->next) + { + MYSQL_ROW col= cur_row->data; + MYSQL_ROW col_end= col + data->fields; + for (; col < col_end; col++) + if (*col) + result+= *(uint *)((*col) - sizeof(uint)); + } } return result; } @@ -353,10 +363,10 @@ void emb_store_querycache_result(Querycache_stream *dst, THD *thd) { dst->store_int((uint)field->length); dst->store_int((uint)field->max_length); - dst->store_char((char)field->type); + dst->store_uchar((uchar)field->type); dst->store_short((ushort)field->flags); dst->store_short((ushort)field->charsetnr); - dst->store_char((char)field->decimals); + dst->store_uchar((uchar)field->decimals); dst->store_str(field->name, field->name_length); dst->store_str(field->table, field->table_length); dst->store_str(field->org_name, field->org_name_length); @@ -366,14 +376,22 @@ void emb_store_querycache_result(Querycache_stream *dst, THD *thd) dst->store_safe_str(field->def, field->def_length); } - for (; cur_row; cur_row=cur_row->next) + if (thd->protocol == &thd->protocol_binary) { - MYSQL_ROW col= cur_row->data; - MYSQL_ROW col_end= col + data->fields; - for (; col < col_end; col++) + for (; cur_row; cur_row=cur_row->next) + dst->store_str((char *) cur_row->data, cur_row->length); + } + else + { + for (; cur_row; cur_row=cur_row->next) { - uint len= *col ? 
*(uint *)((*col) - sizeof(uint)) : 0; - dst->store_safe_str(*col, len); + MYSQL_ROW col= cur_row->data; + MYSQL_ROW col_end= col + data->fields; + for (; col < col_end; col++) + { + uint len= *col ? *(uint *)((*col) - sizeof(uint)) : 0; + dst->store_safe_str(*col, len); + } } } DBUG_ASSERT(emb_count_querycache_size(thd) == dst->stored_size); @@ -408,10 +426,10 @@ int emb_load_querycache_result(THD *thd, Querycache_stream *src) { field->length= src->load_int(); field->max_length= (unsigned int)src->load_int(); - field->type= (enum enum_field_types)src->load_char(); + field->type= (enum enum_field_types)src->load_uchar(); field->flags= (unsigned int)src->load_short(); field->charsetnr= (unsigned int)src->load_short(); - field->decimals= (unsigned int)src->load_char(); + field->decimals= src->load_uchar(); if (!(field->name= src->load_str(f_alloc, &field->name_length)) || !(field->table= src->load_str(f_alloc,&field->table_length)) || @@ -423,26 +441,43 @@ int emb_load_querycache_result(THD *thd, Querycache_stream *src) goto err; } - row= (MYSQL_ROWS *)alloc_root(&data->alloc, - (uint) (rows * sizeof(MYSQL_ROWS) + - rows*(data->fields+1)*sizeof(char*))); - end_row= row + rows; - columns= (MYSQL_ROW)end_row; - data->rows= rows; - data->data= row; if (!rows) goto return_ok; + if (thd->protocol == &thd->protocol_binary) + { + uint length; + row= (MYSQL_ROWS *)alloc_root(&data->alloc, rows * sizeof(MYSQL_ROWS)); + end_row= row + rows; + data->data= row; - for (prev_row= &row->next; row < end_row; prev_row= &row->next, row++) + for (prev_row= &row->next; row < end_row; prev_row= &row->next, row++) + { + *prev_row= row; + row->data= (MYSQL_ROW) src->load_str(&data->alloc, &length); + row->length= length; + } + } + else { - *prev_row= row; - row->data= columns; - MYSQL_ROW col_end= columns + data->fields; - for (; columns < col_end; columns++) - src->load_column(&data->alloc, columns); + row= (MYSQL_ROWS *)alloc_root(&data->alloc, + (uint) (rows * sizeof(MYSQL_ROWS) + + rows*(data->fields+1)*sizeof(char*))); + end_row= row + rows; + columns= (MYSQL_ROW)end_row; - *(columns++)= NULL; + data->data= row; + + for (prev_row= &row->next; row < end_row; prev_row= &row->next, row++) + { + *prev_row= row; + row->data= columns; + MYSQL_ROW col_end= columns + data->fields; + for (; columns < col_end; columns++) + src->load_column(&data->alloc, columns); + + *(columns++)= NULL; + } } *prev_row= NULL; data->embedded_info->prev_ptr= prev_row; diff --git a/libmysqld/emb_qcache.h b/libmysqld/emb_qcache.h index 6e320fbd967..a303273f7de 100644 --- a/libmysqld/emb_qcache.h +++ b/libmysqld/emb_qcache.h @@ -58,7 +58,7 @@ public: data_end= cur_data + (block->used-headers_len); } - void store_char(char c); + void store_uchar(uchar c); void store_short(ushort s); void store_int(uint i); void store_ll(ulonglong ll); @@ -66,7 +66,7 @@ public: void store_str(const char *str, uint str_len); void store_safe_str(const char *str, uint str_len); - char load_char(); + uchar load_uchar(); ushort load_short(); uint load_int(); ulonglong load_ll(); diff --git a/mysql-test/r/func_misc.result b/mysql-test/r/func_misc.result index aa75cde0525..7da7b3c0cf4 100644 --- a/mysql-test/r/func_misc.result +++ b/mysql-test/r/func_misc.result @@ -1,4 +1,4 @@ -DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t1, t2; select format(1.5555,0),format(123.5555,1),format(1234.5555,2),format(12345.55555,3),format(123456.5555,4),format(1234567.5555,5),format("12345.2399",2); format(1.5555,0) format(123.5555,1) format(1234.5555,2) format(12345.55555,3) 
format(123456.5555,4) format(1234567.5555,5) format("12345.2399",2) 2 123.6 1,234.56 12,345.556 123,456.5555 1,234,567.55550 12,345.24 diff --git a/mysql-test/t/func_misc.test b/mysql-test/t/func_misc.test index 4b7685c3633..426fa8992fe 100644 --- a/mysql-test/t/func_misc.test +++ b/mysql-test/t/func_misc.test @@ -3,7 +3,7 @@ # --disable_warnings -DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t1, t2; --enable_warnings select format(1.5555,0),format(123.5555,1),format(1234.5555,2),format(12345.55555,3),format(123456.5555,4),format(1234567.5555,5),format("12345.2399",2); -- cgit v1.2.1 From 2850d7bd04af31b733da0402ef02c27764259875 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 1 Dec 2007 13:07:28 +0400 Subject: merging --- mysql-test/r/federated.result | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result index 08270241ff3..00287338459 100644 --- a/mysql-test/r/federated.result +++ b/mysql-test/r/federated.result @@ -1955,7 +1955,6 @@ a 2 DROP TABLE federated.t1; DROP TABLE federated.t1; -End of 5.1 tests create table t1 (a varchar(256)); drop view if exists v1; create view v1 as select a from t1; @@ -2067,6 +2066,7 @@ select 1 from t1 order by a; drop table t1; drop table t1; drop view v1; +End of 5.1 tests DROP TABLE IF EXISTS federated.t1; DROP DATABASE IF EXISTS federated; DROP TABLE IF EXISTS federated.t1; -- cgit v1.2.1 From 58f10e554aa2ce340e853be1006e1f411f89f09d Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 1 Dec 2007 19:55:06 +0100 Subject: Bug#31177: Server variables can't be set to their current values 5.1+ specific fixes (plugins etc.) include/my_getopt.h: make both ull and ll global mysql-test/r/index_merge_myisam.result: we throw warnings to the client, yea, verily mysql-test/r/innodb.result: we throw warnings to the client, yea, verily mysql-test/r/variables.result: we throw warnings to the client, yea, verily mysql-test/t/variables.test: correct result, is multiple of variable's block_size now mysys/my_getopt.c: export getopt_ll_limit_value(), check for integer wrap-around in it, same as in ull variant. Only print warnings to reporter when caller didn't ask for diagnostics, otherwise assume caller will handle any warnings (id est, throw them client-wards) sql/mysqld.cc: correct signedness of "concurrent-insert" sql/sql_plugin.cc: Throw sys-var out-of-range warnings client-wards for plugins, too. 
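The calling convention introduced below is shared by the plugin sys-var checkers: getopt_ll_limit_value()/getopt_ull_limit_value() take an extra bool *fix, and when the caller passes one, the adjuster stays silent and leaves it to the caller to turn any clamping into a client-visible warning. Condensed from the check_func_long() hunk below (a sketch only; the int and longlong variants follow the same shape):

    bool fixed;
    long long tmp;
    struct my_option options;
    value->val_int(value, &tmp);              /* raw value from SET ...      */
    plugin_opt_set_limits(&options, var);     /* min/max/block_size for var  */
    *(long *)save= (long) getopt_ull_limit_value(tmp, &options, &fixed);
    if (fixed)                                /* value was clamped/adjusted  */
    {
      char buf[22];
      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                          ER_TRUNCATED_WRONG_VALUE,
                          ER(ER_TRUNCATED_WRONG_VALUE), var->name,
                          ullstr(tmp, buf));  /* warn the client, not stderr */
    }

When fix is NULL (the plain command-line path), the old behaviour is preserved and the adjusted value is reported through my_getopt_error_reporter() instead.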
--- include/my_getopt.h | 3 ++- mysql-test/r/index_merge_myisam.result | 2 ++ mysql-test/r/innodb.result | 4 +-- mysql-test/r/variables.result | 34 +++++++++++++++++++++--- mysql-test/t/variables.test | 2 +- mysys/my_getopt.c | 48 +++++++++++++++++++++++++--------- sql/mysqld.cc | 2 +- sql/sql_plugin.cc | 35 ++++++++++++++++++++++--- 8 files changed, 104 insertions(+), 26 deletions(-) diff --git a/include/my_getopt.h b/include/my_getopt.h index c0101fd4cdb..14f8e6df95b 100644 --- a/include/my_getopt.h +++ b/include/my_getopt.h @@ -75,7 +75,8 @@ extern void my_getopt_register_get_addr(uchar ** (*func_addr)(const char *, uint const struct my_option *)); ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp, - bool *fixed); + bool *fix); +longlong getopt_ll_limit_value(longlong, const struct my_option *,bool *fix); my_bool getopt_compare_strings(const char *s, const char *t, uint length); C_MODE_END diff --git a/mysql-test/r/index_merge_myisam.result b/mysql-test/r/index_merge_myisam.result index 9d7d06f7f1b..1827871861e 100644 --- a/mysql-test/r/index_merge_myisam.result +++ b/mysql-test/r/index_merge_myisam.result @@ -342,6 +342,8 @@ create table t4 (a int); insert into t4 values (1),(4),(3); set @save_join_buffer_size=@@join_buffer_size; set join_buffer_size= 4000; +Warnings: +Warning 1292 Truncated incorrect join_buffer_size value: '4000' explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) from t0 as A force index(i1,i2), t0 as B force index (i1,i2) where (A.key1 < 500000 or A.key2 < 3) diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result index c932be6415e..2a3ef37d7a9 100644 --- a/mysql-test/r/innodb.result +++ b/mysql-test/r/innodb.result @@ -1793,7 +1793,7 @@ Variable_name Value innodb_thread_concurrency 8 set global innodb_thread_concurrency=1001; Warnings: -Warning 1292 Truncated incorrect innodb_thread_concurrency value: '1001' +Warning 1292 Truncated incorrect thread_concurrency value: '1001' show variables like "innodb_thread_concurrency"; Variable_name Value innodb_thread_concurrency 1000 @@ -1814,7 +1814,7 @@ Variable_name Value innodb_concurrency_tickets 1000 set global innodb_concurrency_tickets=0; Warnings: -Warning 1292 Truncated incorrect innodb_concurrency_tickets value: '0' +Warning 1292 Truncated incorrect concurrency_tickets value: '0' show variables like "innodb_concurrency_tickets"; Variable_name Value innodb_concurrency_tickets 1 diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index bdaec70021b..e57f82aca26 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -279,6 +279,8 @@ NET_READ_TIMEOUT 600 NET_RETRY_COUNT 10 NET_WRITE_TIMEOUT 500 set net_buffer_length=1; +Warnings: +Warning 1292 Truncated incorrect net_buffer_length value: '1' show variables like 'net_buffer_length'; Variable_name Value net_buffer_length 1024 @@ -312,14 +314,14 @@ show variables like '%alloc%'; Variable_name Value query_alloc_block_size 8192 query_prealloc_size 8192 -range_alloc_block_size 2048 +range_alloc_block_size 4096 transaction_alloc_block_size 8192 transaction_prealloc_size 4096 select * from information_schema.session_variables where variable_name like '%alloc%' order by 1; VARIABLE_NAME VARIABLE_VALUE QUERY_ALLOC_BLOCK_SIZE 8192 QUERY_PREALLOC_SIZE 8192 -RANGE_ALLOC_BLOCK_SIZE 2048 +RANGE_ALLOC_BLOCK_SIZE 4096 TRANSACTION_ALLOC_BLOCK_SIZE 8192 TRANSACTION_PREALLOC_SIZE 4096 set @@range_alloc_block_size=1024*16; @@ -351,14 +353,14 @@ 
show variables like '%alloc%'; Variable_name Value query_alloc_block_size 8192 query_prealloc_size 8192 -range_alloc_block_size 2048 +range_alloc_block_size 4096 transaction_alloc_block_size 8192 transaction_prealloc_size 4096 select * from information_schema.session_variables where variable_name like '%alloc%' order by 1; VARIABLE_NAME VARIABLE_VALUE QUERY_ALLOC_BLOCK_SIZE 8192 QUERY_PREALLOC_SIZE 8192 -RANGE_ALLOC_BLOCK_SIZE 2048 +RANGE_ALLOC_BLOCK_SIZE 4096 TRANSACTION_ALLOC_BLOCK_SIZE 8192 TRANSACTION_PREALLOC_SIZE 4096 SELECT @@version LIKE 'non-existent'; @@ -416,6 +418,8 @@ select @@autocommit, @@big_tables; @@autocommit @@big_tables 1 1 set global binlog_cache_size=100; +Warnings: +Warning 1292 Truncated incorrect binlog_cache_size value: '100' set bulk_insert_buffer_size=100; set character set cp1251_koi8; set character set default; @@ -444,6 +448,8 @@ set global flush_time=100; set insert_id=1; set interactive_timeout=100; set join_buffer_size=100; +Warnings: +Warning 1292 Truncated incorrect join_buffer_size value: '100' set last_insert_id=1; set global local_infile=1; set long_query_time=0.000001; @@ -456,12 +462,20 @@ select @@long_query_time; 100.000001 set low_priority_updates=1; set max_allowed_packet=100; +Warnings: +Warning 1292 Truncated incorrect max_allowed_packet value: '100' set global max_binlog_cache_size=100; +Warnings: +Warning 1292 Truncated incorrect max_binlog_cache_size value: '100' set global max_binlog_size=100; +Warnings: +Warning 1292 Truncated incorrect max_binlog_size value: '100' set global max_connect_errors=100; set global max_connections=100; set global max_delayed_threads=100; set max_heap_table_size=100; +Warnings: +Warning 1292 Truncated incorrect max_heap_table_size value: '100' set max_join_size=100; set max_sort_length=100; set max_tmp_tables=100; @@ -472,17 +486,25 @@ select @@max_user_connections; set global max_write_lock_count=100; set myisam_sort_buffer_size=100; set net_buffer_length=100; +Warnings: +Warning 1292 Truncated incorrect net_buffer_length value: '100' set net_read_timeout=100; set net_write_timeout=100; set global query_cache_limit=100; set global query_cache_size=100; set global query_cache_type=demand; set read_buffer_size=100; +Warnings: +Warning 1292 Truncated incorrect read_buffer_size value: '100' set read_rnd_buffer_size=100; +Warnings: +Warning 1292 Truncated incorrect read_rnd_buffer_size value: '100' set global rpl_recovery_rank=100; set global server_id=100; set global slow_launch_time=100; set sort_buffer_size=100; +Warnings: +Warning 1292 Truncated incorrect sort_buffer_size value: '100' set @@max_sp_recursion_depth=10; select @@max_sp_recursion_depth; @@max_sp_recursion_depth @@ -522,6 +544,8 @@ set storage_engine=myisam; set global thread_cache_size=100; set timestamp=1, timestamp=default; set tmp_table_size=100; +Warnings: +Warning 1292 Truncated incorrect tmp_table_size value: '100' set tx_isolation="READ-COMMITTED"; set wait_timeout=100; set log_warnings=1; @@ -691,6 +715,8 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE 'MYI VARIABLE_NAME VARIABLE_VALUE MYISAM_DATA_POINTER_SIZE 7 SET GLOBAL table_open_cache=-1; +Warnings: +Warning 1292 Truncated incorrect table_open_cache value: '0' SHOW VARIABLES LIKE 'table_open_cache'; Variable_name Value table_open_cache 1 diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index c61972be3eb..45cd986aa6d 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -141,7 +141,7 @@ set GLOBAL 
myisam_max_sort_file_size=2000000; show global variables like 'myisam_max_sort_file_size'; select * from information_schema.global_variables where variable_name like 'myisam_max_sort_file_size'; set GLOBAL myisam_max_sort_file_size=default; ---replace_result 2147482624 FILE_SIZE 2146435072 FILE_SIZE +--replace_result 2147483647 FILE_SIZE 2146435072 FILE_SIZE show variables like 'myisam_max_sort_file_size'; --replace_result 2147483647 FILE_SIZE 9223372036854775807 FILE_SIZE select * from information_schema.session_variables where variable_name like 'myisam_max_sort_file_size'; diff --git a/mysys/my_getopt.c b/mysys/my_getopt.c index af48e9cb0df..3c0faf0c0af 100644 --- a/mysys/my_getopt.c +++ b/mysys/my_getopt.c @@ -34,7 +34,6 @@ my_bool getopt_compare_strings(const char *s, const char *t, uint length); static longlong getopt_ll(char *arg, const struct my_option *optp, int *err); -static longlong getopt_ll_limit_value(longlong, const struct my_option *); static ulonglong getopt_ull(char *arg, const struct my_option *optp, int *err); static double getopt_double(char *arg, const struct my_option *optp, int *err); @@ -87,7 +86,7 @@ static void default_reporter(enum loglevel level, fprintf(stderr, "%s", "Info: "); vfprintf(stderr, format, args); va_end(args); - fputs('\n', stderr); + fputc('\n', stderr); fflush(stderr); } @@ -785,7 +784,7 @@ static longlong eval_num_suffix(char *argument, int *error, char *option_name) static longlong getopt_ll(char *arg, const struct my_option *optp, int *err) { longlong num=eval_num_suffix(arg, err, (char*) optp->name); - return getopt_ll_limit_value(num, optp); + return getopt_ll_limit_value(num, optp, NULL); } /* @@ -795,11 +794,11 @@ static longlong getopt_ll(char *arg, const struct my_option *optp, int *err) Returns "fixed" value. */ -static longlong getopt_ll_limit_value(longlong num, - const struct my_option *optp) +longlong getopt_ll_limit_value(longlong num, const struct my_option *optp, + bool *fix) { longlong old= num; - bool trunc= FALSE; + bool adjusted= FALSE; char buf1[255], buf2[255]; ulonglong block_size= (optp->block_size ? 
(ulonglong) optp->block_size : 1L); @@ -807,7 +806,29 @@ static longlong getopt_ll_limit_value(longlong num, optp->max_value) /* if max value is not set -> no upper limit */ { num= (ulonglong) optp->max_value; - trunc= TRUE; + adjusted= TRUE; + } + + switch ((optp->var_type & GET_TYPE_MASK)) { + case GET_INT: + if (num > (longlong) INT_MAX) + { + num= ((longlong) INT_MAX); + adjusted= TRUE; + } + break; + case GET_LONG: +#if SIZEOF_LONG < SIZEOF_LONG_LONG + if (num > (longlong) LONG_MAX) + { + num= ((longlong) LONG_MAX); + adjusted= TRUE; + } +#endif + break; + default: + DBUG_ASSERT((optp->var_type & GET_TYPE_MASK) == GET_LL); + break; } num= ((num - optp->sub_size) / block_size); @@ -816,10 +837,12 @@ static longlong getopt_ll_limit_value(longlong num, if (num < optp->min_value) { num= optp->min_value; - trunc= TRUE; + adjusted= TRUE; } - if (trunc) + if (fix) + *fix= adjusted; + else if (adjusted) my_getopt_error_reporter(WARNING_LEVEL, "option '%s': signed value %s adjusted to %s", optp->name, llstr(old, buf1), llstr(num, buf2)); @@ -888,14 +911,13 @@ ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp, adjusted= TRUE; } - if (adjusted) + if (fix) + *fix= adjusted; + else if (adjusted) my_getopt_error_reporter(WARNING_LEVEL, "option '%s': unsigned value %s adjusted to %s", optp->name, ullstr(old, buf1), ullstr(num, buf2)); - if (fix) - *fix= adjusted; - return num; } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 9d753f905dc..b81ccfc5db9 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5216,7 +5216,7 @@ struct my_option my_long_options[] = {"concurrent-insert", OPT_CONCURRENT_INSERT, "Use concurrent insert with MyISAM. Disable with --concurrent-insert=0", (uchar**) &myisam_concurrent_insert, (uchar**) &myisam_concurrent_insert, - 0, GET_LONG, OPT_ARG, 1, 0, 2, 0, 0, 0}, + 0, GET_ULONG, OPT_ARG, 1, 0, 2, 0, 0, 0}, {"console", OPT_CONSOLE, "Write error output on screen; Don't remove the console window on windows.", (uchar**) &opt_console, (uchar**) &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 2af528f6699..07bf313067a 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -1874,11 +1874,20 @@ err: static int check_func_int(THD *thd, struct st_mysql_sys_var *var, void *save, st_mysql_value *value) { + bool fixed; long long tmp; struct my_option options; value->val_int(value, &tmp); plugin_opt_set_limits(&options, var); - *(int *)save= (int) getopt_ull_limit_value(tmp, &options); + *(int *)save= (int) getopt_ull_limit_value(tmp, &options, &fixed); + if (fixed) + { + char buf[22]; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), var->name, + ullstr(tmp, buf)); + } return (thd->variables.sql_mode & MODE_STRICT_ALL_TABLES) && (*(int *)save != (int) tmp); } @@ -1887,24 +1896,42 @@ static int check_func_int(THD *thd, struct st_mysql_sys_var *var, static int check_func_long(THD *thd, struct st_mysql_sys_var *var, void *save, st_mysql_value *value) { + bool fixed; long long tmp; struct my_option options; value->val_int(value, &tmp); plugin_opt_set_limits(&options, var); - *(long *)save= (long) getopt_ull_limit_value(tmp, &options); + *(long *)save= (long) getopt_ull_limit_value(tmp, &options, &fixed); + if (fixed) + { + char buf[22]; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), var->name, + ullstr(tmp, buf)); + } return (thd->variables.sql_mode & 
MODE_STRICT_ALL_TABLES) && (*(long *)save != (long) tmp); } static int check_func_longlong(THD *thd, struct st_mysql_sys_var *var, - void *save, st_mysql_value *value) + void *save, st_mysql_value *value) { + bool fixed; long long tmp; struct my_option options; value->val_int(value, &tmp); plugin_opt_set_limits(&options, var); - *(ulonglong *)save= getopt_ull_limit_value(tmp, &options); + *(ulonglong *)save= getopt_ull_limit_value(tmp, &options, &fixed); + if (fixed) + { + char buf[22]; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), var->name, + ullstr(tmp, buf)); + } return (thd->variables.sql_mode & MODE_STRICT_ALL_TABLES) && (*(long long *)save != tmp); } -- cgit v1.2.1 From d892e68269c6ed0a1eefa43a8e238f8594c07c1a Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 2 Dec 2007 05:18:34 +0100 Subject: Bug#31177: Server variables can't be set to their current values extra 5.1 fixes mysql-test/t/order_fill_sortbuf-master.opt: If we set the variable below minimum, we'll get a warning at start-up that we don't want in the tests. --- mysql-test/t/order_fill_sortbuf-master.opt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/t/order_fill_sortbuf-master.opt b/mysql-test/t/order_fill_sortbuf-master.opt index 116494d4588..9aa3cc76221 100644 --- a/mysql-test/t/order_fill_sortbuf-master.opt +++ b/mysql-test/t/order_fill_sortbuf-master.opt @@ -1 +1 @@ ---set-variable=sort_buffer=0 +--set-variable=sort_buffer=32804 -- cgit v1.2.1 From 53f7db282dc982e48c0a944e9c3f68ebc3d26db3 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 3 Dec 2007 10:01:56 +0100 Subject: Bug#31177: Server variables can't be set to their current values additional fixes for 64-bit mysql-test/t/variables.test: replace 32-bit and 64-bit values mysys/my_getopt.c: 'mod' no longer used. on 64-bit, limit to (signed) (LONG)LONG_MAX to prevent badness in classes using longlong. --- mysql-test/t/variables.test | 2 +- mysys/my_getopt.c | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index be7e7c2b413..a30ba758c27 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -126,7 +126,7 @@ set GLOBAL query_cache_size=100000; set GLOBAL myisam_max_sort_file_size=2000000; show global variables like 'myisam_max_sort_file_size'; set GLOBAL myisam_max_sort_file_size=default; ---replace_result 2147482624 FILE_SIZE 2146435072 FILE_SIZE +--replace_result 9223372036853727232 FILE_SIZE 2146435072 FILE_SIZE show variables like 'myisam_max_sort_file_size'; set global net_retry_count=10, session net_retry_count=10; diff --git a/mysys/my_getopt.c b/mysys/my_getopt.c index f41e8166876..9174d91dce7 100644 --- a/mysys/my_getopt.c +++ b/mysys/my_getopt.c @@ -799,7 +799,7 @@ ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp, bool *fix) { bool adjusted= FALSE; - ulonglong old= num, mod; + ulonglong old= num; char buf1[255], buf2[255]; if ((ulonglong) num > (ulonglong) optp->max_value && @@ -824,6 +824,8 @@ ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp, num= ((ulonglong) ULONG_MAX); adjusted= TRUE; } +#else + num= min(num, LONG_MAX); #endif break; default: -- cgit v1.2.1 From b4a146a6ebe0ad14d40f241c0a37c9a43c7fd7f3 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 3 Dec 2007 10:08:58 +0100 Subject: Bug#30234: Unexpected behavior using DELETE with AS and USING Anti-patch. 
This patch undoes the previously pushed patch. It is null-merged in versions 5.1 and above since there the original patch is still desired. mysql-test/r/delete.result: Bug#30234: Anti-patch mysql-test/t/delete.test: Bug#30234: Anti-patch sql/sql_yacc.yy: Bug#30234: Anti-patch --- mysql-test/r/delete.result | 37 ------------------------------------- mysql-test/t/delete.test | 44 -------------------------------------------- sql/sql_yacc.yy | 19 ++----------------- 3 files changed, 2 insertions(+), 98 deletions(-) diff --git a/mysql-test/r/delete.result b/mysql-test/r/delete.result index 5084498c01c..27eee6ef578 100644 --- a/mysql-test/r/delete.result +++ b/mysql-test/r/delete.result @@ -234,40 +234,3 @@ ERROR 42S22: Unknown column 't2.x' in 'order clause' DELETE FROM t1 ORDER BY (SELECT x); ERROR 42S22: Unknown column 'x' in 'field list' DROP TABLE t1; -CREATE TABLE t1 ( -a INT -); -CREATE TABLE t2 ( -a INT -); -CREATE DATABASE db1; -CREATE TABLE db1.t1 ( -a INT -); -INSERT INTO db1.t1 (a) SELECT * FROM t1; -CREATE DATABASE db2; -CREATE TABLE db2.t1 ( -a INT -); -INSERT INTO db2.t1 (a) SELECT * FROM t2; -DELETE FROM t1 alias USING t1, t2 alias WHERE t1.a = alias.a; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'alias USING t1, t2 alias WHERE t1.a = alias.a' at line 1 -DELETE FROM alias USING t1, t2 alias WHERE t1.a = alias.a; -DELETE FROM t1, alias USING t1, t2 alias WHERE t1.a = alias.a; -DELETE FROM t1, t2 USING t1, t2 alias WHERE t1.a = alias.a; -ERROR 42S02: Unknown table 't2' in MULTI DELETE -DELETE FROM db1.t1 alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a' at line 1 -DELETE FROM alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a; -ERROR 42S02: Unknown table 'alias' in MULTI DELETE -DELETE FROM db2.alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a; -DELETE FROM t1 USING t1 WHERE a = 1; -SELECT * FROM t1; -a -DELETE FROM t1 alias USING t1 alias WHERE a = 2; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'alias USING t1 alias WHERE a = 2' at line 1 -SELECT * FROM t1; -a -DROP TABLE t1, t2; -DROP DATABASE db1; -DROP DATABASE db2; diff --git a/mysql-test/t/delete.test b/mysql-test/t/delete.test index 8a03cb6c715..495b0c0b001 100644 --- a/mysql-test/t/delete.test +++ b/mysql-test/t/delete.test @@ -233,47 +233,3 @@ DELETE FROM t1 ORDER BY t2.x; DELETE FROM t1 ORDER BY (SELECT x); DROP TABLE t1; - -# -# Bug #30234: Unexpected behavior using DELETE with AS and USING -# ' -CREATE TABLE t1 ( - a INT -); - -CREATE TABLE t2 ( - a INT -); - -CREATE DATABASE db1; -CREATE TABLE db1.t1 ( - a INT -); -INSERT INTO db1.t1 (a) SELECT * FROM t1; - -CREATE DATABASE db2; -CREATE TABLE db2.t1 ( - a INT -); -INSERT INTO db2.t1 (a) SELECT * FROM t2; - ---error ER_PARSE_ERROR -DELETE FROM t1 alias USING t1, t2 alias WHERE t1.a = alias.a; -DELETE FROM alias USING t1, t2 alias WHERE t1.a = alias.a; -DELETE FROM t1, alias USING t1, t2 alias WHERE t1.a = alias.a; ---error ER_UNKNOWN_TABLE -DELETE FROM t1, t2 USING t1, t2 alias WHERE t1.a = alias.a; ---error ER_PARSE_ERROR -DELETE FROM db1.t1 alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a; ---error ER_UNKNOWN_TABLE -DELETE FROM 
alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a; -DELETE FROM db2.alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a; -DELETE FROM t1 USING t1 WHERE a = 1; -SELECT * FROM t1; ---error ER_PARSE_ERROR -DELETE FROM t1 alias USING t1 alias WHERE a = 2; -SELECT * FROM t1; - -DROP TABLE t1, t2; -DROP DATABASE db1; -DROP DATABASE db2; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 82100b3d3dd..c4bc4616841 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1162,8 +1162,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); field_opt_list opt_binary table_lock_list table_lock ref_list opt_on_delete opt_on_delete_list opt_on_delete_item use opt_delete_options opt_delete_option varchar nchar nvarchar - opt_outer table_list table_name table_alias_ref_list table_alias_ref - opt_option opt_place + opt_outer table_list table_name opt_option opt_place opt_attribute opt_attribute_list attribute column_list column_list_id opt_column_list grant_privileges grant_ident grant_list grant_option object_privilege object_privilege_list user_list rename_list @@ -6554,20 +6553,6 @@ table_name: } ; -table_alias_ref_list: - table_alias_ref - | table_alias_ref_list ',' table_alias_ref; - -table_alias_ref: - table_ident - { - if (!Select->add_table_to_list(YYTHD, $1, NULL, - TL_OPTION_UPDATING | TL_OPTION_ALIAS, - Lex->lock_option )) - MYSQL_YYABORT; - } - ; - if_exists: /* empty */ { $$= 0; } | IF EXISTS { $$= 1; } @@ -6838,7 +6823,7 @@ single_multi: if (multi_delete_set_locks_and_link_aux_tables(Lex)) MYSQL_YYABORT; } - | FROM table_alias_ref_list + | FROM table_wild_list { mysql_init_multi_delete(Lex); } USING join_table_list where_clause { -- cgit v1.2.1 From 062af09eb5c7afe6d279f925764df1023e3a2e17 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 3 Dec 2007 11:39:49 +0100 Subject: Bug#30234: Post-null-merge fix Restores files that were automerged during null merge. 
mysql-test/r/delete.result: Bug#30234: Post-null-merge fix mysql-test/t/delete.test: Bug#30234: Post-null-merge fix --- mysql-test/r/delete.result | 37 +++++++++++++++++++++++++++++++++++++ mysql-test/t/delete.test | 44 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 81 insertions(+) diff --git a/mysql-test/r/delete.result b/mysql-test/r/delete.result index 27eee6ef578..5084498c01c 100644 --- a/mysql-test/r/delete.result +++ b/mysql-test/r/delete.result @@ -234,3 +234,40 @@ ERROR 42S22: Unknown column 't2.x' in 'order clause' DELETE FROM t1 ORDER BY (SELECT x); ERROR 42S22: Unknown column 'x' in 'field list' DROP TABLE t1; +CREATE TABLE t1 ( +a INT +); +CREATE TABLE t2 ( +a INT +); +CREATE DATABASE db1; +CREATE TABLE db1.t1 ( +a INT +); +INSERT INTO db1.t1 (a) SELECT * FROM t1; +CREATE DATABASE db2; +CREATE TABLE db2.t1 ( +a INT +); +INSERT INTO db2.t1 (a) SELECT * FROM t2; +DELETE FROM t1 alias USING t1, t2 alias WHERE t1.a = alias.a; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'alias USING t1, t2 alias WHERE t1.a = alias.a' at line 1 +DELETE FROM alias USING t1, t2 alias WHERE t1.a = alias.a; +DELETE FROM t1, alias USING t1, t2 alias WHERE t1.a = alias.a; +DELETE FROM t1, t2 USING t1, t2 alias WHERE t1.a = alias.a; +ERROR 42S02: Unknown table 't2' in MULTI DELETE +DELETE FROM db1.t1 alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a' at line 1 +DELETE FROM alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a; +ERROR 42S02: Unknown table 'alias' in MULTI DELETE +DELETE FROM db2.alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a; +DELETE FROM t1 USING t1 WHERE a = 1; +SELECT * FROM t1; +a +DELETE FROM t1 alias USING t1 alias WHERE a = 2; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'alias USING t1 alias WHERE a = 2' at line 1 +SELECT * FROM t1; +a +DROP TABLE t1, t2; +DROP DATABASE db1; +DROP DATABASE db2; diff --git a/mysql-test/t/delete.test b/mysql-test/t/delete.test index 495b0c0b001..8a03cb6c715 100644 --- a/mysql-test/t/delete.test +++ b/mysql-test/t/delete.test @@ -233,3 +233,47 @@ DELETE FROM t1 ORDER BY t2.x; DELETE FROM t1 ORDER BY (SELECT x); DROP TABLE t1; + +# +# Bug #30234: Unexpected behavior using DELETE with AS and USING +# ' +CREATE TABLE t1 ( + a INT +); + +CREATE TABLE t2 ( + a INT +); + +CREATE DATABASE db1; +CREATE TABLE db1.t1 ( + a INT +); +INSERT INTO db1.t1 (a) SELECT * FROM t1; + +CREATE DATABASE db2; +CREATE TABLE db2.t1 ( + a INT +); +INSERT INTO db2.t1 (a) SELECT * FROM t2; + +--error ER_PARSE_ERROR +DELETE FROM t1 alias USING t1, t2 alias WHERE t1.a = alias.a; +DELETE FROM alias USING t1, t2 alias WHERE t1.a = alias.a; +DELETE FROM t1, alias USING t1, t2 alias WHERE t1.a = alias.a; +--error ER_UNKNOWN_TABLE +DELETE FROM t1, t2 USING t1, t2 alias WHERE t1.a = alias.a; +--error ER_PARSE_ERROR +DELETE FROM db1.t1 alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a; +--error ER_UNKNOWN_TABLE +DELETE FROM alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a; +DELETE FROM db2.alias USING db1.t1, db2.t1 alias WHERE db1.t1.a = alias.a; +DELETE FROM t1 USING t1 WHERE a = 1; +SELECT * FROM t1; +--error ER_PARSE_ERROR 
+DELETE FROM t1 alias USING t1 alias WHERE a = 2; +SELECT * FROM t1; + +DROP TABLE t1, t2; +DROP DATABASE db1; +DROP DATABASE db2; -- cgit v1.2.1 From 4c85c8dd504f38145f0cc2b0651bac6146caa7e9 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 3 Dec 2007 12:43:50 +0100 Subject: ndb - bug#26457 Fix bug in bug fix, causing random test-failure --- storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 217c6f38ade..4d54007ee9c 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -5962,7 +5962,7 @@ void Dbdih::execMASTER_LCPREQ(Signal* signal) jam(); ndbout_c("resending GSN_MASTER_LCPREQ"); sendSignalWithDelay(reference(), GSN_MASTER_LCPREQ, signal, - signal->getLength(), 50); + 50, signal->getLength()); return; } Uint32 failedNodeId = req->failedNodeId; -- cgit v1.2.1 From c124985e6d2be89719b08833d546ba19e3b21a8a Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 3 Dec 2007 14:31:17 +0100 Subject: ndb - testNodeRestart -n Bug27466 T1 make local INCL_NODEREQ/CONF more robust (to remove testprg introduced race, error insert causes 5s delay) storage/ndb/src/kernel/blocks/backup/Backup.cpp: make local INCL_NODEREQ/CONF more robust (to remove testprg introduced race) storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp: make local INCL_NODEREQ/CONF more robust (to remove testprg introduced race) storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: make local INCL_NODEREQ/CONF more robust (to remove testprg introduced race) storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: make local INCL_NODEREQ/CONF more robust (to remove testprg introduced race) storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp: make local INCL_NODEREQ/CONF more robust (to remove testprg introduced race) storage/ndb/src/kernel/blocks/suma/Suma.cpp: make local INCL_NODEREQ/CONF more robust (to remove testprg introduced race) storage/ndb/test/ndbapi/testNodeRestart.cpp: soem more printout --- storage/ndb/src/kernel/blocks/backup/Backup.cpp | 5 +-- storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 5 +-- storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 37 ++++++++++++++++++----- storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 5 +-- storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 8 +++-- storage/ndb/src/kernel/blocks/suma/Suma.cpp | 5 +-- storage/ndb/test/ndbapi/testNodeRestart.cpp | 2 ++ 7 files changed, 48 insertions(+), 19 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp index 64e2c41aa69..45501bf50d5 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp @@ -1026,8 +1026,9 @@ Backup::execINCL_NODEREQ(Signal* signal) break; }//if }//for - signal->theData[0] = reference(); - sendSignal(senderRef, GSN_INCL_NODECONF, signal, 1, JBB); + signal->theData[0] = inclNode; + signal->theData[1] = reference(); + sendSignal(senderRef, GSN_INCL_NODECONF, signal, 2, JBB); } /***************************************************************************** diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 569958a6aa9..7ced078144a 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -3825,8 +3825,9 @@ void Dbdict::execINCL_NODEREQ(Signal* signal) 
c_nodes.getPtr(nodePtr); ndbrequire(nodePtr.p->nodeState == NodeRecord::NDB_NODE_DEAD); nodePtr.p->nodeState = NodeRecord::NDB_NODE_ALIVE; - signal->theData[0] = reference(); - sendSignal(retRef, GSN_INCL_NODECONF, signal, 1, JBB); + signal->theData[0] = nodePtr.i; + signal->theData[1] = reference(); + sendSignal(retRef, GSN_INCL_NODECONF, signal, 2, JBB); c_aliveNodes.set(nodePtr.i); }//execINCL_NODEREQ() diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index a081166f2e3..bbacb300089 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -2135,12 +2135,9 @@ void Dbdih::gcpBlockedLab(Signal* signal) /*---------------------------------------------------------------------------*/ void Dbdih::execINCL_NODECONF(Signal* signal) { - Uint32 TsendNodeId; - Uint32 TstartNode_or_blockref; - jamEntry(); - TstartNode_or_blockref = signal->theData[0]; - TsendNodeId = signal->theData[1]; + Uint32 TstartNode = signal->theData[0]; + Uint32 TsendNodeId_or_blockref = signal->theData[1]; Uint32 blocklist[6]; blocklist[0] = clocallqhblockref; @@ -2152,9 +2149,21 @@ void Dbdih::execINCL_NODECONF(Signal* signal) for (Uint32 i = 0; blocklist[i] != 0; i++) { - if (TstartNode_or_blockref == blocklist[i]) + if (TsendNodeId_or_blockref == blocklist[i]) { jam(); + + if (TstartNode != c_nodeStartSlave.nodeId) + { + jam(); + warningEvent("Recevied INCL_NODECONF for %u from %s" + " while %u is starting", + TstartNode, + getBlockName(refToBlock(TsendNodeId_or_blockref)), + c_nodeStartSlave.nodeId); + return; + } + if (getNodeStatus(c_nodeStartSlave.nodeId) == NodeRecord::ALIVE && blocklist[i+1] != 0) { @@ -2182,10 +2191,21 @@ void Dbdih::execINCL_NODECONF(Signal* signal) } } } + + if (c_nodeStartMaster.startNode != TstartNode) + { + jam(); + warningEvent("Recevied INCL_NODECONF for %u from %u" + " while %u is starting", + TstartNode, + TsendNodeId_or_blockref, + c_nodeStartMaster.startNode); + return; + } ndbrequire(cmasterdihref = reference()); - receiveLoopMacro(INCL_NODEREQ, TsendNodeId); - + receiveLoopMacro(INCL_NODEREQ, TsendNodeId_or_blockref); + CRASH_INSERTION(7128); /*-------------------------------------------------------------------------*/ // Now that we have included the starting node in the node lists in the @@ -13006,6 +13026,7 @@ void Dbdih::newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr) void Dbdih::nodeResetStart() { jam(); + c_nodeStartSlave.nodeId = 0; c_nodeStartMaster.startNode = RNIL; c_nodeStartMaster.failNr = cfailurenr; c_nodeStartMaster.activeState = false; diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 6efa1b1b116..83d38595c1f 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -494,8 +494,9 @@ void Dblqh::execINCL_NODEREQ(Signal* signal) cnodeStatus[i] = ZNODE_UP; }//if }//for - signal->theData[0] = cownref; - sendSignal(retRef, GSN_INCL_NODECONF, signal, 1, JBB); + signal->theData[0] = nodeId; + signal->theData[1] = cownref; + sendSignal(retRef, GSN_INCL_NODECONF, signal, 2, JBB); return; }//Dblqh::execINCL_NODEREQ() diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index ce20059e663..cffb4b22e4d 100644 --- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -310,9 +310,11 @@ void 
Dbtc::execINCL_NODEREQ(Signal* signal) hostptr.i = signal->theData[1]; ptrCheckGuard(hostptr, chostFilesize, hostRecord); hostptr.p->hostStatus = HS_ALIVE; - signal->theData[0] = cownref; c_alive_nodes.set(hostptr.i); + signal->theData[0] = hostptr.i; + signal->theData[1] = cownref; + if (ERROR_INSERTED(8039)) { CLEAR_ERROR_INSERT_VALUE; @@ -321,11 +323,11 @@ void Dbtc::execINCL_NODEREQ(Signal* signal) sendSignal(numberToRef(CMVMI, hostptr.i), GSN_NDB_TAMPER, signal, 1, JBB); signal->theData[0] = save; - sendSignalWithDelay(tblockref, GSN_INCL_NODECONF, signal, 5000, 1); + sendSignalWithDelay(tblockref, GSN_INCL_NODECONF, signal, 5000, 2); return; } - sendSignal(tblockref, GSN_INCL_NODECONF, signal, 1, JBB); + sendSignal(tblockref, GSN_INCL_NODECONF, signal, 2, JBB); } void Dbtc::execREAD_NODESREF(Signal* signal) diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index af1334c57fc..113b63a19d3 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -821,8 +821,9 @@ Suma::execINCL_NODEREQ(Signal* signal){ ndbrequire(!c_alive_nodes.get(nodeId)); c_alive_nodes.set(nodeId); - signal->theData[0] = reference(); - sendSignal(senderRef, GSN_INCL_NODECONF, signal, 1, JBB); + signal->theData[0] = nodeId; + signal->theData[1] = reference(); + sendSignal(senderRef, GSN_INCL_NODECONF, signal, 2, JBB); } void diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp index 2a5febb7ae9..751134c43c5 100644 --- a/storage/ndb/test/ndbapi/testNodeRestart.cpp +++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp @@ -1590,6 +1590,8 @@ runBug27466(NDBT_Context* ctx, NDBT_Step* step) node2 = res.getDbNodeId(rand() % res.getNumDbNodes()); } + ndbout_c("nodes %u %u", node1, node2); + if (res.restartOneDbNode(node1, false, true, true)) return NDBT_FAILED; -- cgit v1.2.1 From 09bf5b91693110bc91fa949bb2d85a0ac587d07e Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 4 Dec 2007 01:17:52 +0100 Subject: Bug#31177: Server variables can't be set to their current values additional fixes for 64-bit --- Merge mysql.com:/misc/mysql/31177/50-31177 into mysql.com:/misc/mysql/31177/51-31177 --- Bug#31177: Server variables can't be set to their current values additional 5.1 fixes (for plugins) mysql-test/t/variables.test: replace 32-bit and 64-bit values --- manual merge mysys/my_getopt.c: 'mod' no longer used. on 64-bit, limit to (signed) (LONG)LONG_MAX to prevent badness in classes using longlong. 
sql/sql_plugin.cc: handle signedness in plugin-vars so we won't hit an assert() in getopt_*_limit_value() --- mysql-test/t/variables.test | 2 +- mysys/my_getopt.c | 4 +++- sql/sql_plugin.cc | 23 +++++++++++++++++++++-- 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index b28311aee3d..c1580390f63 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -141,7 +141,7 @@ set GLOBAL myisam_max_sort_file_size=2000000; show global variables like 'myisam_max_sort_file_size'; select * from information_schema.global_variables where variable_name like 'myisam_max_sort_file_size'; set GLOBAL myisam_max_sort_file_size=default; ---replace_result 2147483647 FILE_SIZE 2146435072 FILE_SIZE +--replace_result 2147483647 FILE_SIZE 9223372036854775807 FILE_SIZE show variables like 'myisam_max_sort_file_size'; --replace_result 2147483647 FILE_SIZE 9223372036854775807 FILE_SIZE select * from information_schema.session_variables where variable_name like 'myisam_max_sort_file_size'; diff --git a/mysys/my_getopt.c b/mysys/my_getopt.c index 3c0faf0c0af..7691472b76a 100644 --- a/mysys/my_getopt.c +++ b/mysys/my_getopt.c @@ -867,7 +867,7 @@ ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp, bool *fix) { bool adjusted= FALSE; - ulonglong old= num, mod; + ulonglong old= num; char buf1[255], buf2[255]; if ((ulonglong) num > (ulonglong) optp->max_value && @@ -892,6 +892,8 @@ ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp, num= ((ulonglong) ULONG_MAX); adjusted= TRUE; } +#else + num= min(num, LONG_MAX); #endif break; default: diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 08b33c4502c..002fb5be31e 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -1880,7 +1880,13 @@ static int check_func_int(THD *thd, struct st_mysql_sys_var *var, struct my_option options; value->val_int(value, &tmp); plugin_opt_set_limits(&options, var); - *(int *)save= (int) getopt_ull_limit_value(tmp, &options, &fixed); + + if (var->flags & PLUGIN_VAR_UNSIGNED) + *(uint *)save= (uint) getopt_ull_limit_value((ulonglong) tmp, &options, + &fixed); + else + *(int *)save= (int) getopt_ll_limit_value(tmp, &options, &fixed); + if (fixed) { char buf[22]; @@ -1902,7 +1908,13 @@ static int check_func_long(THD *thd, struct st_mysql_sys_var *var, struct my_option options; value->val_int(value, &tmp); plugin_opt_set_limits(&options, var); - *(long *)save= (long) getopt_ull_limit_value(tmp, &options, &fixed); + + if (var->flags & PLUGIN_VAR_UNSIGNED) + *(ulong *)save= (ulong) getopt_ull_limit_value((ulonglong) tmp, &options, + &fixed); + else + *(long *)save= (long) getopt_ll_limit_value(tmp, &options, &fixed); + if (fixed) { char buf[22]; @@ -1925,6 +1937,13 @@ static int check_func_longlong(THD *thd, struct st_mysql_sys_var *var, value->val_int(value, &tmp); plugin_opt_set_limits(&options, var); *(ulonglong *)save= getopt_ull_limit_value(tmp, &options, &fixed); + + if (var->flags & PLUGIN_VAR_UNSIGNED) + *(ulonglong *)save= getopt_ull_limit_value((ulonglong) tmp, &options, + &fixed); + else + *(longlong *)save= getopt_ll_limit_value(tmp, &options, &fixed); + if (fixed) { char buf[22]; -- cgit v1.2.1 From 7d89be3f4f37c5206256c04905277297e2313a37 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 4 Dec 2007 12:48:11 +0400 Subject: test case fix --- mysql-test/r/information_schema.result | 1 + mysql-test/t/information_schema.test | 1 + 2 files changed, 2 insertions(+) diff --git 
a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index a2c337e00df..0cb4b10a789 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -1618,4 +1618,5 @@ show events from information_schema; Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation show events where Db= 'information_schema'; Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation +use test; End of 5.1 tests. diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test index 0d53c404fa9..3d3aeec16b9 100644 --- a/mysql-test/t/information_schema.test +++ b/mysql-test/t/information_schema.test @@ -1245,5 +1245,6 @@ use information_schema; show events; show events from information_schema; show events where Db= 'information_schema'; +use test; --echo End of 5.1 tests. -- cgit v1.2.1 From 4618d68d6d85141cd2422c892ed5053c72aa097a Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 6 Dec 2007 01:28:01 +0100 Subject: Bug#31177: Server variables can't be set to their current values additional fixes for BDB and correct assignment of both signed and unsigned 64-bit data to unsigned system variables mysql-test/r/ps_2myisam.result: account for UNSIGNED_FLAG mysql-test/r/ps_3innodb.result: account for UNSIGNED_FLAG mysql-test/r/ps_4heap.result: account for UNSIGNED_FLAG mysql-test/r/ps_5merge.result: account for UNSIGNED_FLAG mysql-test/r/ps_6bdb.result: account for UNSIGNED_FLAG mysql-test/r/ps_7ndb.result: account for UNSIGNED_FLAG mysys/my_getopt.c: We have correct signed/unsigned information now, so we no longer need to err on the side of caution. sql/item_func.cc: Copy unsigned info over from entry so the item's data correctly describe it. sql/mysqld.cc: BDB log buffer size: default can't be less than minimum sql/set_var.cc: Handle signedness of in-values correctly when assigning to unsigned types, all the way up to 64-bit. Use handler from all three unsigned sysvar types. 
sql/set_var.h: thd_ulonglong: Override default check with one for unsigned types --- mysql-test/r/ps_2myisam.result | 16 ++++++++-------- mysql-test/r/ps_3innodb.result | 16 ++++++++-------- mysql-test/r/ps_4heap.result | 16 ++++++++-------- mysql-test/r/ps_5merge.result | 32 ++++++++++++++++---------------- mysql-test/r/ps_6bdb.result | 16 ++++++++-------- mysql-test/r/ps_7ndb.result | 16 ++++++++-------- mysys/my_getopt.c | 2 -- sql/item_func.cc | 2 ++ sql/mysqld.cc | 2 +- sql/set_var.cc | 32 ++++++++++++++++++++++++-------- sql/set_var.h | 1 + 11 files changed, 84 insertions(+), 67 deletions(-) diff --git a/mysql-test/r/ps_2myisam.result b/mysql-test/r/ps_2myisam.result index 57932a6c455..ac3ce99716c 100644 --- a/mysql-test/r/ps_2myisam.result +++ b/mysql-test/r/ps_2myisam.result @@ -1933,7 +1933,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -1980,7 +1980,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2030,7 +2030,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2070,7 +2070,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2118,7 +2118,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2162,7 +2162,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2208,7 +2208,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2246,7 +2246,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 diff --git a/mysql-test/r/ps_3innodb.result b/mysql-test/r/ps_3innodb.result index fd24c29d558..745b0ac53c9 100644 --- a/mysql-test/r/ps_3innodb.result +++ b/mysql-test/r/ps_3innodb.result @@ -1916,7 +1916,7 @@ def @arg13 
253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -1963,7 +1963,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2013,7 +2013,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2053,7 +2053,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2101,7 +2101,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2145,7 +2145,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2191,7 +2191,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2229,7 +2229,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 diff --git a/mysql-test/r/ps_4heap.result b/mysql-test/r/ps_4heap.result index b4596ab85bc..7b6b89100bc 100644 --- a/mysql-test/r/ps_4heap.result +++ b/mysql-test/r/ps_4heap.result @@ -1917,7 +1917,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -1964,7 +1964,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2014,7 +2014,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 
253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2054,7 +2054,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2102,7 +2102,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2146,7 +2146,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2192,7 +2192,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2230,7 +2230,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 diff --git a/mysql-test/r/ps_5merge.result b/mysql-test/r/ps_5merge.result index 18982db937a..74042d395c9 100644 --- a/mysql-test/r/ps_5merge.result +++ b/mysql-test/r/ps_5merge.result @@ -1853,7 +1853,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -1900,7 +1900,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -1950,7 +1950,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -1990,7 +1990,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2038,7 +2038,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2082,7 +2082,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 
128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2128,7 +2128,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2166,7 +2166,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -4875,7 +4875,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -4922,7 +4922,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -4972,7 +4972,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -5012,7 +5012,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -5060,7 +5060,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -5104,7 +5104,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -5150,7 +5150,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -5188,7 +5188,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 diff --git a/mysql-test/r/ps_6bdb.result b/mysql-test/r/ps_6bdb.result index 0e4086bc202..da12409257c 100644 --- a/mysql-test/r/ps_6bdb.result +++ b/mysql-test/r/ps_6bdb.result @@ 
-1916,7 +1916,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -1963,7 +1963,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2013,7 +2013,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2053,7 +2053,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2101,7 +2101,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2145,7 +2145,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2191,7 +2191,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2229,7 +2229,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 diff --git a/mysql-test/r/ps_7ndb.result b/mysql-test/r/ps_7ndb.result index 7a20fb3146d..4cb315c408e 100644 --- a/mysql-test/r/ps_7ndb.result +++ b/mysql-test/r/ps_7ndb.result @@ -1916,7 +1916,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -1963,7 +1963,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2013,7 +2013,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 
20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2053,7 +2053,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2101,7 +2101,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2145,7 +2145,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 @@ -2191,7 +2191,7 @@ def @arg13 253 8192 10 Y 128 31 63 def @arg14 253 8192 19 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 8 Y 128 31 63 -def @arg17 253 20 4 Y 128 0 63 +def @arg17 253 20 4 Y 160 0 63 def @arg18 253 20 1 Y 128 0 63 def @arg19 253 20 1 Y 128 0 63 def @arg20 253 8192 1 Y 0 31 8 @@ -2229,7 +2229,7 @@ def @arg13 253 8192 0 Y 128 31 63 def @arg14 253 8192 0 Y 128 31 63 def @arg15 253 8192 19 Y 128 31 63 def @arg16 253 8192 0 Y 128 31 63 -def @arg17 253 20 0 Y 128 0 63 +def @arg17 253 20 0 Y 160 0 63 def @arg18 253 20 0 Y 128 0 63 def @arg19 253 20 0 Y 128 0 63 def @arg20 253 8192 0 Y 0 31 8 diff --git a/mysys/my_getopt.c b/mysys/my_getopt.c index 9174d91dce7..3bb500616a1 100644 --- a/mysys/my_getopt.c +++ b/mysys/my_getopt.c @@ -824,8 +824,6 @@ ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp, num= ((ulonglong) ULONG_MAX); adjusted= TRUE; } -#else - num= min(num, LONG_MAX); #endif break; default: diff --git a/sql/item_func.cc b/sql/item_func.cc index 2ee9973c785..019df9176c9 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -4597,6 +4597,8 @@ void Item_func_get_user_var::fix_length_and_dec() if (var_entry) { + unsigned_flag= var_entry->unsigned_flag; + collation.set(var_entry->collation); switch (var_entry->type) { case REAL_RESULT: diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 3900f74da7e..23a05485796 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5679,7 +5679,7 @@ log and this option does nothing anymore.", {"bdb_log_buffer_size", OPT_BDB_LOG_BUFFER_SIZE, "The buffer that is allocated to cache index and rows for BDB tables.", (gptr*) &berkeley_log_buffer_size, (gptr*) &berkeley_log_buffer_size, 0, - GET_ULONG, REQUIRED_ARG, 0, 256*1024L, ULONG_MAX, 0, 1024, 0}, + GET_ULONG, REQUIRED_ARG, 256*1024L, 256*1024L, ULONG_MAX, 0, 1024, 0}, {"bdb_max_lock", OPT_BDB_MAX_LOCK, "The maximum number of locks you can have active on a BDB table.", (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG, diff --git a/sql/set_var.cc b/sql/set_var.cc index 84b3f92c1ca..58b9b93b17f 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -115,6 +115,7 @@ static void fix_trans_mem_root(THD *thd, enum_var_type type); static void fix_server_id(THD *thd, enum_var_type type); static ulonglong fix_unsigned(THD *thd, ulonglong num, const struct my_option *option_limits); +static bool get_unsigned(THD *thd, set_var *var); static void throw_bounds_warning(THD *thd, const char *name, ulonglong 
num); static KEY_CACHE *create_key_cache(const char *name, uint length); void fix_sql_mode_var(THD *thd, enum_var_type type); @@ -1471,6 +1472,18 @@ static ulonglong fix_unsigned(THD *thd, ulonglong num, return out; } +static bool get_unsigned(THD *thd, set_var *var) +{ + if (var->value->unsigned_flag) + var->save_result.ulonglong_value= (ulonglong) var->value->val_int(); + else + { + longlong v= var->value->val_int(); + var->save_result.ulonglong_value= (ulonglong) ((v < 0) ? 0 : v); + } + return 0; +} + sys_var_long_ptr:: sys_var_long_ptr(const char *name_arg, ulong *value_ptr_arg, @@ -1482,9 +1495,7 @@ sys_var_long_ptr(const char *name_arg, ulong *value_ptr_arg, bool sys_var_long_ptr_global::check(THD *thd, set_var *var) { - longlong v= var->value->val_int(); - var->save_result.ulonglong_value= v < 0 ? 0 : v; - return 0; + return get_unsigned(thd, var); } bool sys_var_long_ptr_global::update(THD *thd, set_var *var) @@ -1497,9 +1508,9 @@ bool sys_var_long_ptr_global::update(THD *thd, set_var *var) { #if SIZEOF_LONG < SIZEOF_LONG_LONG /* Avoid overflows on 32 bit systems */ - if (tmp > (ulonglong) ~(ulong) 0) + if (tmp > ULONG_MAX) { - tmp= ((ulonglong) ~(ulong) 0); + tmp= ULONG_MAX; throw_bounds_warning(thd, name, var->save_result.ulonglong_value); } #endif @@ -1567,7 +1578,7 @@ byte *sys_var_enum::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) bool sys_var_thd_ulong::check(THD *thd, set_var *var) { - return (sys_var_thd::check(thd, var) || + return (get_unsigned(thd, var) || (check_func && (*check_func)(thd, var))); } @@ -1585,9 +1596,9 @@ bool sys_var_thd_ulong::update(THD *thd, set_var *var) if (option_limits) tmp= (ulong) fix_unsigned(thd, tmp, option_limits); #if SIZEOF_LONG < SIZEOF_LONG_LONG - else if (tmp > (ulonglong) ~(ulong) 0) + else if (tmp > ULONG_MAX) { - tmp= ((ulonglong) ~(ulong) 0); + tmp= ULONG_MAX; throw_bounds_warning(thd, name, var->save_result.ulonglong_value); } #endif @@ -1667,6 +1678,11 @@ byte *sys_var_thd_ha_rows::value_ptr(THD *thd, enum_var_type type, return (byte*) &(thd->variables.*offset); } +bool sys_var_thd_ulonglong::check(THD *thd, set_var *var) +{ + return get_unsigned(thd, var); +} + bool sys_var_thd_ulonglong::update(THD *thd, set_var *var) { ulonglong tmp= var->save_result.ulonglong_value; diff --git a/sql/set_var.h b/sql/set_var.h index 7b3f864f44c..93402e21258 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -365,6 +365,7 @@ public: void set_default(THD *thd, enum_var_type type); SHOW_TYPE show_type() { return SHOW_LONGLONG; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); + bool check(THD *thd, set_var *var); bool check_default(enum_var_type type) { return type == OPT_GLOBAL && !option_limits; -- cgit v1.2.1 From b6c0c1754c5d378de3d37ca02b21df7dfaa3b229 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 6 Dec 2007 16:39:42 +0400 Subject: Bug #32247 Test reports wrong value of "AUTO_INCREMENT" (on a partitioned InnoDB table). ha_partition::update_create_info() just calls update_create_info of a first partition, so only get the autoincrement maximum of the first partition, so SHOW CREATE TABLE can show small AUTO_INCREMENT parameters. Fixed by implementing ha_partition::update_create_info() in a way other handlers work. HA_ARCHIVE:stats.auto_increment handling made consistent with other engines mysql-test/r/archive.result: Bug #32247 Test reports wrong value of "AUTO_INCREMENT" (on a partitioned InnoDB table). 
test result fixed mysql-test/r/partition.result: Bug #32247 Test reports wrong value of "AUTO_INCREMENT" (on a partitioned InnoDB table). test result mysql-test/suite/rpl/r/rpl_innodb_bug28430.result: Bug #32247 Test reports wrong value of "AUTO_INCREMENT" (on a partitioned InnoDB table). test result fixed mysql-test/t/partition.test: Bug #32247 Test reports wrong value of "AUTO_INCREMENT" (on a partitioned InnoDB table). test added as rpl_innodb_bug28430 is INNOBASE specific while the bug is not sql/ha_partition.cc: Bug #32247 Test reports wrong value of "AUTO_INCREMENT" (on a partitioned InnoDB table). ha_partition::update_create_info() implemented storage/archive/ha_archive.cc: Bug #32247 Test reports wrong value of "AUTO_INCREMENT" (on a partitioned InnoDB table). ha_archive.stats.auto_increment now contains next (not last used) value of auto_increment to be consistent with other engines --- mysql-test/r/archive.result | 2 +- mysql-test/r/partition.result | 15 +++++++++ mysql-test/suite/rpl/r/rpl_innodb_bug28430.result | 2 +- mysql-test/t/partition.test | 21 +++++++++++++ sql/ha_partition.cc | 6 +++- storage/archive/ha_archive.cc | 37 +++++++++-------------- 6 files changed, 57 insertions(+), 26 deletions(-) diff --git a/mysql-test/r/archive.result b/mysql-test/r/archive.result index 77d3cba63d5..5782941fd68 100644 --- a/mysql-test/r/archive.result +++ b/mysql-test/r/archive.result @@ -12665,7 +12665,7 @@ t6 CREATE TABLE `t6` ( `b` tinyblob, `c` int(11) DEFAULT NULL, KEY `a` (`a`) -) ENGINE=ARCHIVE DEFAULT CHARSET=latin1 +) ENGINE=ARCHIVE AUTO_INCREMENT=36 DEFAULT CHARSET=latin1 DROP TABLE t1, t2, t4, t5, t6; create table t1 (i int) engine=archive; insert into t1 values (1); diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index 4e4bd0bbc0a..d51315c345e 100644 --- a/mysql-test/r/partition.result +++ b/mysql-test/r/partition.result @@ -1291,4 +1291,19 @@ t1 CREATE TABLE `t1` ( `b` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (b) (PARTITION p1 VALUES LESS THAN (10) ENGINE = MyISAM, PARTITION p2 VALUES LESS THAN (20) ENGINE = MyISAM) */ drop table t1, t2; +CREATE TABLE t1(id MEDIUMINT NOT NULL AUTO_INCREMENT, +user CHAR(25), PRIMARY KEY(id)) +PARTITION BY RANGE(id) +SUBPARTITION BY hash(id) subpartitions 2 +(PARTITION pa1 values less than (10), +PARTITION pa2 values less than (20), +PARTITION pa11 values less than MAXVALUE); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` mediumint(9) NOT NULL AUTO_INCREMENT, + `user` char(25) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=MyISAM AUTO_INCREMENT=16 DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (id) SUBPARTITION BY HASH (id) SUBPARTITIONS 2 (PARTITION pa1 VALUES LESS THAN (10) ENGINE = MyISAM, PARTITION pa2 VALUES LESS THAN (20) ENGINE = MyISAM, PARTITION pa11 VALUES LESS THAN MAXVALUE ENGINE = MyISAM) */ +drop table t1; End of 5.1 tests diff --git a/mysql-test/suite/rpl/r/rpl_innodb_bug28430.result b/mysql-test/suite/rpl/r/rpl_innodb_bug28430.result index fb2782ed9f4..99d14b638ad 100644 --- a/mysql-test/suite/rpl/r/rpl_innodb_bug28430.result +++ b/mysql-test/suite/rpl/r/rpl_innodb_bug28430.result @@ -113,7 +113,7 @@ Create Table CREATE TABLE `byrange_tbl` ( `fkid` mediumint(9) DEFAULT NULL, `filler` varchar(255) DEFAULT NULL, PRIMARY KEY (`id`) -) ENGINE=InnoDB AUTO_INCREMENT=9 DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (id) SUBPARTITION BY HASH (id) SUBPARTITIONS 2 (PARTITION pa1 VALUES LESS THAN (10) ENGINE = InnoDB, PARTITION pa2 
VALUES LESS THAN (20) ENGINE = InnoDB, PARTITION pa3 VALUES LESS THAN (30) ENGINE = InnoDB, PARTITION pa4 VALUES LESS THAN (40) ENGINE = InnoDB, PARTITION pa5 VALUES LESS THAN (50) ENGINE = InnoDB, PARTITION pa6 VALUES LESS THAN (60) ENGINE = InnoDB, PARTITION pa7 VALUES LESS THAN (70) ENGINE = InnoDB, PARTITION pa8 VALUES LESS THAN (80) ENGINE = InnoDB, PARTITION pa9 VALUES LESS THAN (90) ENGINE = InnoDB, PARTITION pa10 VALUES LESS THAN (100) ENGINE = InnoDB, PARTITION pa11 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */ +) ENGINE=InnoDB AUTO_INCREMENT=1001 DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (id) SUBPARTITION BY HASH (id) SUBPARTITIONS 2 (PARTITION pa1 VALUES LESS THAN (10) ENGINE = InnoDB, PARTITION pa2 VALUES LESS THAN (20) ENGINE = InnoDB, PARTITION pa3 VALUES LESS THAN (30) ENGINE = InnoDB, PARTITION pa4 VALUES LESS THAN (40) ENGINE = InnoDB, PARTITION pa5 VALUES LESS THAN (50) ENGINE = InnoDB, PARTITION pa6 VALUES LESS THAN (60) ENGINE = InnoDB, PARTITION pa7 VALUES LESS THAN (70) ENGINE = InnoDB, PARTITION pa8 VALUES LESS THAN (80) ENGINE = InnoDB, PARTITION pa9 VALUES LESS THAN (90) ENGINE = InnoDB, PARTITION pa10 VALUES LESS THAN (100) ENGINE = InnoDB, PARTITION pa11 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */ show slave status; Slave_IO_State Waiting for master to send event Master_Host 127.0.0.1 diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test index 2906b4640cd..55b887935e0 100644 --- a/mysql-test/t/partition.test +++ b/mysql-test/t/partition.test @@ -1528,4 +1528,25 @@ PARTITION BY RANGE (b) ( show create table t1; drop table t1, t2; +# +# Bug #32247 Test reports wrong value of "AUTO_INCREMENT" (on a partitioned InnoDB table) +# + +CREATE TABLE t1(id MEDIUMINT NOT NULL AUTO_INCREMENT, + user CHAR(25), PRIMARY KEY(id)) + PARTITION BY RANGE(id) + SUBPARTITION BY hash(id) subpartitions 2 + (PARTITION pa1 values less than (10), + PARTITION pa2 values less than (20), + PARTITION pa11 values less than MAXVALUE); +--disable_query_log +let $n= 15; +while ($n) +{ + insert into t1 (user) values ('mysql'); + dec $n; +} +--enable_query_log +show create table t1; +drop table t1; --echo End of 5.1 tests diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 8afaab71160..15dcc4f3b4a 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -1598,7 +1598,11 @@ error: void ha_partition::update_create_info(HA_CREATE_INFO *create_info) { - m_file[0]->update_create_info(create_info); + info(HA_STATUS_AUTO); + + if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) + create_info->auto_increment_value= stats.auto_increment_value; + return; } diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index 84298e785d1..967e315d4a4 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -357,7 +357,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc) { DBUG_RETURN(NULL); } - stats.auto_increment_value= archive_tmp.auto_increment; + stats.auto_increment_value= archive_tmp.auto_increment + 1; share->rows_recorded= (ha_rows)archive_tmp.rows; share->crashed= archive_tmp.dirty; azclose(&archive_tmp); @@ -586,9 +586,7 @@ int ha_archive::create(const char *name, TABLE *table_arg, DBUG_ENTER("ha_archive::create"); - stats.auto_increment_value= (create_info->auto_increment_value ? 
- create_info->auto_increment_value -1 : - (ulonglong) 0); + stats.auto_increment_value= create_info->auto_increment_value; for (uint key= 0; key < table_arg->s->keys; key++) { @@ -673,7 +671,8 @@ int ha_archive::create(const char *name, TABLE *table_arg, Yes you need to do this, because the starting value for the autoincrement may not be zero. */ - create_stream.auto_increment= stats.auto_increment_value; + create_stream.auto_increment= stats.auto_increment_value ? + stats.auto_increment_value - 1 : 0; if (azclose(&create_stream)) { error= errno; @@ -871,8 +870,8 @@ int ha_archive::write_row(uchar *buf) else { if (temp_auto > share->archive_write.auto_increment) - stats.auto_increment_value= share->archive_write.auto_increment= - temp_auto; + stats.auto_increment_value= + (share->archive_write.auto_increment= temp_auto) + 1; } } @@ -896,7 +895,7 @@ void ha_archive::get_auto_increment(ulonglong offset, ulonglong increment, ulonglong *first_value, ulonglong *nb_reserved_values) { - *nb_reserved_values= 1; + *nb_reserved_values= ULONGLONG_MAX; *first_value= share->archive_write.auto_increment + 1; } @@ -1315,7 +1314,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) if (!rc) { share->rows_recorded= 0; - stats.auto_increment_value= share->archive_write.auto_increment= 0; + stats.auto_increment_value= 1; + share->archive_write.auto_increment= 0; my_bitmap_map *org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); while (!(rc= get_row(&archive, table->record[0]))) @@ -1332,8 +1332,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) (ulonglong) field->val_int(table->record[0] + field->offset(table->record[0])); if (share->archive_write.auto_increment < auto_value) - stats.auto_increment_value= share->archive_write.auto_increment= - auto_value; + stats.auto_increment_value= + (share->archive_write.auto_increment= auto_value) + 1; } } @@ -1418,18 +1418,9 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info) DBUG_ENTER("ha_archive::update_create_info"); ha_archive::info(HA_STATUS_AUTO); - if (create_info->used_fields & HA_CREATE_USED_AUTO) + if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) { - /* - Internally Archive keeps track of last used, not next used. - To make the output look like MyISAM we add 1 here. - - This is not completely compatible with MYISAM though, since - MyISAM will record on "SHOW CREATE TABLE" the last position, - where we will report the original position the table was - created with. 
- */ - create_info->auto_increment_value= stats.auto_increment_value + 1; + create_info->auto_increment_value= stats.auto_increment_value; } if (!(my_readlink(share->real_path, share->data_file_name, MYF(0)))) @@ -1494,7 +1485,7 @@ int ha_archive::info(uint flag) pthread_mutex_lock(&share->mutex); azflush(&archive, Z_SYNC_FLUSH); pthread_mutex_unlock(&share->mutex); - stats.auto_increment_value= archive.auto_increment; + stats.auto_increment_value= archive.auto_increment + 1; } DBUG_RETURN(0); -- cgit v1.2.1 From f9bae745058e8eeb27c8a55744effb8afc38474b Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 6 Dec 2007 17:15:21 +0100 Subject: bug#21072 Duplicate key error in NDB references wrong key: Return correct key for non-batching inserts --- mysql-test/r/ndb_charset.result | 8 ++++---- mysql-test/r/ndb_index_unique.result | 14 +++++++------- mysql-test/r/ndb_update.result | 2 +- ndb/include/kernel/signaldata/TcKeyRef.hpp | 3 ++- ndb/include/kernel/signaldata/TcRollbackRep.hpp | 3 ++- ndb/include/ndbapi/NdbDictionary.hpp | 7 ++++++- ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 1 + ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 21 ++++++++++++++++++++- ndb/src/ndbapi/NdbDictionary.cpp | 9 +++++++++ ndb/src/ndbapi/NdbOperationExec.cpp | 7 +++++++ ndb/src/ndbapi/NdbTransaction.cpp | 13 +++++++++++-- ndb/src/ndbapi/ndberror.c | 2 -- sql/ha_ndbcluster.cc | 23 ++++++++++++++++++++++- 13 files changed, 92 insertions(+), 21 deletions(-) diff --git a/mysql-test/r/ndb_charset.result b/mysql-test/r/ndb_charset.result index 9e519c39496..ed2b144cac9 100644 --- a/mysql-test/r/ndb_charset.result +++ b/mysql-test/r/ndb_charset.result @@ -112,9 +112,9 @@ unique key(a) ) engine=ndb; insert into t1 values(1, 'aAa'); insert into t1 values(2, 'aaa'); -ERROR 23000: Duplicate entry '' for key 0 +ERROR 23000: Duplicate entry 'aaa' for key 2 insert into t1 values(3, 'AAA'); -ERROR 23000: Duplicate entry '' for key 0 +ERROR 23000: Duplicate entry 'AAA' for key 2 select * from t1 order by p; p a 1 aAa @@ -138,9 +138,9 @@ unique key(a) ) engine=ndb; insert into t1 values (1,'A'),(2,'b '),(3,'C '),(4,'d '),(5,'E'),(6,'f'); insert into t1 values(99,'b'); -ERROR 23000: Duplicate entry '' for key 0 +ERROR 23000: Duplicate entry 'b' for key 2 insert into t1 values(99,'a '); -ERROR 23000: Duplicate entry '' for key 0 +ERROR 23000: Duplicate entry 'a ' for key 2 select a,length(a) from t1 order by a; a length(a) A 1 diff --git a/mysql-test/r/ndb_index_unique.result b/mysql-test/r/ndb_index_unique.result index 670fbe5b4e0..b3607fbe072 100644 --- a/mysql-test/r/ndb_index_unique.result +++ b/mysql-test/r/ndb_index_unique.result @@ -22,7 +22,7 @@ select * from t1 where b = 4 order by a; a b c 3 4 6 insert into t1 values(8, 2, 3); -ERROR 23000: Duplicate entry '' for key 0 +ERROR 23000: Duplicate entry '2' for key 2 select * from t1 order by a; a b c 1 2 3 @@ -89,7 +89,7 @@ a b c 1 1 1 4 4 NULL insert into t1 values(5,1,1); -ERROR 23000: Duplicate entry '' for key 0 +ERROR 23000: Duplicate entry '1-1' for key 2 drop table t1; CREATE TABLE t2 ( a int unsigned NOT NULL PRIMARY KEY, @@ -112,7 +112,7 @@ select * from t2 where b = 4 order by a; a b c 3 4 6 insert into t2 values(8, 2, 3); -ERROR 23000: Duplicate entry '' for key 0 +ERROR 23000: Duplicate entry '2-3' for key 2 select * from t2 order by a; a b c 1 2 3 @@ -135,7 +135,7 @@ a b c 8 2 3 create unique index bi using hash on t2(b); insert into t2 values(9, 3, 1); -ERROR 23000: Duplicate entry '' for key 0 +ERROR 23000: Duplicate entry '3' for key 3 alter table t2 drop index bi; insert 
into t2 values(9, 3, 1); select * from t2 order by a; @@ -225,7 +225,7 @@ pk a 3 NULL 4 4 insert into t1 values (5,0); -ERROR 23000: Duplicate entry '' for key 0 +ERROR 23000: Duplicate entry '0' for key 2 select * from t1 order by pk; pk a -1 NULL @@ -258,7 +258,7 @@ pk a b c 0 NULL 18 NULL 1 3 19 abc insert into t2 values(2,3,19,'abc'); -ERROR 23000: Duplicate entry '' for key 0 +ERROR 23000: Duplicate entry '3-abc' for key 2 select * from t2 order by pk; pk a b c -1 1 17 NULL @@ -678,7 +678,7 @@ create table t1 (a int primary key, b varchar(1000) not null, unique key (b)) engine=ndb charset=utf8; insert into t1 values (1, repeat(_utf8 0xe288ab6474, 200)); insert into t1 values (2, repeat(_utf8 0xe288ab6474, 200)); -ERROR 23000: Duplicate entry '' for key 0 +ERROR 23000: Duplicate entry '∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫d' for key 2 select a, sha1(b) from t1; a sha1(b) 1 08f5d02c8b8bc244f275bdfc22c42c5cab0d9d7d diff --git a/mysql-test/r/ndb_update.result b/mysql-test/r/ndb_update.result index 7848a47bcef..6bbf3e6a4e1 100644 --- a/mysql-test/r/ndb_update.result +++ b/mysql-test/r/ndb_update.result @@ -26,7 +26,7 @@ pk1 b c 2 2 2 4 1 1 UPDATE t1 set pk1 = 1, c = 2 where pk1 = 4; -ERROR 23000: Duplicate entry '' for key 0 +ERROR 23000: Duplicate entry '2' for key 2 UPDATE IGNORE t1 set pk1 = 1, c = 2 where pk1 = 4; select * from t1 order by pk1; pk1 b c diff --git a/ndb/include/kernel/signaldata/TcKeyRef.hpp b/ndb/include/kernel/signaldata/TcKeyRef.hpp index 2846ce3854f..56f6cdae29d 100644 --- a/ndb/include/kernel/signaldata/TcKeyRef.hpp +++ b/ndb/include/kernel/signaldata/TcKeyRef.hpp @@ -40,12 +40,13 @@ class TcKeyRef { friend bool printTCKEYREF(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 4 ); + STATIC_CONST( SignalLength = 5 ); private: Uint32 connectPtr; Uint32 transId[2]; Uint32 errorCode; + Uint32 errorData; }; #endif diff --git a/ndb/include/kernel/signaldata/TcRollbackRep.hpp b/ndb/include/kernel/signaldata/TcRollbackRep.hpp index 3b5e2f3d3cb..609756605d5 100644 --- a/ndb/include/kernel/signaldata/TcRollbackRep.hpp +++ b/ndb/include/kernel/signaldata/TcRollbackRep.hpp @@ -38,12 +38,13 @@ class TcRollbackRep { friend bool printTCROLBACKREP(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 4 ); + STATIC_CONST( SignalLength = 5 ); private: Uint32 connectPtr; Uint32 transId[2]; Uint32 returnCode; + Uint32 errorData; }; #endif diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 445bb513ffc..24fb9811b3d 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -792,7 +792,12 @@ public: * Get the name of the table being indexed */ const char * getTable() const; - + + /** + * Get the table representing the index + */ + const Table * getIndexTable() const; + /** * Get the number of columns in the index */ diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp index 710d2fde182..6fb03fa2407 100644 --- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp +++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp @@ -727,6 +727,7 @@ public: // Index op return context UintR indexOp; UintR clientData; + Uint32 errorData; UintR attrInfoLen; UintR accumulatingIndexOp; diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 91adae183f4..57daffbf331 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -5107,6 +5107,7 @@ void Dbtc::releaseDirtyWrite(Signal* 
signal) void Dbtc::execLQHKEYREF(Signal* signal) { const LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtr(); + Uint32 indexId = 0; jamEntry(); UintR compare_transid1, compare_transid2; @@ -5158,6 +5159,10 @@ void Dbtc::execLQHKEYREF(Signal* signal) ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord); // The operation executed an index trigger + TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId); + indexId = indexData->indexId; + regApiPtr->errorData = indexId; + ndbout_c("LQHKEYREF, found index %u", indexId); const Uint32 opType = regTcPtr->operation; if (errCode == ZALREADYEXIST) errCode = terrorCode = ZNOTUNIQUE; @@ -5170,7 +5175,6 @@ void Dbtc::execLQHKEYREF(Signal* signal) } else { jam(); /** ZDELETE && NOT_FOUND */ - TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId); if(indexData->indexState == IS_BUILDING && state != CS_ABORTING){ jam(); /** @@ -5242,12 +5246,16 @@ void Dbtc::execLQHKEYREF(Signal* signal) jam(); regApiPtr->lqhkeyreqrec--; // Compensate for extra during read tcKeyRef->connectPtr = indexOp; + ndbout_c("TCKEYREF, sending index %u", indexId); + tcKeyRef->errorData = indexId; EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength); apiConnectptr.i = save; apiConnectptr.p = regApiPtr; } else { jam(); tcKeyRef->connectPtr = clientData; + ndbout_c("TCKEYREF, sending index %u", indexId); + tcKeyRef->errorData = indexId; sendSignal(regApiPtr->ndbapiBlockref, GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB); }//if @@ -10548,6 +10556,7 @@ void Dbtc::releaseAbortResources(Signal* signal) tcRollbackRep->transId[0] = apiConnectptr.p->transid[0]; tcRollbackRep->transId[1] = apiConnectptr.p->transid[1]; tcRollbackRep->returnCode = apiConnectptr.p->returncode; + tcRollbackRep->errorData = apiConnectptr.p->errorData; sendSignal(blockRef, GSN_TCROLLBACKREP, signal, TcRollbackRep::SignalLength, JBB); } @@ -11972,6 +11981,7 @@ void Dbtc::execTCKEYCONF(Signal* signal) tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->errorCode = 4349; + tcIndxRef->errorData = 0; sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB); return; @@ -11991,6 +12001,7 @@ void Dbtc::execTCKEYCONF(Signal* signal) tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->errorCode = 4349; + tcIndxRef->errorData = 0; sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB); return; @@ -12074,6 +12085,7 @@ void Dbtc::execTCKEYREF(Signal* signal) tcIndxRef->transId[0] = tcKeyRef->transId[0]; tcIndxRef->transId[1] = tcKeyRef->transId[1]; tcIndxRef->errorCode = tcKeyRef->errorCode; + tcIndxRef->errorData = 0; releaseIndexOperation(regApiPtr, indexOp); @@ -12151,6 +12163,7 @@ void Dbtc::execTRANSID_AI(Signal* signal) tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->errorCode = 4000; + tcIndxRef->errorData = 0; sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB); return; @@ -12166,6 +12179,7 @@ void Dbtc::execTRANSID_AI(Signal* signal) tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->errorCode = 4349; + tcIndxRef->errorData = 0; sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB); return; @@ -12194,6 +12208,7 @@ void Dbtc::execTRANSID_AI(Signal* signal) tcIndxRef->transId[0] = regApiPtr->transid[0]; 
tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->errorCode = 4349; + tcIndxRef->errorData = 0; sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB); */ @@ -12219,6 +12234,7 @@ void Dbtc::execTRANSID_AI(Signal* signal) tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->errorCode = 4349; + // tcIndxRef->errorData = ??; Where to find indexId sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB); return; @@ -12272,6 +12288,7 @@ void Dbtc::readIndexTable(Signal* signal, tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->errorCode = 4000; + // tcIndxRef->errorData = ??; Where to find indexId sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB); return; @@ -12414,6 +12431,7 @@ void Dbtc::executeIndexOperation(Signal* signal, tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->errorCode = 4349; + tcIndxRef->errorData = 0; sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB); return; @@ -13012,6 +13030,7 @@ void Dbtc::insertIntoIndexTable(Signal* signal, } regApiPtr->currSavePointId = currSavePointId; + ndbout_c("TCKEYREQ, saving index %u", indexData->indexId); tcConnectptr.p->currentIndexId = indexData->indexId; // *********** KEYINFO *********** diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index 86a6624959e..32a2cd8ba0c 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -542,6 +542,15 @@ NdbDictionary::Index::getTable() const { return m_impl.getTable(); } +const NdbDictionary::Table * +NdbDictionary::Index::getIndexTable() const { + NdbTableImpl * t = m_impl.m_table; + if (t) { + return t->m_facade; + } + return 0; +} + unsigned NdbDictionary::Index::getNoOfColumns() const { return m_impl.m_columns.size(); diff --git a/ndb/src/ndbapi/NdbOperationExec.cpp b/ndb/src/ndbapi/NdbOperationExec.cpp index feff9ed5f36..9a50b000a1a 100644 --- a/ndb/src/ndbapi/NdbOperationExec.cpp +++ b/ndb/src/ndbapi/NdbOperationExec.cpp @@ -24,6 +24,7 @@ #include "Interpreter.hpp" #include #include +#include #include #include #include @@ -550,6 +551,12 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal) theNdbCon->theReturnStatus = NdbTransaction::ReturnFailure; } theError.code = aSignal->readData(4); + if (aSignal->getLength() == TcKeyRef::SignalLength) + { + // Signal may contain additional error data + theError.details = (char *) aSignal->readData(5); + } + theNdbCon->setOperationErrorCodeAbort(aSignal->readData(4), ao); if(theOperationType != ReadRequest || !theSimpleIndicator) // not simple read diff --git a/ndb/src/ndbapi/NdbTransaction.cpp b/ndb/src/ndbapi/NdbTransaction.cpp index 1ebc5b7ef24..262deb0050e 100644 --- a/ndb/src/ndbapi/NdbTransaction.cpp +++ b/ndb/src/ndbapi/NdbTransaction.cpp @@ -30,6 +30,7 @@ #include #include #include +#include /***************************************************************************** NdbTransaction( Ndb* aNdb ); @@ -1757,6 +1758,8 @@ Remark: Handles the reception of the ROLLBACKREP signal. 
int NdbTransaction::receiveTCROLLBACKREP( NdbApiSignal* aSignal) { + DBUG_ENTER("NdbTransaction::receiveTCROLLBACKREP"); + /**************************************************************************** Check that we are expecting signals from this transaction and that it doesn't belong to a transaction already completed. Simply ignore messages from other @@ -1764,6 +1767,12 @@ transactions. ****************************************************************************/ if(checkState_TransId(aSignal->getDataPtr() + 1)){ theError.code = aSignal->readData(4);// Override any previous errors + if (aSignal->getLength() == TcRollbackRep::SignalLength) + { + DBUG_PRINT("info", ("Found error data %u", aSignal->readData(5))); + // Signal may contain additional error data + theError.details = (char *) aSignal->readData(5); + } /**********************************************************************/ /* A serious error has occured. This could be due to deadlock or */ @@ -1775,14 +1784,14 @@ transactions. theCompletionStatus = CompletedFailure; theCommitStatus = Aborted; theReturnStatus = ReturnFailure; - return 0; + DBUG_RETURN(0); } else { #ifdef NDB_NO_DROPPED_SIGNAL abort(); #endif } - return -1; + DBUG_RETURN(-1); }//NdbTransaction::receiveTCROLLBACKREP() /******************************************************************************* diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index 56eb2c0f8bd..4c60e384e6c 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -640,8 +640,6 @@ ndberror_update(ndberror_struct * error){ if(!found){ error->status = ST_U; } - - error->details = 0; } int diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index d29e9345c11..478347e4175 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -540,6 +540,27 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) err.code, res)); if (res == HA_ERR_FOUND_DUPP_KEY) { + uint error_data= (uint) err.details; + uint dupkey= MAX_KEY; + + DBUG_PRINT("info", ("HA_ERR_FOUND_DUPP_KEY, index table %u", error_data)); + for (uint i= 0; i < MAX_KEY; i++) + { + if (m_index[i].type == UNIQUE_INDEX || + m_index[i].type == UNIQUE_ORDERED_INDEX) + { + const NDBINDEX *unique_index= + (const NDBINDEX *) m_index[i].unique_index; + if (unique_index && + unique_index->getIndexTable() && + (uint) unique_index->getIndexTable()->getTableId() == error_data) + { + DBUG_PRINT("info", ("Found violated key %u", i)); + dupkey= i; + break; + } + } + } if (m_rows_to_insert == 1) { /* @@ -547,7 +568,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) violations here, so we need to return MAX_KEY for non-primary to signal that key is unknown */ - m_dupkey= err.code == 630 ? table->s->primary_key : MAX_KEY; + m_dupkey= err.code == 630 ? 
table->s->primary_key : dupkey; } else { -- cgit v1.2.1 From 85eed9de26a1957f3911aaa5559d9dcdd493aa33 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 6 Dec 2007 18:03:30 +0100 Subject: Bug#31177: Server variables can't be set to their current values initialise limits properly for getopt_ll_limit() (fixes falcon_opt test) sql/sql_plugin.cc: initialise limits properly for getopt_ll_limit() --- sql/sql_plugin.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 002fb5be31e..d306ded7f4f 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -2725,6 +2725,8 @@ bool sys_var_pluginvar::update(THD *thd, set_var *var) static void plugin_opt_set_limits(struct my_option *options, const struct st_mysql_sys_var *opt) { + options->sub_size= 0; + switch (opt->flags & (PLUGIN_VAR_TYPEMASK | PLUGIN_VAR_UNSIGNED | PLUGIN_VAR_THDLOCAL)) { /* global system variables */ -- cgit v1.2.1 From d7a2e00556d3dac76ebb19aa70fef10eb170016a Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 6 Dec 2007 22:17:42 +0400 Subject: partition_archive tests added mysql-test/r/partition_archive.result: test results mysql-test/t/partition_archive.test: tests added --- mysql-test/r/partition_archive.result | 78 +++++++++++++++++++++++++++++++++++ mysql-test/t/partition_archive.test | 75 +++++++++++++++++++++++++++++++++ 2 files changed, 153 insertions(+) diff --git a/mysql-test/r/partition_archive.result b/mysql-test/r/partition_archive.result index dd063f6224c..5c84b41e81b 100644 --- a/mysql-test/r/partition_archive.result +++ b/mysql-test/r/partition_archive.result @@ -1,4 +1,5 @@ drop database if exists db99; +drop table if exists t1; create database db99; use db99; create table t1 (a int not null) @@ -11,3 +12,80 @@ alter table t1 add partition (partition p2 values in (3)); alter table t1 drop partition p2; use test; drop database db99; +create table t1 (f1 integer) engine= ARCHIVE partition by list(f1) +( +partition p1 values in (1), +partition p2 values in (NULL), +partition p3 values in (2), +partition p4 values in (3), +partition p5 values in (4) +); +insert into t1 values (1),(2),(3),(4),(null); +select * from t1; +f1 +1 +NULL +2 +3 +4 +select * from t1 where f1 < 3; +f1 +1 +2 +drop table t1; +CREATE TABLE t1 ( +a int not null, +b int not null, +c int not null) engine=ARCHIVE +partition by hash (a + 2) +partitions 3 +(partition x1 tablespace ts1, +partition x2 tablespace ts2, +partition x3 tablespace ts3); +insert into t1 values (1,1,1); +insert into t1 values (2,1,1); +insert into t1 values (3,1,1); +insert into t1 values (4,1,1); +insert into t1 values (5,1,1); +select * from t1; +a b c +1 1 1 +4 1 1 +2 1 1 +5 1 1 +3 1 1 +drop table t1; +create table t1 (a int) engine=archive partition by hash(a); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL +) ENGINE=ARCHIVE DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (a) */ +drop table t1; +CREATE TABLE t1(id MEDIUMINT NOT NULL AUTO_INCREMENT, +f1 VARCHAR(25), +PRIMARY KEY(id)) ENGINE=ARCHIVE +PARTITION BY RANGE(id) +SUBPARTITION BY hash(id) subpartitions 2 +(PARTITION pa1 values less than (10), +PARTITION pa2 values less than (20), +PARTITION pa3 values less than (30), +PARTITION pa4 values less than (40), +PARTITION pa5 values less than (50), +PARTITION pa6 values less than (60), +PARTITION pa7 values less than (70), +PARTITION pa8 values less than (80), +PARTITION pa9 values less than (90), +PARTITION pa10 values less than (100), +PARTITION pa11 values less than MAXVALUE); +show 
create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` mediumint(9) NOT NULL AUTO_INCREMENT, + `f1` varchar(25) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=ARCHIVE AUTO_INCREMENT=101 DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (id) SUBPARTITION BY HASH (id) SUBPARTITIONS 2 (PARTITION pa1 VALUES LESS THAN (10) ENGINE = ARCHIVE, PARTITION pa2 VALUES LESS THAN (20) ENGINE = ARCHIVE, PARTITION pa3 VALUES LESS THAN (30) ENGINE = ARCHIVE, PARTITION pa4 VALUES LESS THAN (40) ENGINE = ARCHIVE, PARTITION pa5 VALUES LESS THAN (50) ENGINE = ARCHIVE, PARTITION pa6 VALUES LESS THAN (60) ENGINE = ARCHIVE, PARTITION pa7 VALUES LESS THAN (70) ENGINE = ARCHIVE, PARTITION pa8 VALUES LESS THAN (80) ENGINE = ARCHIVE, PARTITION pa9 VALUES LESS THAN (90) ENGINE = ARCHIVE, PARTITION pa10 VALUES LESS THAN (100) ENGINE = ARCHIVE, PARTITION pa11 VALUES LESS THAN MAXVALUE ENGINE = ARCHIVE) */ +select count(*) from t1; +count(*) +100 +drop table t1; diff --git a/mysql-test/t/partition_archive.test b/mysql-test/t/partition_archive.test index 3109894d9c9..fad57107b7d 100644 --- a/mysql-test/t/partition_archive.test +++ b/mysql-test/t/partition_archive.test @@ -16,7 +16,9 @@ # --disable_warnings drop database if exists db99; +drop table if exists t1; --enable_warnings + create database db99; use db99; create table t1 (a int not null) @@ -30,3 +32,76 @@ alter table t1 add partition (partition p2 values in (3)); alter table t1 drop partition p2; use test; drop database db99; + +create table t1 (f1 integer) engine= ARCHIVE partition by list(f1) +( + partition p1 values in (1), + partition p2 values in (NULL), + partition p3 values in (2), + partition p4 values in (3), + partition p5 values in (4) +); + +insert into t1 values (1),(2),(3),(4),(null); +select * from t1; +select * from t1 where f1 < 3; +drop table t1; + +CREATE TABLE t1 ( +a int not null, +b int not null, +c int not null) engine=ARCHIVE +partition by hash (a + 2) +partitions 3 +(partition x1 tablespace ts1, + partition x2 tablespace ts2, + partition x3 tablespace ts3); + +insert into t1 values (1,1,1); +insert into t1 values (2,1,1); +insert into t1 values (3,1,1); +insert into t1 values (4,1,1); +insert into t1 values (5,1,1); + +select * from t1; + +drop table t1; + +# +# Bug #32247 Test reports wrong value of "AUTO_INCREMENT" (on a partitioned InnoDB table) +# (though reported as InnoDB bug, requires some ARCHIVE tests + +create table t1 (a int) engine=archive partition by hash(a); +show create table t1; +drop table t1; + +CREATE TABLE t1(id MEDIUMINT NOT NULL AUTO_INCREMENT, + f1 VARCHAR(25), + PRIMARY KEY(id)) ENGINE=ARCHIVE + PARTITION BY RANGE(id) + SUBPARTITION BY hash(id) subpartitions 2 + (PARTITION pa1 values less than (10), + PARTITION pa2 values less than (20), + PARTITION pa3 values less than (30), + PARTITION pa4 values less than (40), + PARTITION pa5 values less than (50), + PARTITION pa6 values less than (60), + PARTITION pa7 values less than (70), + PARTITION pa8 values less than (80), + PARTITION pa9 values less than (90), + PARTITION pa10 values less than (100), + PARTITION pa11 values less than MAXVALUE); + +--disable_query_log +let $n= 100; +while ($n) +{ + insert into t1 (f1) values (repeat('a',25)); + dec $n; +} +--enable_query_log + +show create table t1; +select count(*) from t1; +drop table t1; + -- cgit v1.2.1 From 8db8ab33f6b925f54143b9dbd5d9df40a6b26f3e Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 7 Dec 2007 10:33:50 +0100 Subject: bug#21072 Duplicate key error in NDB references wrong key: Post-review 
fixes --- ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 6 +----- ndb/src/ndbapi/NdbTransaction.cpp | 1 - sql/ha_ndbcluster.cc | 2 -- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 57daffbf331..40d5dcf7407 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -5162,7 +5162,6 @@ void Dbtc::execLQHKEYREF(Signal* signal) TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId); indexId = indexData->indexId; regApiPtr->errorData = indexId; - ndbout_c("LQHKEYREF, found index %u", indexId); const Uint32 opType = regTcPtr->operation; if (errCode == ZALREADYEXIST) errCode = terrorCode = ZNOTUNIQUE; @@ -5246,7 +5245,6 @@ void Dbtc::execLQHKEYREF(Signal* signal) jam(); regApiPtr->lqhkeyreqrec--; // Compensate for extra during read tcKeyRef->connectPtr = indexOp; - ndbout_c("TCKEYREF, sending index %u", indexId); tcKeyRef->errorData = indexId; EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength); apiConnectptr.i = save; @@ -5254,7 +5252,6 @@ void Dbtc::execLQHKEYREF(Signal* signal) } else { jam(); tcKeyRef->connectPtr = clientData; - ndbout_c("TCKEYREF, sending index %u", indexId); tcKeyRef->errorData = indexId; sendSignal(regApiPtr->ndbapiBlockref, GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB); @@ -12234,7 +12231,7 @@ void Dbtc::execTRANSID_AI(Signal* signal) tcIndxRef->transId[0] = regApiPtr->transid[0]; tcIndxRef->transId[1] = regApiPtr->transid[1]; tcIndxRef->errorCode = 4349; - // tcIndxRef->errorData = ??; Where to find indexId + tcIndxRef->errorData = regApiPtr->errorData; sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB); return; @@ -13030,7 +13027,6 @@ void Dbtc::insertIntoIndexTable(Signal* signal, } regApiPtr->currSavePointId = currSavePointId; - ndbout_c("TCKEYREQ, saving index %u", indexData->indexId); tcConnectptr.p->currentIndexId = indexData->indexId; // *********** KEYINFO *********** diff --git a/ndb/src/ndbapi/NdbTransaction.cpp b/ndb/src/ndbapi/NdbTransaction.cpp index 262deb0050e..7d3281a93d1 100644 --- a/ndb/src/ndbapi/NdbTransaction.cpp +++ b/ndb/src/ndbapi/NdbTransaction.cpp @@ -1769,7 +1769,6 @@ transactions. 
theError.code = aSignal->readData(4);// Override any previous errors if (aSignal->getLength() == TcRollbackRep::SignalLength) { - DBUG_PRINT("info", ("Found error data %u", aSignal->readData(5))); // Signal may contain additional error data theError.details = (char *) aSignal->readData(5); } diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 478347e4175..147aeeb24a8 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -543,7 +543,6 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) uint error_data= (uint) err.details; uint dupkey= MAX_KEY; - DBUG_PRINT("info", ("HA_ERR_FOUND_DUPP_KEY, index table %u", error_data)); for (uint i= 0; i < MAX_KEY; i++) { if (m_index[i].type == UNIQUE_INDEX || @@ -555,7 +554,6 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) unique_index->getIndexTable() && (uint) unique_index->getIndexTable()->getTableId() == error_data) { - DBUG_PRINT("info", ("Found violated key %u", i)); dupkey= i; break; } -- cgit v1.2.1 From 154ffb3ff5e5b4bf5b15af261ac858ac59adf1f3 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 7 Dec 2007 11:05:19 +0100 Subject: bug#21072 Duplicate key error in NDB references wrong key: Post-merge fixes --- mysql-test/suite/ndb/r/ndb_charset.result | 8 ++++---- mysql-test/suite/ndb/r/ndb_index_unique.result | 14 +++++++------- mysql-test/suite/ndb/r/ndb_update.result | 6 +++--- sql/ha_ndbcluster.cc | 1 - storage/ndb/include/ndbapi/NdbDictionary.hpp | 5 ----- storage/ndb/src/ndbapi/NdbDictionary.cpp | 9 --------- storage/ndb/src/ndbapi/NdbOperationExec.cpp | 2 +- 7 files changed, 15 insertions(+), 30 deletions(-) diff --git a/mysql-test/suite/ndb/r/ndb_charset.result b/mysql-test/suite/ndb/r/ndb_charset.result index 1c65a380039..e84c906dd7e 100644 --- a/mysql-test/suite/ndb/r/ndb_charset.result +++ b/mysql-test/suite/ndb/r/ndb_charset.result @@ -112,9 +112,9 @@ unique key(a) ) engine=ndb; insert into t1 values(1, 'aAa'); insert into t1 values(2, 'aaa'); -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry 'aaa' for key 'a' insert into t1 values(3, 'AAA'); -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry 'AAA' for key 'a' select * from t1 order by p; p a 1 aAa @@ -138,9 +138,9 @@ unique key(a) ) engine=ndb; insert into t1 values (1,'A'),(2,'b '),(3,'C '),(4,'d '),(5,'E'),(6,'f'); insert into t1 values(99,'b'); -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry 'b' for key 'a' insert into t1 values(99,'a '); -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry 'a ' for key 'a' select a,length(a) from t1 order by a; a length(a) A 1 diff --git a/mysql-test/suite/ndb/r/ndb_index_unique.result b/mysql-test/suite/ndb/r/ndb_index_unique.result index cc63ce69760..bfc0c5a2e56 100644 --- a/mysql-test/suite/ndb/r/ndb_index_unique.result +++ b/mysql-test/suite/ndb/r/ndb_index_unique.result @@ -22,7 +22,7 @@ select * from t1 where b = 4 order by a; a b c 3 4 6 insert into t1 values(8, 2, 3); -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry '2' for key 'ib' select * from t1 order by a; a b c 1 2 3 @@ -93,7 +93,7 @@ a b c 1 1 1 4 4 NULL insert into t1 values(5,1,1); -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry '1-1' for key 'bc' drop table t1; CREATE TABLE t2 ( a int unsigned NOT NULL PRIMARY KEY, @@ -116,7 +116,7 @@ select * from t2 where b = 4 order by a; a b c 3 4 6 insert into t2 values(8, 2, 3); -ERROR 23000: Duplicate entry '' for key 
'*UNKNOWN*' +ERROR 23000: Duplicate entry '2-3' for key 'b' select * from t2 order by a; a b c 1 2 3 @@ -139,7 +139,7 @@ a b c 8 2 3 create unique index bi using hash on t2(b); insert into t2 values(9, 3, 1); -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry '3' for key 'bi' alter table t2 drop index bi; insert into t2 values(9, 3, 1); select * from t2 order by a; @@ -229,7 +229,7 @@ pk a 3 NULL 4 4 insert into t1 values (5,0); -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry '0' for key 'a' select * from t1 order by pk; pk a -1 NULL @@ -262,7 +262,7 @@ pk a b c 0 NULL 18 NULL 1 3 19 abc insert into t2 values(2,3,19,'abc'); -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry '3-abc' for key 'si' select * from t2 order by pk; pk a b c -1 1 17 NULL @@ -682,7 +682,7 @@ create table t1 (a int primary key, b varchar(1000) not null, unique key (b)) engine=ndb charset=utf8; insert into t1 values (1, repeat(_utf8 0xe288ab6474, 200)); insert into t1 values (2, repeat(_utf8 0xe288ab6474, 200)); -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry '∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫d' for key 'b' select a, sha1(b) from t1; a sha1(b) 1 08f5d02c8b8bc244f275bdfc22c42c5cab0d9d7d diff --git a/mysql-test/suite/ndb/r/ndb_update.result b/mysql-test/suite/ndb/r/ndb_update.result index ed51daee5cb..fa083587956 100644 --- a/mysql-test/suite/ndb/r/ndb_update.result +++ b/mysql-test/suite/ndb/r/ndb_update.result @@ -28,7 +28,7 @@ pk1 b c 2 2 2 4 1 1 UPDATE t1 set pk1 = 1, c = 2 where pk1 = 4; -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry '2' for key 'c' UPDATE IGNORE t1 set pk1 = 1, c = 2 where pk1 = 4; select * from t1 order by pk1; pk1 b c @@ -62,9 +62,9 @@ INSERT INTO t3 VALUES (2, 2); UPDATE t1 SET a = 1; UPDATE t1 SET a = 1 ORDER BY a; UPDATE t2 SET a = 1; -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry '1-2' for key 'a' UPDATE t2 SET a = 1 ORDER BY a; -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry '1-2' for key 'a' UPDATE t3 SET a = 1; ERROR 23000: Duplicate entry '1-2' for key 'PRIMARY' UPDATE t3 SET a = 1 ORDER BY a; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 55c45f6c687..a90f854f889 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -599,7 +599,6 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) const NDBINDEX *unique_index= (const NDBINDEX *) m_index[i].unique_index; if (unique_index && - unique_index->getIndexTable() && (uint) unique_index->getObjectId() == error_data) { dupkey= i; diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp index 12d5a1a737f..0e782ba9214 100644 --- a/storage/ndb/include/ndbapi/NdbDictionary.hpp +++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp @@ -1021,11 +1021,6 @@ public: */ const char * getTable() const; - /** - * Get the table representing the index - */ - const Table * getIndexTable() const; - /** * Get the number of columns in the index */ diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp index 2548b96c58a..a4395fc4b9c 100644 --- a/storage/ndb/src/ndbapi/NdbDictionary.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp @@ -792,15 +792,6 @@ NdbDictionary::Index::getTable() const { return m_impl.getTable(); } -const NdbDictionary::Table * -NdbDictionary::Index::getIndexTable() const { - NdbTableImpl * t = 
m_impl.m_table; - if (t) { - return t->m_facade; - } - return 0; -} - unsigned NdbDictionary::Index::getNoOfColumns() const { return m_impl.m_columns.size(); diff --git a/storage/ndb/src/ndbapi/NdbOperationExec.cpp b/storage/ndb/src/ndbapi/NdbOperationExec.cpp index aa2586357f4..cd1ac44d82c 100644 --- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp @@ -549,7 +549,7 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal) if (aSignal->getLength() == TcKeyRef::SignalLength) { // Signal may contain additional error data - setErrorDetails((char *) aSignal->readData(5)); + theError.details = (char *) aSignal->readData(5); } theStatus = Finished; -- cgit v1.2.1 From 419e8cf7b2387ca4f30603ffd7cca8931abb7461 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 7 Dec 2007 12:20:04 +0100 Subject: bug#21072 Duplicate key error in NDB references wrong key: regenerated result --- mysql-test/suite/rpl_ndb/r/rpl_row_basic_7ndb.result | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/rpl_ndb/r/rpl_row_basic_7ndb.result b/mysql-test/suite/rpl_ndb/r/rpl_row_basic_7ndb.result index 5519e0dcd0c..39a681e7263 100644 --- a/mysql-test/suite/rpl_ndb/r/rpl_row_basic_7ndb.result +++ b/mysql-test/suite/rpl_ndb/r/rpl_row_basic_7ndb.result @@ -389,9 +389,9 @@ INSERT INTO t8 VALUES (99,99,99); INSERT INTO t8 VALUES (99,22,33); ERROR 23000: Duplicate entry '99' for key 'PRIMARY' INSERT INTO t8 VALUES (11,99,33); -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry '99' for key 'b' INSERT INTO t8 VALUES (11,22,99); -ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +ERROR 23000: Duplicate entry '99' for key 'c' SELECT * FROM t8 ORDER BY a; a b c 99 99 99 -- cgit v1.2.1 From 28d268086c134da75a6517a9d2d89cbc4b842444 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 7 Dec 2007 13:42:44 +0100 Subject: Removed illegal cast --- sql/ha_ndbcluster.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 147aeeb24a8..b6d496fe915 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -540,7 +540,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) err.code, res)); if (res == HA_ERR_FOUND_DUPP_KEY) { - uint error_data= (uint) err.details; + char *error_data= err.details; uint dupkey= MAX_KEY; for (uint i= 0; i < MAX_KEY; i++) @@ -552,7 +552,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) (const NDBINDEX *) m_index[i].unique_index; if (unique_index && unique_index->getIndexTable() && - (uint) unique_index->getIndexTable()->getTableId() == error_data) + unique_index->getIndexTable()->getTableId() == (int) error_data) { dupkey= i; break; -- cgit v1.2.1 From 4931d5cae544bcc026e7f85ebfc6fa935d7074d7 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 7 Dec 2007 14:03:54 +0100 Subject: Removed illegal cast --- sql/ha_ndbcluster.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index b6d496fe915..78520e4c6d1 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -552,7 +552,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) (const NDBINDEX *) m_index[i].unique_index; if (unique_index && unique_index->getIndexTable() && - unique_index->getIndexTable()->getTableId() == (int) error_data) + (char *) unique_index->getIndexTable()->getTableId() == error_data) { dupkey= i; break; -- cgit v1.2.1 From f3ccf7ea135e8d2979df594df3ac71766c85b3fd Mon Sep 17 00:00:00 2001 From: unknown 
Date: Fri, 7 Dec 2007 14:06:44 +0100 Subject: Removed illegal cast --- sql/ha_ndbcluster.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index fea50aeecdb..2d8df0f6a47 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -599,7 +599,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) const NDBINDEX *unique_index= (const NDBINDEX *) m_index[i].unique_index; if (unique_index && - (char *) unique_index->getObjectId() == (int) error_data) + (char *) unique_index->getObjectId() == error_data) { dupkey= i; break; -- cgit v1.2.1 From 5826a5c490df8540fbc2b5bed6efad38723619c3 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 7 Dec 2007 15:39:41 +0100 Subject: Bug #27440 read_only allows create and drop database When read_only option was enabled, a user without SUPER privilege could perform CREATE DATABASE and DROP DATABASE operations. This patch adds a check to make sure this isn't possible. It also attempts to simplify the logic used to determine if relevant tables are updated, making it more human readable. mysql-test/r/read_only.result: Updated result file mysql-test/t/read_only.test: A test case is added which shows that it is not possible to drop or create a database in read-only mode despite having the GRANT permissions to do so, SUPER user excepted. sql/sql_parse.cc: - Simplified complex predicate by grouping it in a read friendly way. - Added predicate to fail on database updates while running in read-only mode. --- mysql-test/r/read_only.result | 31 +++++++++++++++++ mysql-test/t/read_only.test | 34 +++++++++++++++++++ sql/sql_parse.cc | 77 ++++++++++++++++++++++++++++++++++++++----- 3 files changed, 134 insertions(+), 8 deletions(-) diff --git a/mysql-test/r/read_only.result b/mysql-test/r/read_only.result index 69d25fbef6f..827a137f5b2 100644 --- a/mysql-test/r/read_only.result +++ b/mysql-test/r/read_only.result @@ -46,4 +46,35 @@ Warnings: Note 1051 Unknown table 'ttt' drop table t1,t2; drop user test@localhost; +# +# Bug #27440 read_only allows create and drop database +# +drop database if exists mysqltest_db1; +drop database if exists mysqltest_db2; +delete from mysql.user where User like 'mysqltest_%'; +delete from mysql.db where User like 'mysqltest_%'; +delete from mysql.tables_priv where User like 'mysqltest_%'; +delete from mysql.columns_priv where User like 'mysqltest_%'; +flush privileges; +grant all on mysqltest_db2.* to `mysqltest_u1`@`%`; +create database mysqltest_db1; +grant all on mysqltest_db1.* to `mysqltest_u1`@`%`; +flush privileges; +show grants for current_user(); +Grants for mysqltest_u1@% +GRANT USAGE ON *.* TO 'mysqltest_u1'@'%' +GRANT ALL PRIVILEGES ON `mysqltest_db2`.* TO 'mysqltest_u1'@'%' +GRANT ALL PRIVILEGES ON `mysqltest_db1`.* TO 'mysqltest_u1'@'%' +create database mysqltest_db2; +ERROR HY000: The MySQL server is running with the --read-only option so it cannot execute this statement +show databases like '%mysqltest_db2%'; +Database (%mysqltest_db2%) +drop database mysqltest_db1; +ERROR HY000: The MySQL server is running with the --read-only option so it cannot execute this statement +delete from mysql.user where User like 'mysqltest_%'; +delete from mysql.db where User like 'mysqltest_%'; +delete from mysql.tables_priv where User like 'mysqltest_%'; +delete from mysql.columns_priv where User like 'mysqltest_%'; +flush privileges; +drop database mysqltest_db1; set global read_only=0; diff --git a/mysql-test/t/read_only.test b/mysql-test/t/read_only.test index 
8e14b310f4c..5ec062bc103 100644 --- a/mysql-test/t/read_only.test +++ b/mysql-test/t/read_only.test @@ -117,4 +117,38 @@ connection default; drop table t1,t2; drop user test@localhost; +--echo # +--echo # Bug #27440 read_only allows create and drop database +--echo # +--disable_warnings +drop database if exists mysqltest_db1; +drop database if exists mysqltest_db2; +--enable_warnings + +delete from mysql.user where User like 'mysqltest_%'; +delete from mysql.db where User like 'mysqltest_%'; +delete from mysql.tables_priv where User like 'mysqltest_%'; +delete from mysql.columns_priv where User like 'mysqltest_%'; +flush privileges; + +grant all on mysqltest_db2.* to `mysqltest_u1`@`%`; +create database mysqltest_db1; +grant all on mysqltest_db1.* to `mysqltest_u1`@`%`; +flush privileges; +connect (con_bug27440,127.0.0.1,mysqltest_u1,,test,$MASTER_MYPORT,); +connection con_bug27440; +show grants for current_user(); +--error ER_OPTION_PREVENTS_STATEMENT +create database mysqltest_db2; +show databases like '%mysqltest_db2%'; +--error ER_OPTION_PREVENTS_STATEMENT +drop database mysqltest_db1; +disconnect con_bug27440; +connection default; +delete from mysql.user where User like 'mysqltest_%'; +delete from mysql.db where User like 'mysqltest_%'; +delete from mysql.tables_priv where User like 'mysqltest_%'; +delete from mysql.columns_priv where User like 'mysqltest_%'; +flush privileges; +drop database mysqltest_db1; set global read_only=0; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index e587a9f3561..a5ba486920d 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1597,6 +1597,74 @@ static bool do_command(THD *thd) #endif /* EMBEDDED_LIBRARY */ +/** + @brief Determine if an attempt to update a non-temporary table while the + read-only option was enabled has been made. + + This is a helper function to mysql_execute_command. + + @note SQLCOM_MULTI_UPDATE is an exception and delt with elsewhere. + + @see mysql_execute_command + @returns Status code + @retval TRUE The statement should be denied. + @retval FALSE The statement isn't updating any relevant tables. +*/ + +static my_bool deny_updates_if_read_only_option(THD *thd, + TABLE_LIST *all_tables) +{ + DBUG_ENTER("deny_updates_if_read_only_option"); + + if (!opt_readonly) + DBUG_RETURN(FALSE); + + LEX *lex= thd->lex; + + const my_bool user_is_super= + ((ulong)(thd->security_ctx->master_access & SUPER_ACL) == + (ulong)SUPER_ACL); + + if (user_is_super) + DBUG_RETURN(FALSE); + + if (!uc_update_queries[lex->sql_command]) + DBUG_RETURN(FALSE); + + /* Multi update is an exception and is dealt with later. */ + if (lex->sql_command == SQLCOM_UPDATE_MULTI) + DBUG_RETURN(FALSE); + + const my_bool create_temp_tables= + (lex->sql_command == SQLCOM_CREATE_TABLE) && + (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE); + + const my_bool drop_temp_tables= + (lex->sql_command == SQLCOM_DROP_TABLE) && + lex->drop_temporary; + + const my_bool update_real_tables= + some_non_temp_table_to_be_updated(thd, all_tables) && + !(create_temp_tables || drop_temp_tables); + + + const my_bool create_or_drop_databases= + (lex->sql_command == SQLCOM_CREATE_DB) || + (lex->sql_command == SQLCOM_DROP_DB); + + if (update_real_tables || create_or_drop_databases) + { + /* + An attempt was made to modify one or more non-temporary tables. + */ + DBUG_RETURN(TRUE); + } + + + /* Assuming that only temporary tables are modified. */ + DBUG_RETURN(FALSE); +} + /* Perform one connection-level (COM_XXXX) command. 
@@ -2590,14 +2658,7 @@ mysql_execute_command(THD *thd) When option readonly is set deny operations which change non-temporary tables. Except for the replication thread and the 'super' users. */ - if (opt_readonly && - !(thd->security_ctx->master_access & SUPER_ACL) && - uc_update_queries[lex->sql_command] && - !((lex->sql_command == SQLCOM_CREATE_TABLE) && - (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) && - !((lex->sql_command == SQLCOM_DROP_TABLE) && lex->drop_temporary) && - ((lex->sql_command != SQLCOM_UPDATE_MULTI) && - some_non_temp_table_to_be_updated(thd, all_tables))) + if (deny_updates_if_read_only_option(thd, all_tables)) { my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only"); DBUG_RETURN(-1); -- cgit v1.2.1 From 0ad7fc58c7e149fe7885d6a3558637f87513047a Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 7 Dec 2007 22:54:47 +0300 Subject: Bug#32482: Crash for a query with ORDER BY a user variable. The Item_func_set_user_var::register_field_in_read_map() did not check that the result_field was null.This caused server crashes for queries that required order by such a field and were executed without using a temporary table. The Item_func_set_user_var::register_field_in_read_map() now checks the result_field to be not null. mysql-test/t/user_var.test: Added a test case for the bug#32482: Crash for a query with ORDER BY a user variable. mysql-test/r/user_var.result: Added a test case for the bug#32482: Crash for a query with ORDER BY a user variable. sql/item_func.cc: Bug#32482: Crash for a query with ORDER BY a user variable. The Item_func_set_user_var::register_field_in_read_map() now checks the result_field to be not null. --- mysql-test/r/user_var.result | 7 +++++++ mysql-test/t/user_var.test | 8 ++++++++ sql/item_func.cc | 9 ++++++--- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/user_var.result b/mysql-test/r/user_var.result index 6fd7b39f226..2988a13de41 100644 --- a/mysql-test/r/user_var.result +++ b/mysql-test/r/user_var.result @@ -353,3 +353,10 @@ select @a:=f4, count(f4) from t1 group by 1 desc; 2.6 1 1.6 4 drop table t1; +create table t1 (f1 int); +insert into t1 values (2), (1); +select @i := f1 as j from t1 order by 1; +j +1 +2 +drop table t1; diff --git a/mysql-test/t/user_var.test b/mysql-test/t/user_var.test index 3a3e8f88f83..ca02e0b5f2d 100644 --- a/mysql-test/t/user_var.test +++ b/mysql-test/t/user_var.test @@ -237,3 +237,11 @@ select @a:=f2, count(f2) from t1 group by 1 desc; select @a:=f3, count(f3) from t1 group by 1 desc; select @a:=f4, count(f4) from t1 group by 1 desc; drop table t1; + +# +# Bug#32482: Crash for a query with ORDER BY a user variable. 
+# +create table t1 (f1 int); +insert into t1 values (2), (1); +select @i := f1 as j from t1 order by 1; +drop table t1; diff --git a/sql/item_func.cc b/sql/item_func.cc index e2551979202..41de960a37e 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -3848,9 +3848,12 @@ Item_func_set_user_var::fix_length_and_dec() bool Item_func_set_user_var::register_field_in_read_map(uchar *arg) { - TABLE *table= (TABLE *) arg; - if (result_field->table == table || !table) - bitmap_set_bit(result_field->table->read_set, result_field->field_index); + if (result_field) + { + TABLE *table= (TABLE *) arg; + if (result_field->table == table || !table) + bitmap_set_bit(result_field->table->read_set, result_field->field_index); + } return 0; } -- cgit v1.2.1 From d776054e00b917e1d6fbf0195b924a052466320a Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 7 Dec 2007 17:14:59 -0800 Subject: Fixed bug #32815. The index (key_part_1, key_part-2) was erroneously considered as compatible with the required ordering in the function test_test_if_order_by_key when a query with an ORDER BY clause contained a condition of the form key_part_1=const OR key_part_1 IS NULL and the order list contained only key_part_2. This happened because the value of the const_key_parts field in the KEYUSE structure was not formed correctly for the keys that could be used for ref_or_null access. This was fixed in the code of the update_ref_and_keys function. The problem could not manifest itself for MyISAM databases because the implementation of the keys_to_use_for_scanning() handler function always returns an empty bitmap for the MyISAM engine. mysql-test/r/innodb_mysql.result: Added a test case for bug #32815. mysql-test/t/innodb_mysql.test: Added a test case for bug #32815. sql/sql_select.cc: Fixed bug #32815. The index (key_part_1, key_part-2) was erroneously considered as compatible with the required ordering in the function test_test_if_order_by_key when a query with an ORDER BY clause contained a condition of the form key_part_1=const OR key_part_1 IS NULL and the order list contained only key_part_2. This happened because the value of the const_key_parts field in the KEYUSE structure was not formed correctly for the keys that could be used for ref_or_null access. This was fixed in the code of the update_ref_and_keys function. 
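The key point in the update_ref_and_keys note above is that a key part matched only through ref_or_null access ("key_part_1 = const OR key_part_1 IS NULL") must not be counted as a constant key part when deciding whether an index on (key_part_1, key_part_2) can satisfy ORDER BY key_part_2. A minimal standalone C++ sketch of that decision, not the server code (KEY_OPTIMIZE_REF_OR_NULL is the flag used in the patch; KeyUse, const_key_parts and index_orders_by_second_part are simplified stand-ins assumed for illustration):

// Simplified model of the const-key-part decision; KeyUse and the helper
// functions below are illustrative stand-ins, not the server structures.
#include <cstdint>
#include <iostream>
#include <vector>

constexpr unsigned KEY_OPTIMIZE_REF_OR_NULL = 1;   // ref-or-null access flag

struct KeyUse {
  uint64_t used_tables;    // 0 means the compared value is a constant
  unsigned optimize;       // KEY_OPTIMIZE_REF_OR_NULL for "... OR col IS NULL"
  uint64_t keypart_map;    // one bit per key part bound by this predicate
};

// Bitmap of key parts that are genuinely bound to constants.
uint64_t const_key_parts(const std::vector<KeyUse>& uses, bool with_fix) {
  uint64_t map = 0;
  for (const KeyUse& u : uses) {
    bool is_const = (u.used_tables == 0);
    if (with_fix)            // the patched condition in update_ref_and_keys
      is_const = is_const && u.optimize != KEY_OPTIMIZE_REF_OR_NULL;
    if (is_const)
      map |= u.keypart_map;
  }
  return map;
}

// An index (p1, p2) can deliver "ORDER BY p2" only if p1 is constant.
bool index_orders_by_second_part(uint64_t const_parts) {
  return (const_parts & 1) != 0;   // bit 0 == first key part
}

int main() {
  // "id = 191 OR id IS NULL" binds key part 1, but only via ref_or_null.
  std::vector<KeyUse> uses = {{0, KEY_OPTIMIZE_REF_OR_NULL, 1}};
  std::cout << "without the fix: "
            << index_orders_by_second_part(const_key_parts(uses, false))
            << " (index wrongly considered ordered on d)\n";
  std::cout << "with the fix:    "
            << index_orders_by_second_part(const_key_parts(uses, true))
            << " (filesort is used, as in the expected EXPLAIN)\n";
}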
--- mysql-test/r/innodb_mysql.result | 12 ++++++++++++ mysql-test/t/innodb_mysql.test | 15 +++++++++++++++ sql/sql_select.cc | 2 +- 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/innodb_mysql.result b/mysql-test/r/innodb_mysql.result index b073e4bd6ce..7450d8c349b 100644 --- a/mysql-test/r/innodb_mysql.result +++ b/mysql-test/r/innodb_mysql.result @@ -1213,4 +1213,16 @@ a b 3 2 1 1 DROP TABLE t1; +CREATE TABLE t1 (id int, type char(6), d int, INDEX idx(id,d)) ENGINE=InnoDB; +INSERT INTO t1 VALUES +(191, 'member', 1), (NULL, 'member', 3), (NULL, 'member', 4), (201, 'member', 2); +EXPLAIN SELECT * FROM t1 WHERE id=191 OR id IS NULL ORDER BY d; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL idx NULL NULL NULL 3 Using where; Using filesort +SELECT * FROM t1 WHERE id=191 OR id IS NULL ORDER BY d; +id type d +191 member 1 +NULL member 3 +NULL member 4 +DROP TABLE t1; End of 5.0 tests diff --git a/mysql-test/t/innodb_mysql.test b/mysql-test/t/innodb_mysql.test index f64efd600c5..f1b15d05a60 100644 --- a/mysql-test/t/innodb_mysql.test +++ b/mysql-test/t/innodb_mysql.test @@ -960,4 +960,19 @@ SELECT * FROM t1 ORDER BY b DESC, a ASC; DROP TABLE t1; +# +# Bug #32815: query with ORDER BY and a possible ref_or_null access +# + +CREATE TABLE t1 (id int, type char(6), d int, INDEX idx(id,d)) ENGINE=InnoDB; +INSERT INTO t1 VALUES + (191, 'member', 1), (NULL, 'member', 3), (NULL, 'member', 4), (201, 'member', 2); + +EXPLAIN SELECT * FROM t1 WHERE id=191 OR id IS NULL ORDER BY d; +SELECT * FROM t1 WHERE id=191 OR id IS NULL ORDER BY d; + +DROP TABLE t1; + + + --echo End of 5.0 tests diff --git a/sql/sql_select.cc b/sql/sql_select.cc index bcf538cdde2..d1bd018878a 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -3691,7 +3691,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, found_eq_constant=0; for (i=0 ; i < keyuse->elements-1 ; i++,use++) { - if (!use->used_tables) + if (!use->used_tables && use->optimize != KEY_OPTIMIZE_REF_OR_NULL) use->table->const_key_parts[use->key]|= use->keypart_map; if (use->keypart != FT_KEYPART) { -- cgit v1.2.1 From b1b2e0d053029ef2111b37f6aa624af364186bde Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 7 Dec 2007 23:36:58 -0800 Subject: Fixed bug #27545. Both arguments of the function NAME_CONST must be constant expressions. This constraint is checked in the Item_name_const::fix_fields method. Yet if the argument of the function was not a constant expression no error message was reported. As a result the client hanged waiting for a response. Now the function Item_name_const::fix_fields reports an error message when any of the additional context conditions imposed on the function NAME_CONST is not satisfied. mysql-test/r/func_misc.result: Added a test case for bug #26545. mysql-test/t/func_misc.test: Added a test case for bug #26545. 
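A minimal standalone C++ sketch of the validation pattern described above, assuming a hypothetical Expr stand-in for an expression node; it models "report an error and fail the statement instead of returning silently" and is not the actual Item_name_const::fix_fields code:

// Illustrative validation only; Expr and resolve_name_const are hypothetical.
#include <iostream>
#include <optional>
#include <string>

struct Expr {
  bool is_const;                      // true for literal arguments
  std::optional<std::string> value;   // evaluated value, nullopt == SQL NULL
};

// Returns the resolved name, or reports an error and returns nullopt so the
// caller aborts the statement instead of leaving the client waiting.
std::optional<std::string> resolve_name_const(const Expr& name, const Expr& value) {
  if (!name.is_const || !value.is_const || !name.value) {
    std::cerr << "ERROR: the NAME_CONST syntax is reserved for internal use\n";
    return std::nullopt;
  }
  return *name.value;
}

int main() {
  Expr column_ref{false, std::string("x")};   // e.g. a derived-table column
  Expr literal{true, std::string("2")};

  // Non-constant first argument: rejected with an error, no hang.
  if (!resolve_name_const(column_ref, literal))
    std::cout << "statement rejected\n";

  // Two constants: accepted.
  if (auto n = resolve_name_const(Expr{true, std::string("test")}, literal))
    std::cout << "resolved name: " << *n << '\n';
}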
--- mysql-test/r/func_misc.result | 5 +++++ mysql-test/t/func_misc.test | 13 +++++++++++++ sql/item.cc | 14 +++++++------- 3 files changed, 25 insertions(+), 7 deletions(-) diff --git a/mysql-test/r/func_misc.result b/mysql-test/r/func_misc.result index c941790c35b..d04c22c0c9d 100644 --- a/mysql-test/r/func_misc.result +++ b/mysql-test/r/func_misc.result @@ -207,4 +207,9 @@ test SELECT NAME_CONST('test', 'test'); test test +CREATE TABLE t1 (a int); +INSERT INTO t1 VALUES (5), (2); +SELECT NAME_CONST(x,2) FROM (SELECT a x FROM t1) t; +ERROR HY000: The 'NAME_CONST' syntax is reserved for purposes internal to the MySQL server +DROP TABLE t1; End of 5.0 tests diff --git a/mysql-test/t/func_misc.test b/mysql-test/t/func_misc.test index 2c34f77b1ff..e8ee76b3a96 100644 --- a/mysql-test/t/func_misc.test +++ b/mysql-test/t/func_misc.test @@ -204,5 +204,18 @@ SELECT NAME_CONST('test', 1.0); SELECT NAME_CONST('test', -1.0); SELECT NAME_CONST('test', 'test'); +# +# Bug #27545: erroneous usage of NAME_CONST with a name as the first parameter +# resolved against a column name of a derived table hangs the client +# + +CREATE TABLE t1 (a int); +INSERT INTO t1 VALUES (5), (2); + +--error ER_RESERVED_SYNTAX +SELECT NAME_CONST(x,2) FROM (SELECT a x FROM t1) t; + +DROP TABLE t1; + --echo End of 5.0 tests diff --git a/sql/item.cc b/sql/item.cc index 3555df40060..4eeb2b2aa84 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1221,14 +1221,14 @@ bool Item_name_const::fix_fields(THD *thd, Item **ref) s.length(0); if (value_item->fix_fields(thd, &value_item) || - name_item->fix_fields(thd, &name_item)) - return TRUE; - if (!(value_item->const_item() && name_item->const_item())) + name_item->fix_fields(thd, &name_item) || + !value_item->const_item() || + !name_item->const_item() || + !(item_name= name_item->val_str(&s))) // Can't have a NULL name + { + my_error(ER_RESERVED_SYNTAX, MYF(0), "NAME_CONST"); return TRUE; - - if (!(item_name= name_item->val_str(&s))) - return TRUE; /* Can't have a NULL name */ - + } set_name(item_name->ptr(), (uint) item_name->length(), system_charset_info); max_length= value_item->max_length; decimals= value_item->decimals; -- cgit v1.2.1 From b3d8ff4ebd16c418dbce2731a5553c3ae6a47a7f Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 8 Dec 2007 23:05:00 +0200 Subject: Fix for BUG#32694 "NOT NULL table field in a subquery produces invalid results" The problem was that when convert_constant_item is called for subqueries, this happens when we already started executing the top-level query, and the field argument of convert_constant_item pointed to a valid table row. In turn convert_constant_item used the field buffer to compute the value of its item argument. This copied the item's value into the field, and made equalities with outer references always true. The fix saves/restores the original field's value when it belongs to an outer table. mysql-test/r/type_datetime.result: Test for BUG#32694. mysql-test/t/type_datetime.test: Test for BUG#32694. sql/item_cmpfunc.cc: - Changed convert_constant_item() so that it doesn't destroy the contents of its field argument when the field originates from table in an outer query. 
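The core of the fix is a save-then-restore around the scratch use of the field buffer. A minimal standalone C++ sketch under simplified assumptions (a hypothetical Field with a single integer buffer; not the server's convert_constant_item):

// Illustrative only; Field here is a hypothetical one-value buffer.
#include <cassert>
#include <cstdint>

struct Field {
  int64_t buf = 0;
  int64_t val_int() const { return buf; }
  void store(int64_t v) { buf = v; }
};

// Evaluate a constant through the field's buffer, restoring the original
// contents when the field carries a live row from an outer query.
int64_t convert_constant(Field& f, int64_t constant, bool field_is_outer_ref) {
  int64_t saved = 0;
  if (field_is_outer_ref)
    saved = f.val_int();      // remember the outer row's current value
  f.store(constant);          // scratch use of the buffer for the comparison
  int64_t converted = f.val_int();
  if (field_is_outer_ref)
    f.store(saved);           // put the real row value back, so predicates on
                              // the outer field keep comparing against it
  return converted;
}

int main() {
  Field outer_field;
  outer_field.store(20070425);   // stands in for the outer row's cur_date
  int64_t c = convert_constant(outer_field, 0, /*field_is_outer_ref=*/true);
  assert(c == 0);
  assert(outer_field.val_int() == 20070425);   // outer row value survived
  return 0;
}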
--- mysql-test/r/type_datetime.result | 55 ++++++++++++++++++++++++++++++++ mysql-test/t/type_datetime.test | 37 +++++++++++++++++++++ sql/item_cmpfunc.cc | 67 ++++++++++++++++++++++++--------------- 3 files changed, 133 insertions(+), 26 deletions(-) diff --git a/mysql-test/r/type_datetime.result b/mysql-test/r/type_datetime.result index 73edfb75ff6..3814e2f8348 100644 --- a/mysql-test/r/type_datetime.result +++ b/mysql-test/r/type_datetime.result @@ -505,4 +505,59 @@ select sum(a) from t1 group by convert(a, datetime); sum(a) NULL drop table t1; +create table t1 (id int(10) not null, cur_date datetime not null); +create table t2 (id int(10) not null, cur_date date not null); +insert into t1 (id, cur_date) values (1, '2007-04-25 18:30:22'); +insert into t2 (id, cur_date) values (1, '2007-04-25'); +explain extended +select * from t1 +where id in (select id from t1 as x1 where (t1.cur_date is null)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +Warnings: +Note 1276 Field or reference 'test.t1.cur_date' of SELECT #2 was resolved in SELECT #1 +Note 1003 select '1' AS `id`,'2007-04-25 18:30:22' AS `cur_date` from `test`.`t1` where ('1',(select 1 AS `Not_used` from `test`.`t1` `x1` where 0)) +select * from t1 +where id in (select id from t1 as x1 where (t1.cur_date is null)); +id cur_date +explain extended +select * from t2 +where id in (select id from t2 as x1 where (t2.cur_date is null)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +Warnings: +Note 1276 Field or reference 'test.t2.cur_date' of SELECT #2 was resolved in SELECT #1 +Note 1003 select '1' AS `id`,'2007-04-25' AS `cur_date` from `test`.`t2` where ('1',(select 1 AS `Not_used` from `test`.`t2` `x1` where 0)) +select * from t2 +where id in (select id from t2 as x1 where (t2.cur_date is null)); +id cur_date +insert into t1 (id, cur_date) values (2, '2007-04-26 18:30:22'); +insert into t2 (id, cur_date) values (2, '2007-04-26'); +explain extended +select * from t1 +where id in (select id from t1 as x1 where (t1.cur_date is null)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where +2 DEPENDENT SUBQUERY x1 ALL NULL NULL NULL NULL 2 Using where +Warnings: +Note 1276 Field or reference 'test.t1.cur_date' of SELECT #2 was resolved in SELECT #1 +Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`cur_date` AS `cur_date` from `test`.`t1` where (`test`.`t1`.`id`,(select 1 AS `Not_used` from `test`.`t1` `x1` where ((`test`.`t1`.`cur_date` = 0) and ((`test`.`t1`.`id`) = `test`.`x1`.`id`)))) +select * from t1 +where id in (select id from t1 as x1 where (t1.cur_date is null)); +id cur_date +explain extended +select * from t2 +where id in (select id from t2 as x1 where (t2.cur_date is null)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where +2 DEPENDENT SUBQUERY x1 ALL NULL NULL NULL NULL 2 Using where +Warnings: +Note 1276 Field or reference 'test.t2.cur_date' of SELECT #2 was resolved in SELECT #1 +Note 1003 select `test`.`t2`.`id` AS `id`,`test`.`t2`.`cur_date` AS `cur_date` from `test`.`t2` where 
(`test`.`t2`.`id`,(select 1 AS `Not_used` from `test`.`t2` `x1` where ((`test`.`t2`.`cur_date` = 0) and ((`test`.`t2`.`id`) = `test`.`x1`.`id`)))) +select * from t2 +where id in (select id from t2 as x1 where (t2.cur_date is null)); +id cur_date +drop table t1,t2; End of 5.0 tests diff --git a/mysql-test/t/type_datetime.test b/mysql-test/t/type_datetime.test index 08a94384aec..1f7c11ae8e6 100644 --- a/mysql-test/t/type_datetime.test +++ b/mysql-test/t/type_datetime.test @@ -338,4 +338,41 @@ insert into t1 values (), (), (); select sum(a) from t1 group by convert(a, datetime); drop table t1; +# +# Bug #32694: NOT NULL table field in a subquery produces invalid results +# +create table t1 (id int(10) not null, cur_date datetime not null); +create table t2 (id int(10) not null, cur_date date not null); +insert into t1 (id, cur_date) values (1, '2007-04-25 18:30:22'); +insert into t2 (id, cur_date) values (1, '2007-04-25'); + +explain extended +select * from t1 +where id in (select id from t1 as x1 where (t1.cur_date is null)); +select * from t1 +where id in (select id from t1 as x1 where (t1.cur_date is null)); + +explain extended +select * from t2 +where id in (select id from t2 as x1 where (t2.cur_date is null)); +select * from t2 +where id in (select id from t2 as x1 where (t2.cur_date is null)); + +insert into t1 (id, cur_date) values (2, '2007-04-26 18:30:22'); +insert into t2 (id, cur_date) values (2, '2007-04-26'); + +explain extended +select * from t1 +where id in (select id from t1 as x1 where (t1.cur_date is null)); +select * from t1 +where id in (select id from t1 as x1 where (t1.cur_date is null)); + +explain extended +select * from t2 +where id in (select id from t2 as x1 where (t2.cur_date is null)); +select * from t2 +where id in (select id from t2 as x1 where (t2.cur_date is null)); + +drop table t1,t2; + --echo End of 5.0 tests diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index f9744baf19e..1881d2f7f1f 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -24,7 +24,8 @@ #include #include "sql_select.h" -static bool convert_constant_item(THD *thd, Field *field, Item **item); +static bool convert_constant_item(THD *thd, Item_field *field_item, + Item **item); static Item_result item_store_type(Item_result a, Item *item, my_bool unsigned_flag) @@ -317,7 +318,7 @@ longlong Item_func_nop_all::val_int() SYNOPSIS convert_constant_item() thd thread handle - field item will be converted using the type of this field + field_item item will be converted using the type of this field item [in/out] reference to the item to convert DESCRIPTION @@ -340,30 +341,46 @@ longlong Item_func_nop_all::val_int() 1 Item was replaced with an integer version of the item */ -static bool convert_constant_item(THD *thd, Field *field, Item **item) +static bool convert_constant_item(THD *thd, Item_field *field_item, + Item **item) { + Field *field= field_item->field; + int result= 0; + if (!(*item)->with_subselect && (*item)->const_item()) { /* For comparison purposes allow invalid dates like 2000-01-32 */ ulong orig_sql_mode= thd->variables.sql_mode; enum_check_fields orig_count_cuted_fields= thd->count_cuted_fields; + ulonglong orig_field_val; /* original field value if valid */ thd->variables.sql_mode= (orig_sql_mode & ~MODE_NO_ZERO_DATE) | MODE_INVALID_DATES; thd->count_cuted_fields= CHECK_FIELD_IGNORE; + /* + Store the value of the field if it references an outer field because + the call to save_in_field below overrides that value. 
+ */ + if (field_item->depended_from) + orig_field_val= field->val_int(); if (!(*item)->is_null() && !(*item)->save_in_field(field, 1)) { Item *tmp=new Item_int_with_ref(field->val_int(), *item, test(field->flags & UNSIGNED_FLAG)); - thd->variables.sql_mode= orig_sql_mode; - thd->count_cuted_fields= orig_count_cuted_fields; if (tmp) thd->change_item_tree(item, tmp); - return 1; // Item was replaced + result= 1; // Item was replaced + } + /* Restore the original field value. */ + if (field_item->depended_from) + { + result= field->store(orig_field_val, TRUE); + /* orig_field_val must be a valid value that can be restored back. */ + DBUG_ASSERT(!result); } thd->variables.sql_mode= orig_sql_mode; thd->count_cuted_fields= orig_count_cuted_fields; } - return 0; + return result; } @@ -411,15 +428,14 @@ void Item_bool_func2::fix_length_and_dec() thd= current_thd; if (!thd->is_context_analysis_only()) { - Item *arg_real_item= args[0]->real_item(); - if (arg_real_item->type() == FIELD_ITEM) + if (args[0]->real_item()->type() == FIELD_ITEM) { - Field *field=((Item_field*) arg_real_item)->field; - if (field->can_be_compared_as_longlong() && - !(arg_real_item->is_datetime() && + Item_field *field_item= (Item_field*) (args[0]->real_item()); + if (field_item->field->can_be_compared_as_longlong() && + !(field_item->is_datetime() && args[1]->result_type() == STRING_RESULT)) { - if (convert_constant_item(thd, field,&args[1])) + if (convert_constant_item(thd, field_item, &args[1])) { cmp.set_cmp_func(this, tmp_arg, tmp_arg+1, INT_RESULT); // Works for all types. @@ -428,15 +444,14 @@ void Item_bool_func2::fix_length_and_dec() } } } - arg_real_item= args[1]->real_item(); - if (arg_real_item->type() == FIELD_ITEM) + if (args[1]->real_item()->type() == FIELD_ITEM) { - Field *field=((Item_field*) arg_real_item)->field; - if (field->can_be_compared_as_longlong() && - !(arg_real_item->is_datetime() && + Item_field *field_item= (Item_field*) (args[1]->real_item()); + if (field_item->field->can_be_compared_as_longlong() && + !(field_item->is_datetime() && args[0]->result_type() == STRING_RESULT)) { - if (convert_constant_item(thd, field,&args[0])) + if (convert_constant_item(thd, field_item, &args[0])) { cmp.set_cmp_func(this, tmp_arg, tmp_arg+1, INT_RESULT); // Works for all types. @@ -1889,16 +1904,16 @@ void Item_func_between::fix_length_and_dec() thd->lex->sql_command != SQLCOM_CREATE_VIEW && thd->lex->sql_command != SQLCOM_SHOW_CREATE) { - Field *field=((Item_field*) (args[0]->real_item()))->field; - if (field->can_be_compared_as_longlong()) + Item_field *field_item= (Item_field*) (args[0]->real_item()); + if (field_item->field->can_be_compared_as_longlong()) { /* The following can't be recoded with || as convert_constant_item changes the argument */ - if (convert_constant_item(thd, field,&args[1])) + if (convert_constant_item(thd, field_item, &args[1])) cmp_type=INT_RESULT; // Works for all types. - if (convert_constant_item(thd, field,&args[2])) + if (convert_constant_item(thd, field_item, &args[2])) cmp_type=INT_RESULT; // Works for all types. 
} } @@ -3491,13 +3506,13 @@ void Item_func_in::fix_length_and_dec() thd->lex->sql_command != SQLCOM_SHOW_CREATE && cmp_type != INT_RESULT) { - Field *field= ((Item_field*) (args[0]->real_item()))->field; - if (field->can_be_compared_as_longlong()) + Item_field *field_item= (Item_field*) (args[0]->real_item()); + if (field_item->field->can_be_compared_as_longlong()) { bool all_converted= TRUE; for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++) { - if (!convert_constant_item (thd, field, &arg[0])) + if (!convert_constant_item (thd, field_item, &arg[0])) all_converted= FALSE; } if (all_converted) -- cgit v1.2.1 From a7fa078141acd6cde43c9121de8d3df208374c3d Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 8 Dec 2007 23:54:07 +0200 Subject: BUG#32694 post-merge adjustment of EXPLAIN results for 5.1. BitKeeper/etc/ignore: Added ylwrap to the ignore list --- .bzrignore | 1 + mysql-test/r/type_datetime.result | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/.bzrignore b/.bzrignore index d30466a0a36..c5cb262d2bd 100644 --- a/.bzrignore +++ b/.bzrignore @@ -3008,3 +3008,4 @@ win/vs71cache.txt win/vs8cache.txt zlib/*.ds? zlib/*.vcproj +ylwrap diff --git a/mysql-test/r/type_datetime.result b/mysql-test/r/type_datetime.result index e5592dc5dc8..d485c468a39 100644 --- a/mysql-test/r/type_datetime.result +++ b/mysql-test/r/type_datetime.result @@ -512,9 +512,9 @@ insert into t2 (id, cur_date) values (1, '2007-04-25'); explain extended select * from t1 where id in (select id from t1 as x1 where (t1.cur_date is null)); -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables -2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE Warnings: Note 1276 Field or reference 'test.t1.cur_date' of SELECT #2 was resolved in SELECT #1 Note 1003 select '1' AS `id`,'2007-04-25 18:30:22' AS `cur_date` from `test`.`t1` where ('1',(select 1 AS `Not_used` from `test`.`t1` `x1` where 0)) @@ -524,9 +524,9 @@ id cur_date explain extended select * from t2 where id in (select id from t2 as x1 where (t2.cur_date is null)); -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables -2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE Warnings: Note 1276 Field or reference 'test.t2.cur_date' of SELECT #2 was resolved in SELECT #1 Note 1003 select '1' AS `id`,'2007-04-25' AS `cur_date` from `test`.`t2` where ('1',(select 1 AS `Not_used` from `test`.`t2` `x1` where 0)) @@ -538,9 +538,9 @@ insert into t2 (id, cur_date) values (2, '2007-04-26'); explain extended select * from t1 where id in (select id from t1 as x1 where (t1.cur_date is null)); -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where -2 DEPENDENT SUBQUERY x1 ALL NULL NULL NULL NULL 2 
Using where +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where +2 DEPENDENT SUBQUERY x1 ALL NULL NULL NULL NULL 2 100.00 Using where Warnings: Note 1276 Field or reference 'test.t1.cur_date' of SELECT #2 was resolved in SELECT #1 Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`cur_date` AS `cur_date` from `test`.`t1` where (`test`.`t1`.`id`,(select 1 AS `Not_used` from `test`.`t1` `x1` where ((`test`.`t1`.`cur_date` = 0) and ((`test`.`t1`.`id`) = `test`.`x1`.`id`)))) @@ -550,9 +550,9 @@ id cur_date explain extended select * from t2 where id in (select id from t2 as x1 where (t2.cur_date is null)); -id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where -2 DEPENDENT SUBQUERY x1 ALL NULL NULL NULL NULL 2 Using where +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where +2 DEPENDENT SUBQUERY x1 ALL NULL NULL NULL NULL 2 100.00 Using where Warnings: Note 1276 Field or reference 'test.t2.cur_date' of SELECT #2 was resolved in SELECT #1 Note 1003 select `test`.`t2`.`id` AS `id`,`test`.`t2`.`cur_date` AS `cur_date` from `test`.`t2` where (`test`.`t2`.`id`,(select 1 AS `Not_used` from `test`.`t2` `x1` where ((`test`.`t2`.`cur_date` = 0) and ((`test`.`t2`.`id`) = `test`.`x1`.`id`)))) -- cgit v1.2.1 From 1cdd95f7d1a27723c1ae1d3a6f457a0042b44795 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 9 Dec 2007 11:53:07 -0800 Subject: Forced compilers to remove the warning appeared after the patch with a fix for bug 32694. --- sql/item_cmpfunc.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 1881d2f7f1f..4b584f5cb0b 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -353,6 +353,7 @@ static bool convert_constant_item(THD *thd, Item_field *field_item, ulong orig_sql_mode= thd->variables.sql_mode; enum_check_fields orig_count_cuted_fields= thd->count_cuted_fields; ulonglong orig_field_val; /* original field value if valid */ + LINT_INIT(orig_field_val); thd->variables.sql_mode= (orig_sql_mode & ~MODE_NO_ZERO_DATE) | MODE_INVALID_DATES; thd->count_cuted_fields= CHECK_FIELD_IGNORE; -- cgit v1.2.1 From 1ebb563422e5650f80addc3adf08f88044b4b340 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 10 Dec 2007 08:12:41 +0100 Subject: Bug #31177: Server variables can't be set to their current values fixes for SLES10 mysql-test/r/change_user.result: It's unsigned. mysys/my_getopt.c: Failsafe no longer needed, we handle signedness correctly now. sql/set_var.cc: ha_rows can be derived from all kinds of types, but they're all unsigned. Display it as such. tests/mysql_client_test.c: cannot easily check this here due to types. check with Alik whether we need this on top of change_user.test. 
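The max_join_size difference in the result file below is purely a signedness question: the all-ones ha_rows pattern prints as -1 through a signed 64-bit path and as 18446744073709551615 through an unsigned one. A minimal standalone C++ sketch, not the server code:

// Illustrative only: how the same bit pattern is rendered.
#include <cstdint>
#include <iostream>
#include <limits>

int main() {
  // ha_rows-style "no limit": every bit set in a 64-bit value.
  uint64_t max_join_size = std::numeric_limits<uint64_t>::max();

  // Signed path: prints -1 on two's-complement targets.
  int64_t as_signed = static_cast<int64_t>(max_join_size);
  std::cout << "signed:   " << as_signed << '\n';

  // Unsigned path: prints 18446744073709551615, the value the test expects.
  std::cout << "unsigned: " << max_join_size << '\n';
}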
--- mysql-test/r/change_user.result | 4 ++-- mysys/my_getopt.c | 2 -- sql/set_var.cc | 2 +- tests/mysql_client_test.c | 11 +++++++++++ 4 files changed, 14 insertions(+), 5 deletions(-) diff --git a/mysql-test/r/change_user.result b/mysql-test/r/change_user.result index cb409621d56..28b55dfd5e5 100644 --- a/mysql-test/r/change_user.result +++ b/mysql-test/r/change_user.result @@ -4,14 +4,14 @@ SELECT @@session.sql_big_selects; 1 SELECT @@global.max_join_size; @@global.max_join_size --1 +18446744073709551615 change_user SELECT @@session.sql_big_selects; @@session.sql_big_selects 1 SELECT @@global.max_join_size; @@global.max_join_size --1 +18446744073709551615 SET @@global.max_join_size = 10000; SET @@session.max_join_size = default; change_user diff --git a/mysys/my_getopt.c b/mysys/my_getopt.c index 7691472b76a..5132ac820b8 100644 --- a/mysys/my_getopt.c +++ b/mysys/my_getopt.c @@ -892,8 +892,6 @@ ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp, num= ((ulonglong) ULONG_MAX); adjusted= TRUE; } -#else - num= min(num, LONG_MAX); #endif break; default: diff --git a/sql/set_var.cc b/sql/set_var.cc index d78a691768e..8a6f0ac6285 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -1551,7 +1551,7 @@ Item *sys_var::item(THD *thd, enum_var_type var_type, LEX_STRING *base) pthread_mutex_lock(&LOCK_global_system_variables); value= *(ha_rows*) value_ptr(thd, var_type, base); pthread_mutex_unlock(&LOCK_global_system_variables); - return new Item_int((longlong) value); + return new Item_int((ulonglong) value); } case SHOW_MY_BOOL: { diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 409d67faf90..9b7812838bd 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -16990,6 +16990,12 @@ static void test_bug20023() Check that SQL_BIG_SELECTS will be the original one. ***********************************************************************/ +#if NOT_USED + /* + max_join_size is a ulong or better. + my_snprintf() only goes up to ul. + */ + /* Restore MAX_JOIN_SIZE. */ my_snprintf(query_buffer, @@ -16998,6 +17004,11 @@ static void test_bug20023() (int) max_join_size_orig); DIE_IF(mysql_query(&con, query_buffer)); + +#else + DIE_IF(mysql_query(&con, "SET @@global.max_join_size = -1")); +#endif + DIE_IF(mysql_query(&con, "SET @@session.max_join_size = default")); /* Issue COM_CHANGE_USER. 
*/ -- cgit v1.2.1 From dad3e3f66d2319754617b526893c3ee9f80fb7b2 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 10 Dec 2007 13:29:23 +0100 Subject: backport test for bug#30674 (bug not present in 5.1) --- mysql-test/extra/rpl_tests/rpl_extraMaster_Col.test | 9 +-------- mysql-test/suite/ndb/r/ndb_blob.result | 21 +++++++++++++++++++++ mysql-test/suite/ndb/t/ndb_blob.test | 20 ++++++++++++++++++++ .../suite/rpl/r/rpl_extraColmaster_innodb.result | 15 +++++++++------ .../suite/rpl/r/rpl_extraColmaster_myisam.result | 15 +++++++++------ .../suite/rpl_ndb/r/rpl_ndb_extraColMaster.result | 10 ++++++---- 6 files changed, 66 insertions(+), 24 deletions(-) diff --git a/mysql-test/extra/rpl_tests/rpl_extraMaster_Col.test b/mysql-test/extra/rpl_tests/rpl_extraMaster_Col.test index d3959d10306..1cf11755b3f 100644 --- a/mysql-test/extra/rpl_tests/rpl_extraMaster_Col.test +++ b/mysql-test/extra/rpl_tests/rpl_extraMaster_Col.test @@ -973,14 +973,7 @@ SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; --echo --echo ** update from master ** connection master; -####################################### -# This test should be uncommented -# once bug30674 is patched -####################################### - -#*************************** -#UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3; -#*************************** +UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3; --replace_column 5 CURRENT_TIMESTAMP SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; diff --git a/mysql-test/suite/ndb/r/ndb_blob.result b/mysql-test/suite/ndb/r/ndb_blob.result index 34f2c5fdd66..31ce36864af 100644 --- a/mysql-test/suite/ndb/r/ndb_blob.result +++ b/mysql-test/suite/ndb/r/ndb_blob.result @@ -568,3 +568,24 @@ select count(*) from t1; count(*) 0 drop table t1; +create table t1( +a int, +blob_nn blob not null, +text_nn text not null, +blob_nl blob, +text_nl text, +primary key(a) +) engine=ndb; +insert into t1(a) values (1); +Warnings: +Warning 1364 Field 'blob_nn' doesn't have a default value +Warning 1364 Field 'text_nn' doesn't have a default value +insert into t1(a, text_nl) values (2, 'MySQL Cluster NDB'); +Warnings: +Warning 1364 Field 'blob_nn' doesn't have a default value +Warning 1364 Field 'text_nn' doesn't have a default value +select a, length(blob_nn), length(text_nn), blob_nl, text_nl from t1 order by a; +a length(blob_nn) length(text_nn) blob_nl text_nl +1 0 0 NULL NULL +2 0 0 NULL MySQL Cluster NDB +drop table t1; diff --git a/mysql-test/suite/ndb/t/ndb_blob.test b/mysql-test/suite/ndb/t/ndb_blob.test index b9a8c7e20ee..0388913df8b 100644 --- a/mysql-test/suite/ndb/t/ndb_blob.test +++ b/mysql-test/suite/ndb/t/ndb_blob.test @@ -497,3 +497,23 @@ select count(*) from t1; drop table t1; # End of 4.1 tests + + +# bug # 30674 : +# NOT NULL Blobs should default to zero-length. 
Not NULL TEXT +# should default to zero-chars +create table t1( + a int, + blob_nn blob not null, + text_nn text not null, + blob_nl blob, + text_nl text, + primary key(a) +) engine=ndb; + +insert into t1(a) values (1); +insert into t1(a, text_nl) values (2, 'MySQL Cluster NDB'); + +select a, length(blob_nn), length(text_nn), blob_nl, text_nl from t1 order by a; + +drop table t1; diff --git a/mysql-test/suite/rpl/r/rpl_extraColmaster_innodb.result b/mysql-test/suite/rpl/r/rpl_extraColmaster_innodb.result index 28985eb8cba..5a98fa46785 100644 --- a/mysql-test/suite/rpl/r/rpl_extraColmaster_innodb.result +++ b/mysql-test/suite/rpl/r/rpl_extraColmaster_innodb.result @@ -1089,18 +1089,19 @@ c1 hex(c4) c5 3 62316231623162316231623162316231 QA ** update from master ** +UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; c1 hex(c4) c5 c6 c7 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP -3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP +3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP ** Check slave ** SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; c1 hex(c4) c5 1 62316231623162316231623162316231 Kyle 2 62316231623162316231623162316231 JOE -3 62316231623162316231623162316231 QA +3 62316231623162316231623162316231 TEST DROP TABLE t18; @@ -2229,18 +2230,19 @@ c1 hex(c4) c5 3 62316231623162316231623162316231 QA ** update from master ** +UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; c1 hex(c4) c5 c6 c7 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP -3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP +3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP ** Check slave ** SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; c1 hex(c4) c5 1 62316231623162316231623162316231 Kyle 2 62316231623162316231623162316231 JOE -3 62316231623162316231623162316231 QA +3 62316231623162316231623162316231 TEST DROP TABLE t18; @@ -3369,18 +3371,19 @@ c1 hex(c4) c5 3 62316231623162316231623162316231 QA ** update from master ** +UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; c1 hex(c4) c5 c6 c7 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP -3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP +3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP ** Check slave ** SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; c1 hex(c4) c5 1 62316231623162316231623162316231 Kyle 2 62316231623162316231623162316231 JOE -3 62316231623162316231623162316231 QA +3 62316231623162316231623162316231 TEST DROP TABLE t18; diff --git a/mysql-test/suite/rpl/r/rpl_extraColmaster_myisam.result b/mysql-test/suite/rpl/r/rpl_extraColmaster_myisam.result index 966f97e0578..27711666f37 100644 --- a/mysql-test/suite/rpl/r/rpl_extraColmaster_myisam.result +++ b/mysql-test/suite/rpl/r/rpl_extraColmaster_myisam.result @@ -1089,18 +1089,19 @@ c1 hex(c4) c5 3 62316231623162316231623162316231 QA ** update from master ** +UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; c1 hex(c4) c5 c6 c7 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP -3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP +3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP ** Check slave ** SELECT c1,hex(c4),c5 
FROM t18 ORDER BY c1; c1 hex(c4) c5 1 62316231623162316231623162316231 Kyle 2 62316231623162316231623162316231 JOE -3 62316231623162316231623162316231 QA +3 62316231623162316231623162316231 TEST DROP TABLE t18; @@ -2229,18 +2230,19 @@ c1 hex(c4) c5 3 62316231623162316231623162316231 QA ** update from master ** +UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; c1 hex(c4) c5 c6 c7 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP -3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP +3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP ** Check slave ** SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; c1 hex(c4) c5 1 62316231623162316231623162316231 Kyle 2 62316231623162316231623162316231 JOE -3 62316231623162316231623162316231 QA +3 62316231623162316231623162316231 TEST DROP TABLE t18; @@ -3369,18 +3371,19 @@ c1 hex(c4) c5 3 62316231623162316231623162316231 QA ** update from master ** +UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; c1 hex(c4) c5 c6 c7 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP -3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP +3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP ** Check slave ** SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; c1 hex(c4) c5 1 62316231623162316231623162316231 Kyle 2 62316231623162316231623162316231 JOE -3 62316231623162316231623162316231 QA +3 62316231623162316231623162316231 TEST DROP TABLE t18; diff --git a/mysql-test/suite/rpl_ndb/r/rpl_ndb_extraColMaster.result b/mysql-test/suite/rpl_ndb/r/rpl_ndb_extraColMaster.result index 38c8ab2afc5..9aa7be985fc 100644 --- a/mysql-test/suite/rpl_ndb/r/rpl_ndb_extraColMaster.result +++ b/mysql-test/suite/rpl_ndb/r/rpl_ndb_extraColMaster.result @@ -1089,18 +1089,19 @@ c1 hex(c4) c5 3 62316231623162316231623162316231 QA ** update from master ** +UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; c1 hex(c4) c5 c6 c7 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP -3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP +3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP ** Check slave ** SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; c1 hex(c4) c5 1 62316231623162316231623162316231 Kyle 2 62316231623162316231623162316231 JOE -3 62316231623162316231623162316231 QA +3 62316231623162316231623162316231 TEST DROP TABLE t18; @@ -2229,18 +2230,19 @@ c1 hex(c4) c5 3 62316231623162316231623162316231 QA ** update from master ** +UPDATE t18 SET c5 = 'TEST' WHERE c6 = 3; SELECT c1,hex(c4),c5,c6,c7 FROM t18 ORDER BY c1; c1 hex(c4) c5 c6 c7 1 62316231623162316231623162316231 Kyle 1 CURRENT_TIMESTAMP 2 62316231623162316231623162316231 JOE 2 CURRENT_TIMESTAMP -3 62316231623162316231623162316231 QA 3 CURRENT_TIMESTAMP +3 62316231623162316231623162316231 TEST 3 CURRENT_TIMESTAMP ** Check slave ** SELECT c1,hex(c4),c5 FROM t18 ORDER BY c1; c1 hex(c4) c5 1 62316231623162316231623162316231 Kyle 2 62316231623162316231623162316231 JOE -3 62316231623162316231623162316231 QA +3 62316231623162316231623162316231 TEST DROP TABLE t18; -- cgit v1.2.1 From dcc2e663916925c152c14599751ab107e0ae7999 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 10 Dec 2007 16:16:21 +0100 Subject: Test fails because of non deterministric function: show grants for current_user() 
mysql-test/r/read_only.result: Removing non deterministic test results from test. mysql-test/t/read_only.test: Removing non deterministic test results from test. --- mysql-test/r/read_only.result | 5 ----- mysql-test/t/read_only.test | 1 - 2 files changed, 6 deletions(-) diff --git a/mysql-test/r/read_only.result b/mysql-test/r/read_only.result index 827a137f5b2..1bf99a8ea07 100644 --- a/mysql-test/r/read_only.result +++ b/mysql-test/r/read_only.result @@ -60,11 +60,6 @@ grant all on mysqltest_db2.* to `mysqltest_u1`@`%`; create database mysqltest_db1; grant all on mysqltest_db1.* to `mysqltest_u1`@`%`; flush privileges; -show grants for current_user(); -Grants for mysqltest_u1@% -GRANT USAGE ON *.* TO 'mysqltest_u1'@'%' -GRANT ALL PRIVILEGES ON `mysqltest_db2`.* TO 'mysqltest_u1'@'%' -GRANT ALL PRIVILEGES ON `mysqltest_db1`.* TO 'mysqltest_u1'@'%' create database mysqltest_db2; ERROR HY000: The MySQL server is running with the --read-only option so it cannot execute this statement show databases like '%mysqltest_db2%'; diff --git a/mysql-test/t/read_only.test b/mysql-test/t/read_only.test index 5ec062bc103..cca9bbd6fde 100644 --- a/mysql-test/t/read_only.test +++ b/mysql-test/t/read_only.test @@ -137,7 +137,6 @@ grant all on mysqltest_db1.* to `mysqltest_u1`@`%`; flush privileges; connect (con_bug27440,127.0.0.1,mysqltest_u1,,test,$MASTER_MYPORT,); connection con_bug27440; -show grants for current_user(); --error ER_OPTION_PREVENTS_STATEMENT create database mysqltest_db2; show databases like '%mysqltest_db2%'; -- cgit v1.2.1 From 6629d45fd7b8a7203d4c03172e6fd7576b542ad4 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 11 Dec 2007 11:41:19 +0100 Subject: remoce compiler warnings --- ndb/src/ndbapi/NdbOperationDefine.cpp | 2 ++ ndb/src/ndbapi/NdbOperationSearch.cpp | 2 ++ 2 files changed, 4 insertions(+) diff --git a/ndb/src/ndbapi/NdbOperationDefine.cpp b/ndb/src/ndbapi/NdbOperationDefine.cpp index bb56008cd87..cd357bb44c1 100644 --- a/ndb/src/ndbapi/NdbOperationDefine.cpp +++ b/ndb/src/ndbapi/NdbOperationDefine.cpp @@ -586,6 +586,8 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, slack = 4 * totalSizeInWords - attributeSize; break; } + default: + break; } if (((UintPtr)aValue & 3) != 0 || (slack != 0)) diff --git a/ndb/src/ndbapi/NdbOperationSearch.cpp b/ndb/src/ndbapi/NdbOperationSearch.cpp index 5639c5dfc09..7f8659916dc 100644 --- a/ndb/src/ndbapi/NdbOperationSearch.cpp +++ b/ndb/src/ndbapi/NdbOperationSearch.cpp @@ -169,6 +169,8 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, slack = 4*totalSizeInWords - attributeSize; break; } + default: + break; } if (((align & 3) != 0) || (slack != 0) || (tDistrKey && (align != 0))) -- cgit v1.2.1 From e63f750439d25054b67bfcc588f232b0e96bc30e Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 11 Dec 2007 11:51:14 +0100 Subject: Enabled test rpl_ndb_ddl --- mysql-test/suite/rpl_ndb/r/rpl_ndb_ddl.result | 40 +++++++++++++++++---------- mysql-test/suite/rpl_ndb/t/disabled.def | 1 - 2 files changed, 26 insertions(+), 15 deletions(-) diff --git a/mysql-test/suite/rpl_ndb/r/rpl_ndb_ddl.result b/mysql-test/suite/rpl_ndb/r/rpl_ndb_ddl.result index aeaca1e7de0..e668b57293e 100644 --- a/mysql-test/suite/rpl_ndb/r/rpl_ndb_ddl.result +++ b/mysql-test/suite/rpl_ndb/r/rpl_ndb_ddl.result @@ -1086,6 +1086,9 @@ Modified # Created # Security_type DEFINER Comment +character_set_client latin1 +collation_connection latin1_swedish_ci +Database Collation latin1_swedish_ci -------- switch to slave -------- SHOW PROCEDURE STATUS LIKE 'p1'; @@ -1097,6 
+1100,9 @@ Modified # Created # Security_type DEFINER Comment +character_set_client latin1 +collation_connection latin1_swedish_ci +Database Collation latin1_swedish_ci -------- switch to master ------- @@ -1149,6 +1155,9 @@ Modified # Created # Security_type DEFINER Comment I have been altered +character_set_client latin1 +collation_connection latin1_swedish_ci +Database Collation latin1_swedish_ci -------- switch to slave -------- SHOW PROCEDURE STATUS LIKE 'p1'; @@ -1160,6 +1169,9 @@ Modified # Created # Security_type DEFINER Comment I have been altered +character_set_client latin1 +collation_connection latin1_swedish_ci +Database Collation latin1_swedish_ci -------- switch to master ------- @@ -1251,13 +1263,13 @@ TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- SHOW CREATE VIEW v1; -View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` latin1 latin1_swedish_ci -------- switch to slave -------- SHOW CREATE VIEW v1; -View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` latin1 latin1_swedish_ci -------- switch to master ------- @@ -1302,13 +1314,13 @@ TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- SHOW CREATE VIEW v1; -View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` latin1 latin1_swedish_ci -------- switch to slave -------- SHOW CREATE VIEW v1; -View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`f1` AS `f1` from `t1` latin1 latin1_swedish_ci -------- switch to master ------- @@ -1402,13 +1414,13 @@ TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to master ------- SHOW TRIGGERS; -Trigger Event Table Statement Timing Created sql_mode Definer -trg1 INSERT t1 SET @a:=1 BEFORE NULL root@localhost +Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation +trg1 INSERT t1 SET @a:=1 BEFORE NULL root@localhost latin1 latin1_swedish_ci latin1_swedish_ci -------- switch to slave -------- SHOW TRIGGERS; -Trigger Event Table Statement Timing Created sql_mode Definer -trg1 INSERT t1 SET @a:=1 BEFORE NULL root@localhost +Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation +trg1 INSERT t1 SET @a:=1 BEFORE NULL root@localhost latin1 latin1_swedish_ci latin1_swedish_ci -------- switch to master ------- @@ -1453,11 +1465,11 @@ TEST-INFO: SLAVE: The INSERT is committed (Succeeded) -------- switch to 
master ------- SHOW TRIGGERS; -Trigger Event Table Statement Timing Created sql_mode Definer +Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation -------- switch to slave -------- SHOW TRIGGERS; -Trigger Event Table Statement Timing Created sql_mode Definer +Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation -------- switch to master ------- diff --git a/mysql-test/suite/rpl_ndb/t/disabled.def b/mysql-test/suite/rpl_ndb/t/disabled.def index 60bfa559953..bb701b9dc3e 100644 --- a/mysql-test/suite/rpl_ndb/t/disabled.def +++ b/mysql-test/suite/rpl_ndb/t/disabled.def @@ -14,7 +14,6 @@ rpl_ndb_2innodb : Bug #32648 Test failure between NDB Cluster and other engines rpl_ndb_2myisam : Bug #32648 Test failure between NDB Cluster and other engines rpl_ndb_2other : Bug #32648 Test failure between NDB Cluster and other engines -rpl_ndb_ddl : BUG#28798 2007-05-31 lars Valgrind failure in NDB rpl_ndb_ctype_ucs2_def : BUG#27404 util thd mysql_parse sig11 when mysqld default multibyte charset rpl_ndb_extraColMaster : BUG#30854 : Tables name show as binary in slave err msg on vm-win2003-64-b and Solaris rpl_ndb_mix_innodb : Bug #32720 Test rpl_ndb_mix_innodb fails on SPARC and PowerPC -- cgit v1.2.1 From c4a39468ea8858cda986974d4728754bf8a37575 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 11 Dec 2007 15:08:00 +0100 Subject: bug#32759 already fixed mysql-test/suite/ndb/t/disabled.def: bug#32759 already fixed - see rev 1.131.1.6 of sql/ha_ndbcluster_binlog.cc --- mysql-test/suite/ndb/t/disabled.def | 1 - 1 file changed, 1 deletion(-) diff --git a/mysql-test/suite/ndb/t/disabled.def b/mysql-test/suite/ndb/t/disabled.def index 1752314ea47..2f07bb052ca 100644 --- a/mysql-test/suite/ndb/t/disabled.def +++ b/mysql-test/suite/ndb/t/disabled.def @@ -12,7 +12,6 @@ partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table ndb_partition_error2 : HF is not sure if the test can work as internded on all the platforms -ndb_binlog_basic : Bug #32759 2007-11-27 mats ndb_binlog_basic assert failure 'thd->transaction.stmt.modified_non_trans_table' # the below testcase have been reworked to avoid the bug, test contains comment, keep bug open #ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events -- cgit v1.2.1 From 481385de3afda8eca59d982afdc69f42c5349fb9 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 11 Dec 2007 15:19:37 +0100 Subject: compile error fix for osx --- storage/ndb/src/mgmsrv/MgmtSrvr.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp index a00c68007a7..184d51bad78 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -3010,7 +3010,6 @@ int MgmtSrvr::connect_to_self(void) return 0; } -template class Vector; template class MutexVector; template class MutexVector; template class Vector; -- cgit v1.2.1 From 8e6e313c3622271981911031528509746f1a68e3 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 11 Dec 2007 16:04:33 +0100 Subject: bug#33142: access after free() for blob replication and online alter table --- storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index a82983fca8c..dff953923fe 
100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -792,6 +792,18 @@ NdbEventOperationImpl::receive_event() p = p->next(); } } + // change the blobHandle's to refer to the new table object. + NdbBlob *p = theBlobList; + while (p) + { + int no = p->getColumn()->getColumnNo(); + NdbColumnImpl *tAttrInfo = at->getColumn(no); + DBUG_PRINT("info", ("blob_handle: 0x%lx " + "switching column impl 0x%lx -> 0x%lx", + (long) p, (long) p->theColumn, (long) tAttrInfo)); + p->theColumn = tAttrInfo; + p = p->next(); + } if (tmp_table_impl) delete tmp_table_impl; } -- cgit v1.2.1 From 841600c05f768efa8464dc2eff3eeb6271bcb73d Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 11 Dec 2007 19:51:44 +0100 Subject: remove memleak caused by dynamic variables being lost --- sql/ha_ndbcluster_binlog.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 990c00261a8..d015750ee44 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -243,7 +243,7 @@ static void run_query(THD *thd, char *buf, char *end, { ulong save_thd_query_length= thd->query_length; char *save_thd_query= thd->query; - struct system_variables save_thd_variables= thd->variables; + ulong save_thread_id= thd->variables.pseudo_thread_id; struct system_status_var save_thd_status_var= thd->status_var; THD_TRANS save_thd_transaction_all= thd->transaction.all; THD_TRANS save_thd_transaction_stmt= thd->transaction.stmt; @@ -281,7 +281,7 @@ static void run_query(THD *thd, char *buf, char *end, thd->options= save_thd_options; thd->query_length= save_thd_query_length; thd->query= save_thd_query; - thd->variables= save_thd_variables; + thd->variables.pseudo_thread_id= save_thread_id; thd->status_var= save_thd_status_var; thd->transaction.all= save_thd_transaction_all; thd->transaction.stmt= save_thd_transaction_stmt; -- cgit v1.2.1 From 3e7ec7cfebdd4b94fb71a0baf69372a2db275cf0 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 11 Dec 2007 19:52:35 +0100 Subject: add capability to run certain testsuites optionally depending on name of clone --- mysql-test/mysql-test-run.pl | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 1a8c18f7d27..18633d095b0 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -129,7 +129,15 @@ our $opt_vs_config = $ENV{'MTR_VS_CONFIG'}; our $default_vardir; our $opt_usage; -our $opt_suites= "main,binlog,rpl,rpl_ndb,ndb"; # Default suites to run +our $opt_suites; +our $opt_suites_default= "main,binlog,rpl,rpl_ndb,ndb"; # Default suites to run +our @extra_suites= +( + ["mysql-5.1-new-ndb", "ndb_team"], + ["mysql-5.1-telco-6.2", "ndb_team"], + ["mysql-5.1-telco-6.3", "ndb_team"], +); + our $opt_script_debug= 0; # Script debugging, enable with --script-debug our $opt_verbose= 0; # Verbose output, enable with --verbose @@ -397,6 +405,20 @@ sub main () { else { # Figure out which tests we are going to run + if (!$opt_suites) + { + # use default and add any extra_suites as defined + $opt_suites= $opt_suites_default; + my $ddd= basename(dirname($glob_mysql_test_dir)); + foreach my $extra_suite (@extra_suites) + { + if ($extra_suite->[0] eq "$ddd") + { + $opt_suites= "$extra_suite->[1],$opt_suites"; + } + } + } + my $tests= collect_test_cases($opt_suites); # Turn off NDB and other similar options if no tests use it @@ -5195,7 +5217,7 @@
Options to control what test suites or cases to run start-from=PREFIX Run test cases starting from test prefixed with PREFIX suite[s]=NAME1,..,NAMEN Collect tests in suites from the comma separated list of suite names. - The default is: "$opt_suites" + The default is: "$opt_suites_default" skip-rpl Skip the replication test cases. skip-im Don't start IM, and skip the IM test cases big-test Set the environment variable BIG_TEST, which can be -- cgit v1.2.1 From 87d8e15b2e44eb6f95a619e775a0b01af7165aea Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 11 Dec 2007 19:57:26 +0100 Subject: move randomly failing tests to ndb_team suite mysql-test/suite/ndb_team/t/ndb_dd_backuprestore.test: Rename: mysql-test/suite/ndb/t/ndb_dd_backuprestore.test -> mysql-test/suite/ndb_team/t/ndb_dd_backuprestore.test mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result: Rename: mysql-test/suite/ndb/r/ndb_dd_backuprestore.result -> mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result mysql-test/suite/ndb_team/r/ndb_backup_print.result: Rename: mysql-test/suite/ndb/r/ndb_backup_print.result -> mysql-test/suite/ndb_team/r/ndb_backup_print.result mysql-test/suite/ndb_team/t/ndb_backup_print.test: Rename: mysql-test/suite/ndb/t/ndb_backup_print.test -> mysql-test/suite/ndb_team/t/ndb_backup_print.test --- mysql-test/suite/ndb/r/ndb_backup_print.result | 65 --- mysql-test/suite/ndb/r/ndb_dd_backuprestore.result | 487 --------------------- mysql-test/suite/ndb/t/disabled.def | 2 - mysql-test/suite/ndb/t/ndb_backup_print.test | 68 --- mysql-test/suite/ndb/t/ndb_dd_backuprestore.test | 349 --------------- .../suite/ndb_team/r/ndb_backup_print.result | 65 +++ .../suite/ndb_team/r/ndb_dd_backuprestore.result | 487 +++++++++++++++++++++ mysql-test/suite/ndb_team/t/ndb_backup_print.test | 68 +++ .../suite/ndb_team/t/ndb_dd_backuprestore.test | 349 +++++++++++++++ 9 files changed, 969 insertions(+), 971 deletions(-) delete mode 100644 mysql-test/suite/ndb/r/ndb_backup_print.result delete mode 100644 mysql-test/suite/ndb/r/ndb_dd_backuprestore.result delete mode 100644 mysql-test/suite/ndb/t/ndb_backup_print.test delete mode 100644 mysql-test/suite/ndb/t/ndb_dd_backuprestore.test create mode 100644 mysql-test/suite/ndb_team/r/ndb_backup_print.result create mode 100644 mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result create mode 100644 mysql-test/suite/ndb_team/t/ndb_backup_print.test create mode 100644 mysql-test/suite/ndb_team/t/ndb_dd_backuprestore.test diff --git a/mysql-test/suite/ndb/r/ndb_backup_print.result b/mysql-test/suite/ndb/r/ndb_backup_print.result deleted file mode 100644 index 7fa073bafb5..00000000000 --- a/mysql-test/suite/ndb/r/ndb_backup_print.result +++ /dev/null @@ -1,65 +0,0 @@ -use test; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; -Connected to Management Server at: : -Waiting for completed, this may take several minutes -Node : Backup started from node -Node : Backup started from node completed - StartGCP: StopGCP: - #Records: #LogRecords: - Data: bytes Log: bytes -create table t1 -(pk int key -,a1 BIT(1), a2 BIT(5), a3 BIT(33), a4 BIT(63), a5 BIT(64) -,b1 TINYINT, b2 TINYINT UNSIGNED -,c1 SMALLINT, c2 SMALLINT UNSIGNED -,d1 INT, d2 INT UNSIGNED -,e1 BIGINT, e2 BIGINT UNSIGNED -,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY -,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY -,h1 BINARY(1), h2 BINARY(8), h3 BINARY(255) -,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000) -) engine ndb; -insert into t1 values -(1 -,0x1, 0x17, 0x789a, 
0x789abcde, 0xfedc0001 -,127, 255 -,32767, 65535 -,2147483647, 4294967295 -,9223372036854775807, 18446744073709551615 -,'1','12345678901234567890123456789012','123456789' - ,'1','12345678901234567890123456789012','123456789' - ,0x12,0x123456789abcdef0, 0x012345 -,0x12,0x123456789abcdef0, 0x00123450 -); -insert into t1 values -(2 -,0, 0, 0, 0, 0 -,-128, 0 -,-32768, 0 -,-2147483648, 0 -,-9223372036854775808, 0 -,'','','' - ,'','','' - ,0x0,0x0,0x0 -,0x0,0x0,0x0 -); -insert into t1 values -(3 -,NULL,NULL,NULL,NULL,NULL -,NULL,NULL -,NULL,NULL -,NULL,NULL -,NULL,NULL -,NULL,NULL,NULL -,NULL,NULL,NULL -,NULL,NULL,NULL -,NULL,NULL,NULL -); -Connected to Management Server at: : -Waiting for completed, this may take several minutes -Node : Backup started from node -Node : Backup started from node completed - StartGCP: StopGCP: - #Records: #LogRecords: - Data: bytes Log: bytes -drop table t1; diff --git a/mysql-test/suite/ndb/r/ndb_dd_backuprestore.result b/mysql-test/suite/ndb/r/ndb_dd_backuprestore.result deleted file mode 100644 index c82fe560121..00000000000 --- a/mysql-test/suite/ndb/r/ndb_dd_backuprestore.result +++ /dev/null @@ -1,487 +0,0 @@ -DROP TABLE IF EXISTS test.t1; -DROP TABLE IF EXISTS test.t2; -DROP TABLE IF EXISTS test.t3; -DROP TABLE IF EXISTS test.t4; -DROP TABLE IF EXISTS test.t5; -DROP TABLE IF EXISTS test.t6; -**** Test 1 Simple DD backup and restore **** -CREATE LOGFILE GROUP log_group1 -ADD UNDOFILE './log_group1/undofile.dat' -INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE table_space1 -ADD DATAFILE './table_space1/datafile.dat' -USE LOGFILE GROUP log_group1 -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 -(pk1 MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 CHAR(50) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL) TABLESPACE table_space1 STORAGE DISK ENGINE=NDB; -SELECT COUNT(*) FROM test.t1; -COUNT(*) -500 -SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; -pk1 c2 c3 hex(c4) -1 Sweden 500 1 -2 Sweden 499 1 -3 Sweden 498 1 -4 Sweden 497 1 -5 Sweden 496 1 -CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; -DELETE FROM test.backup_info; -LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; -SELECT @the_backup_id:=backup_id FROM test.backup_info; -@the_backup_id:=backup_id - -DROP TABLE test.backup_info; -DROP TABLE test.t1; -ALTER TABLESPACE table_space1 -DROP DATAFILE './table_space1/datafile.dat' -ENGINE = NDB; -DROP TABLESPACE table_space1 -ENGINE = NDB; -DROP LOGFILE GROUP log_group1 -ENGINE =NDB; -SELECT COUNT(*) FROM test.t1; -COUNT(*) -500 -SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; -pk1 c2 c3 hex(c4) -1 Sweden 500 1 -2 Sweden 499 1 -3 Sweden 498 1 -4 Sweden 497 1 -5 Sweden 496 1 -**** Test 2 Mixed Cluster Test backup and restore **** -CREATE TABLE test.t2 -(pk1 MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 VARCHAR(200) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL)ENGINE=NDB; -CREATE TABLE test.t3 (c1 int not null auto_increment, data LONGBLOB, PRIMARY KEY(c1))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB; -CREATE TABLE test.t4 (c1 int not null auto_increment, data LONGBLOB, PRIMARY KEY(c1))ENGINE=NDB; -SELECT COUNT(*) FROM test.t1; -COUNT(*) -500 -SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; -pk1 c2 c3 hex(c4) -1 Sweden 500 1 -2 Sweden 499 1 -3 Sweden 498 1 -4 Sweden 497 1 -5 Sweden 496 1 -SELECT COUNT(*) FROM test.t2; -COUNT(*) -500 -SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY pk1 LIMIT 5; -pk1 c2 c3 
hex(c4) -1 Sweden, Texas 500 0 -2 Sweden, Texas 499 0 -3 Sweden, Texas 498 0 -4 Sweden, Texas 497 0 -5 Sweden, Texas 496 0 -SELECT COUNT(*) FROM test.t3; -COUNT(*) -100 -SELECT LENGTH(data) FROM test.t3 WHERE c1 = 1; -LENGTH(data) -1024 -SELECT LENGTH(data) FROM test.t3 WHERE c1 = 2; -LENGTH(data) -16384 -SELECT COUNT(*) FROM test.t4; -COUNT(*) -100 -SELECT LENGTH(data) FROM test.t4 WHERE c1 = 1; -LENGTH(data) -1024 -SELECT LENGTH(data) FROM test.t4 WHERE c1 = 2; -LENGTH(data) -16384 -CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; -DELETE FROM test.backup_info; -LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; -SELECT @the_backup_id:=backup_id FROM test.backup_info; -@the_backup_id:=backup_id - -DROP TABLE test.backup_info; -DROP TABLE test.t1; -DROP TABLE test.t2; -DROP TABLE test.t3; -DROP TABLE test.t4; -ALTER TABLESPACE table_space1 -DROP DATAFILE './table_space1/datafile.dat' -ENGINE = NDB; -DROP TABLESPACE table_space1 -ENGINE = NDB; -DROP LOGFILE GROUP log_group1 -ENGINE =NDB; -SELECT COUNT(*) FROM test.t1; -COUNT(*) -500 -SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; -pk1 c2 c3 hex(c4) -1 Sweden 500 1 -2 Sweden 499 1 -3 Sweden 498 1 -4 Sweden 497 1 -5 Sweden 496 1 -SELECT COUNT(*) FROM test.t2; -COUNT(*) -500 -SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY pk1 LIMIT 5; -pk1 c2 c3 hex(c4) -1 Sweden, Texas 500 0 -2 Sweden, Texas 499 0 -3 Sweden, Texas 498 0 -4 Sweden, Texas 497 0 -5 Sweden, Texas 496 0 -SELECT COUNT(*) FROM test.t3; -COUNT(*) -100 -SELECT LENGTH(data) FROM test.t3 WHERE c1 = 1; -LENGTH(data) -1024 -SELECT LENGTH(data) FROM test.t3 WHERE c1 = 2; -LENGTH(data) -16384 -SELECT COUNT(*) FROM test.t4; -COUNT(*) -100 -SELECT LENGTH(data) FROM test.t4 WHERE c1 = 1; -LENGTH(data) -1024 -SELECT LENGTH(data) FROM test.t4 WHERE c1 = 2; -LENGTH(data) -16384 -DROP TABLE test.t1; -DROP TABLE test.t2; -DROP TABLE test.t3; -DROP TABLE test.t4; -**** Test 3 Adding partition Test backup and restore **** -CREATE TABLESPACE table_space2 -ADD DATAFILE './table_space2/datafile.dat' -USE LOGFILE GROUP log_group1 -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(150) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 4; -CREATE TABLE test.t4 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(180) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 2; -CREATE TABLE test.t2 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY KEY(c3) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB); -CREATE TABLE test.t5 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY KEY(pk1) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB); -CREATE TABLE test.t3 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(202) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY RANGE (c3) PARTITIONS 3 (PARTITION x1 VALUES LESS THAN (105), PARTITION x2 VALUES LESS THAN (333), PARTITION x3 VALUES LESS THAN (720)); -CREATE TABLE test.t6 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(220) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY 
KEY(pk1,c3))ENGINE=NDB PARTITION BY RANGE (pk1) PARTITIONS 2 (PARTITION x1 VALUES LESS THAN (333), PARTITION x2 VALUES LESS THAN (720)); -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, - `c2` varchar(150) NOT NULL, - `c3` int(11) NOT NULL, - `c4` bit(1) NOT NULL, - PRIMARY KEY (`pk1`,`c3`) -) /*!50100 TABLESPACE table_space1 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 4 */ -SHOW CREATE TABLE test.t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, - `c2` text NOT NULL, - `c3` int(11) NOT NULL, - `c4` bit(1) NOT NULL, - PRIMARY KEY (`pk1`,`c3`) -) /*!50100 TABLESPACE table_space2 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ -SHOW CREATE TABLE test.t3; -Table Create Table -t3 CREATE TABLE `t3` ( - `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, - `c2` varchar(202) NOT NULL, - `c3` int(11) NOT NULL, - `c4` bit(1) NOT NULL, - PRIMARY KEY (`pk1`,`c3`) -) /*!50100 TABLESPACE table_space2 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) */ -SHOW CREATE TABLE test.t4; -Table Create Table -t4 CREATE TABLE `t4` ( - `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, - `c2` varchar(180) NOT NULL, - `c3` int(11) NOT NULL, - `c4` bit(1) NOT NULL, - PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 2 */ -SHOW CREATE TABLE test.t5; -Table Create Table -t5 CREATE TABLE `t5` ( - `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, - `c2` text NOT NULL, - `c3` int(11) NOT NULL, - `c4` bit(1) NOT NULL, - PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ -SHOW CREATE TABLE test.t6; -Table Create Table -t6 CREATE TABLE `t6` ( - `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, - `c2` varchar(220) NOT NULL, - `c3` int(11) NOT NULL, - `c4` bit(1) NOT NULL, - PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */ -SELECT * FROM information_schema.partitions WHERE table_name= 't1'; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 -NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 -NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 -NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 -SELECT * FROM information_schema.partitions WHERE table_name= 't2'; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME 
PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 -NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 -SELECT * FROM information_schema.partitions WHERE table_name= 't3'; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 -NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 -NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 -SELECT * FROM information_schema.partitions WHERE table_name= 't4'; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL -NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL -SELECT * FROM information_schema.partitions WHERE table_name= 't5'; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL -NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL -SELECT * FROM information_schema.partitions WHERE table_name= 't6'; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL -NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL -SELECT COUNT(*) FROM test.t1; -COUNT(*) -250 -SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5; -pk1 c2 c3 hex(c4) -250 Sweden, Texas 2 0 -249 Sweden, Texas 4 0 -248 Sweden, 
Texas 6 0 -247 Sweden, Texas 8 0 -246 Sweden, Texas 10 0 -SELECT COUNT(*) FROM test.t2; -COUNT(*) -250 -SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5; -pk1 c2 c3 hex(c4) -250 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 1 1 -249 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 3 1 -248 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 5 1 -247 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 7 1 -246 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 9 1 -SELECT COUNT(*) FROM test.t3; -COUNT(*) -250 -SELECT pk1, c2, c3, hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5; -pk1 c2 c3 hex(c4) -250 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 0 1 -249 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 2 1 -248 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 4 1 -247 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 6 1 -246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1 -SELECT COUNT(*) FROM test.t4; -COUNT(*) -250 -SELECT pk1, c2, c3, hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5; -pk1 c2 c3 hex(c4) -250 Sweden, Texas 2 0 -249 Sweden, Texas 4 0 -248 Sweden, Texas 6 0 -247 Sweden, Texas 8 0 -246 Sweden, Texas 10 0 -SELECT COUNT(*) FROM test.t5; -COUNT(*) -250 -SELECT pk1, c2, c3, hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5; -pk1 c2 c3 hex(c4) -250 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 1 1 -249 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 3 1 -248 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 5 1 -247 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 7 1 -246 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 9 1 -SELECT COUNT(*) FROM test.t6; -COUNT(*) -250 -SELECT pk1, c2, c3, hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5; -pk1 c2 c3 hex(c4) -250 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 0 1 -249 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 2 1 -248 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 4 1 -247 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 6 1 -246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1 -CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; -DELETE FROM test.backup_info; -LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; -SELECT @the_backup_id:=backup_id FROM test.backup_info; -@the_backup_id:=backup_id - -DROP TABLE test.backup_info; -DROP TABLE test.t1; -DROP TABLE test.t2; -DROP TABLE test.t3; -DROP TABLE test.t4; -DROP TABLE test.t5; -DROP TABLE test.t6; -ALTER TABLESPACE table_space1 -DROP DATAFILE './table_space1/datafile.dat' -ENGINE = NDB; -ALTER TABLESPACE table_space2 -DROP DATAFILE './table_space2/datafile.dat' -ENGINE = NDB; -DROP TABLESPACE table_space1 -ENGINE = NDB; -DROP TABLESPACE table_space2 -ENGINE = NDB; -DROP LOGFILE GROUP log_group1 -ENGINE =NDB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, - `c2` varchar(150) NOT NULL, - `c3` int(11) NOT NULL, - `c4` bit(1) NOT NULL, - PRIMARY KEY (`pk1`,`c3`) -) /*!50100 TABLESPACE table_space1 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 4 */ -SHOW CREATE TABLE test.t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, - `c2` text NOT NULL, - `c3` int(11) NOT NULL, - `c4` bit(1) NOT NULL, - PRIMARY KEY (`pk1`,`c3`) -) /*!50100 TABLESPACE table_space2 STORAGE DISK */ 
ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ -SHOW CREATE TABLE test.t3; -Table Create Table -t3 CREATE TABLE `t3` ( - `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, - `c2` varchar(202) NOT NULL, - `c3` int(11) NOT NULL, - `c4` bit(1) NOT NULL, - PRIMARY KEY (`pk1`,`c3`) -) /*!50100 TABLESPACE table_space2 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) */ -SHOW CREATE TABLE test.t4; -Table Create Table -t4 CREATE TABLE `t4` ( - `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, - `c2` varchar(180) NOT NULL, - `c3` int(11) NOT NULL, - `c4` bit(1) NOT NULL, - PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 2 */ -SHOW CREATE TABLE test.t5; -Table Create Table -t5 CREATE TABLE `t5` ( - `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, - `c2` text NOT NULL, - `c3` int(11) NOT NULL, - `c4` bit(1) NOT NULL, - PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ -SHOW CREATE TABLE test.t6; -Table Create Table -t6 CREATE TABLE `t6` ( - `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, - `c2` varchar(220) NOT NULL, - `c3` int(11) NOT NULL, - `c4` bit(1) NOT NULL, - PRIMARY KEY (`pk1`,`c3`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */ -SELECT * FROM information_schema.partitions WHERE table_name= 't1'; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 -NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 -NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 -NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 -SELECT * FROM information_schema.partitions WHERE table_name= 't2'; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 -NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 -SELECT * FROM information_schema.partitions WHERE table_name= 't3'; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION 
PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 -NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 -NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 -SELECT * FROM information_schema.partitions WHERE table_name= 't4'; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL -NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL -SELECT * FROM information_schema.partitions WHERE table_name= 't5'; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL -NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL -SELECT * FROM information_schema.partitions WHERE table_name= 't6'; -TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL -NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL -SELECT COUNT(*) FROM test.t1; -COUNT(*) -250 -SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5; -pk1 c2 c3 hex(c4) -250 Sweden, Texas 2 0 -249 Sweden, Texas 4 0 -248 Sweden, Texas 6 0 -247 Sweden, Texas 8 0 -246 Sweden, Texas 10 0 -SELECT COUNT(*) FROM test.t2; -COUNT(*) -250 -SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5; -pk1 c2 c3 hex(c4) -250 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 1 1 -249 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 3 1 -248 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 5 1 -247 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 7 1 -246 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 9 1 -SELECT COUNT(*) FROM test.t3; -COUNT(*) -250 -SELECT pk1, c2, c3, hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5; -pk1 c2 c3 hex(c4) -250 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 0 1 -249 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 2 1 -248 
TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 4 1 -247 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 6 1 -246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1 -SELECT COUNT(*) FROM test.t4; -COUNT(*) -250 -SELECT pk1, c2, c3, hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5; -pk1 c2 c3 hex(c4) -250 Sweden, Texas 2 0 -249 Sweden, Texas 4 0 -248 Sweden, Texas 6 0 -247 Sweden, Texas 8 0 -246 Sweden, Texas 10 0 -SELECT COUNT(*) FROM test.t5; -COUNT(*) -250 -SELECT pk1, c2, c3, hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5; -pk1 c2 c3 hex(c4) -250 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 1 1 -249 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 3 1 -248 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 5 1 -247 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 7 1 -246 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 9 1 -SELECT COUNT(*) FROM test.t6; -COUNT(*) -250 -SELECT pk1, c2, c3, hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5; -pk1 c2 c3 hex(c4) -250 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 0 1 -249 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 2 1 -248 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 4 1 -247 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 6 1 -246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1 -DROP TABLE test.t1; -DROP TABLE test.t2; -DROP TABLE test.t3; -DROP TABLE test.t4; -DROP TABLE test.t5; -DROP TABLE test.t6; -ALTER TABLESPACE table_space1 DROP DATAFILE './table_space1/datafile.dat' ENGINE=NDB; -ALTER TABLESPACE table_space2 DROP DATAFILE './table_space2/datafile.dat' ENGINE=NDB; -DROP TABLESPACE table_space1 ENGINE = NDB; -DROP TABLESPACE table_space2 ENGINE = NDB; -DROP LOGFILE GROUP log_group1 ENGINE = NDB; diff --git a/mysql-test/suite/ndb/t/disabled.def b/mysql-test/suite/ndb/t/disabled.def index 2f07bb052ca..f876039a042 100644 --- a/mysql-test/suite/ndb/t/disabled.def +++ b/mysql-test/suite/ndb/t/disabled.def @@ -16,5 +16,3 @@ ndb_partition_error2 : HF is not sure if the test can work as internded on all # the below testcase have been reworked to avoid the bug, test contains comment, keep bug open #ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events #ndb_binlog_discover : bug#21806 2006-08-24 -ndb_backup_print : Bug#32357: ndb_backup_print test fails sometimes in pushbuild -ndb_dd_backuprestore : Bug#32659 ndb_dd_backuprestore.test fails randomly diff --git a/mysql-test/suite/ndb/t/ndb_backup_print.test b/mysql-test/suite/ndb/t/ndb_backup_print.test deleted file mode 100644 index cf869fd56f5..00000000000 --- a/mysql-test/suite/ndb/t/ndb_backup_print.test +++ /dev/null @@ -1,68 +0,0 @@ --- source include/have_ndb.inc --- source include/ndb_default_cluster.inc --- source include/not_embedded.inc - ---disable_warnings -use test; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; ---enable_warnings - -#NO.1 test output of backup ---exec $NDB_MGM --no-defaults -e "start backup" |sed -e 's/[0-9]//g' |sed -e 's/localhost//g' |sed -e 's/\.\.\.*//g' - -create table t1 - (pk int key - ,a1 BIT(1), a2 BIT(5), a3 BIT(33), a4 BIT(63), a5 BIT(64) - ,b1 TINYINT, b2 TINYINT UNSIGNED - ,c1 SMALLINT, c2 SMALLINT UNSIGNED - ,d1 INT, d2 INT UNSIGNED - ,e1 BIGINT, e2 BIGINT UNSIGNED - ,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY - ,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY - ,h1 BINARY(1), h2 
BINARY(8), h3 BINARY(255) - ,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000) - ) engine ndb; - -insert into t1 values - (1 - ,0x1, 0x17, 0x789a, 0x789abcde, 0xfedc0001 - ,127, 255 - ,32767, 65535 - ,2147483647, 4294967295 - ,9223372036854775807, 18446744073709551615 - ,'1','12345678901234567890123456789012','123456789' - ,'1','12345678901234567890123456789012','123456789' - ,0x12,0x123456789abcdef0, 0x012345 - ,0x12,0x123456789abcdef0, 0x00123450 - ); - -insert into t1 values - (2 - ,0, 0, 0, 0, 0 - ,-128, 0 - ,-32768, 0 - ,-2147483648, 0 - ,-9223372036854775808, 0 - ,'','','' - ,'','','' - ,0x0,0x0,0x0 - ,0x0,0x0,0x0 - ); - -insert into t1 values - (3 - ,NULL,NULL,NULL,NULL,NULL - ,NULL,NULL - ,NULL,NULL - ,NULL,NULL - ,NULL,NULL - ,NULL,NULL,NULL - ,NULL,NULL,NULL - ,NULL,NULL,NULL - ,NULL,NULL,NULL - ); - -#NO.2 test output of backup after some simple SQL operations ---exec $NDB_MGM --no-defaults -e "start backup" |sed -e 's/[0-9]//g' |sed -e 's/localhost//g' |sed -e 's/\.\.\.*//g' - -drop table t1; diff --git a/mysql-test/suite/ndb/t/ndb_dd_backuprestore.test b/mysql-test/suite/ndb/t/ndb_dd_backuprestore.test deleted file mode 100644 index 48db8ec3e0b..00000000000 --- a/mysql-test/suite/ndb/t/ndb_dd_backuprestore.test +++ /dev/null @@ -1,349 +0,0 @@ -######################################## -# Author: JBM -# Date: 2006-01-24 -# Purpose: Test CDD backup and restore -######################################## - --- source include/have_ndb.inc --- source include/ndb_default_cluster.inc --- source include/not_embedded.inc - ---disable_query_log -set new=on; ---enable_query_log - ---disable_warnings -DROP TABLE IF EXISTS test.t1; -DROP TABLE IF EXISTS test.t2; -DROP TABLE IF EXISTS test.t3; -DROP TABLE IF EXISTS test.t4; -DROP TABLE IF EXISTS test.t5; -DROP TABLE IF EXISTS test.t6; ---enable_warnings - -############ Test 1 Simple DD backup and restore ############# --- echo **** Test 1 Simple DD backup and restore **** - -CREATE LOGFILE GROUP log_group1 -ADD UNDOFILE './log_group1/undofile.dat' -INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; - -CREATE TABLESPACE table_space1 -ADD DATAFILE './table_space1/datafile.dat' -USE LOGFILE GROUP log_group1 -INITIAL_SIZE 12M -ENGINE NDB; - - -CREATE TABLE test.t1 -(pk1 MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 CHAR(50) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL) TABLESPACE table_space1 STORAGE DISK ENGINE=NDB; - -let $j= 500; ---disable_query_log -while ($j) -{ - eval INSERT INTO test.t1 VALUES (NULL, "Sweden", $j, b'1'); - dec $j; -} ---enable_query_log -SELECT COUNT(*) FROM test.t1; -SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; - --- source include/ndb_backup.inc - -DROP TABLE test.t1; - -ALTER TABLESPACE table_space1 -DROP DATAFILE './table_space1/datafile.dat' -ENGINE = NDB; - -DROP TABLESPACE table_space1 -ENGINE = NDB; - -DROP LOGFILE GROUP log_group1 -ENGINE =NDB; - --- source include/ndb_restore_master.inc - -SELECT COUNT(*) FROM test.t1; - -SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; - -################# Mixed Cluster Test ############################ --- echo **** Test 2 Mixed Cluster Test backup and restore **** - -CREATE TABLE test.t2 -(pk1 MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 VARCHAR(200) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL)ENGINE=NDB; - -let $j= 500; ---disable_query_log -while ($j) -{ - eval INSERT INTO test.t2 VALUES (NULL, "Sweden, Texas", $j, b'0'); - dec $j; -} ---enable_query_log - -CREATE TABLE test.t3 (c1 int not null auto_increment, data LONGBLOB, 
PRIMARY KEY(c1))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB; - -CREATE TABLE test.t4 (c1 int not null auto_increment, data LONGBLOB, PRIMARY KEY(c1))ENGINE=NDB; - -let $j= 50; ---disable_query_log -while ($j) -{ - INSERT INTO test.t3 VALUES (NULL, repeat('a',1*1024)); - INSERT INTO test.t3 VALUES (NULL, repeat('b',16*1024)); - INSERT INTO test.t4 VALUES (NULL, repeat('a',1*1024)); - INSERT INTO test.t4 VALUES (NULL, repeat('b',16*1024)); - dec $j; -} ---enable_query_log - -SELECT COUNT(*) FROM test.t1; - -SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; - -SELECT COUNT(*) FROM test.t2; - -SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY pk1 LIMIT 5; - -SELECT COUNT(*) FROM test.t3; - -SELECT LENGTH(data) FROM test.t3 WHERE c1 = 1; - -SELECT LENGTH(data) FROM test.t3 WHERE c1 = 2; - -SELECT COUNT(*) FROM test.t4; - -SELECT LENGTH(data) FROM test.t4 WHERE c1 = 1; - -SELECT LENGTH(data) FROM test.t4 WHERE c1 = 2; - --- source include/ndb_backup.inc - -DROP TABLE test.t1; -DROP TABLE test.t2; -DROP TABLE test.t3; -DROP TABLE test.t4; - -ALTER TABLESPACE table_space1 -DROP DATAFILE './table_space1/datafile.dat' -ENGINE = NDB; - -DROP TABLESPACE table_space1 -ENGINE = NDB; - -DROP LOGFILE GROUP log_group1 -ENGINE =NDB; - --- source include/ndb_restore_master.inc - -SELECT COUNT(*) FROM test.t1; - -SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; - -SELECT COUNT(*) FROM test.t2; - -SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY pk1 LIMIT 5; - -SELECT COUNT(*) FROM test.t3; - -SELECT LENGTH(data) FROM test.t3 WHERE c1 = 1; - -SELECT LENGTH(data) FROM test.t3 WHERE c1 = 2; - -SELECT COUNT(*) FROM test.t4; - -SELECT LENGTH(data) FROM test.t4 WHERE c1 = 1; - -SELECT LENGTH(data) FROM test.t4 WHERE c1 = 2; - -DROP TABLE test.t1; -DROP TABLE test.t2; -DROP TABLE test.t3; -DROP TABLE test.t4; -###################### Adding partition ################################# --- echo **** Test 3 Adding partition Test backup and restore **** - -CREATE TABLESPACE table_space2 -ADD DATAFILE './table_space2/datafile.dat' -USE LOGFILE GROUP log_group1 -INITIAL_SIZE 12M -ENGINE NDB; - -CREATE TABLE test.t1 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(150) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 4; - -CREATE TABLE test.t4 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(180) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 2; - -CREATE TABLE test.t2 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY KEY(c3) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB); - -CREATE TABLE test.t5 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY KEY(pk1) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB); - -CREATE TABLE test.t3 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(202) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY RANGE (c3) PARTITIONS 3 (PARTITION x1 VALUES LESS THAN (105), PARTITION x2 VALUES LESS THAN (333), PARTITION x3 VALUES LESS THAN (720)); - -CREATE TABLE test.t6 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(220) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY RANGE (pk1) 
PARTITIONS 2 (PARTITION x1 VALUES LESS THAN (333), PARTITION x2 VALUES LESS THAN (720)); - -SHOW CREATE TABLE test.t1; - -SHOW CREATE TABLE test.t2; - -SHOW CREATE TABLE test.t3; - -SHOW CREATE TABLE test.t4; - -SHOW CREATE TABLE test.t5; - -SHOW CREATE TABLE test.t6; - -SELECT * FROM information_schema.partitions WHERE table_name= 't1'; - -SELECT * FROM information_schema.partitions WHERE table_name= 't2'; - -SELECT * FROM information_schema.partitions WHERE table_name= 't3'; - -SELECT * FROM information_schema.partitions WHERE table_name= 't4'; - -SELECT * FROM information_schema.partitions WHERE table_name= 't5'; - -SELECT * FROM information_schema.partitions WHERE table_name= 't6'; - - -let $j= 500; ---disable_query_log -while ($j) -{ - eval INSERT INTO test.t1 VALUES (NULL, "Sweden, Texas", $j, b'0'); - eval INSERT INTO test.t4 VALUES (NULL, "Sweden, Texas", $j, b'0'); - dec $j; - eval INSERT INTO test.t2 VALUES (NULL, "Sweden, Texas, ITALY, Kyle, JO, JBM,TU", $j, b'1'); - eval INSERT INTO test.t5 VALUES (NULL, "Sweden, Texas, ITALY, Kyle, JO, JBM,TU", $j, b'1'); - dec $j; - eval INSERT INTO test.t3 VALUES (NULL, "TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU", $j, b'1'); - eval INSERT INTO test.t6 VALUES (NULL, "TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU", $j, b'1'); } --enable_query_log - -SELECT COUNT(*) FROM test.t1; - -SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5; - -SELECT COUNT(*) FROM test.t2; - -SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5; - -SELECT COUNT(*) FROM test.t3; - -SELECT pk1, c2, c3, hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5; - -SELECT COUNT(*) FROM test.t4; - -SELECT pk1, c2, c3, hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5; - -SELECT COUNT(*) FROM test.t5; - -SELECT pk1, c2, c3, hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5; - -SELECT COUNT(*) FROM test.t6; - -SELECT pk1, c2, c3, hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5; - --- source include/ndb_backup.inc - -DROP TABLE test.t1; -DROP TABLE test.t2; -DROP TABLE test.t3; -DROP TABLE test.t4; -DROP TABLE test.t5; -DROP TABLE test.t6; - -ALTER TABLESPACE table_space1 -DROP DATAFILE './table_space1/datafile.dat' -ENGINE = NDB; - -ALTER TABLESPACE table_space2 -DROP DATAFILE './table_space2/datafile.dat' -ENGINE = NDB; - -DROP TABLESPACE table_space1 -ENGINE = NDB; - -DROP TABLESPACE table_space2 -ENGINE = NDB; - -DROP LOGFILE GROUP log_group1 -ENGINE =NDB; - --- source include/ndb_restore_master.inc - - -SHOW CREATE TABLE test.t1; - -SHOW CREATE TABLE test.t2; - -SHOW CREATE TABLE test.t3; - -SHOW CREATE TABLE test.t4; - -SHOW CREATE TABLE test.t5; - -SHOW CREATE TABLE test.t6; - -SELECT * FROM information_schema.partitions WHERE table_name= 't1'; - -SELECT * FROM information_schema.partitions WHERE table_name= 't2'; - -SELECT * FROM information_schema.partitions WHERE table_name= 't3'; - -SELECT * FROM information_schema.partitions WHERE table_name= 't4'; - -SELECT * FROM information_schema.partitions WHERE table_name= 't5'; - -SELECT * FROM information_schema.partitions WHERE table_name= 't6'; - -SELECT COUNT(*) FROM test.t1; - -SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5; - -SELECT COUNT(*) FROM test.t2; - -SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5; - -SELECT COUNT(*) FROM test.t3; - -SELECT pk1, c2, c3, hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5; - -SELECT COUNT(*) FROM test.t4; - -SELECT pk1, c2, c3, hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5; - -SELECT COUNT(*) FROM test.t5; - -SELECT pk1, c2, 
c3, hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5; - -SELECT COUNT(*) FROM test.t6; - -SELECT pk1, c2, c3, hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5; - -# Cleanup - -DROP TABLE test.t1; -DROP TABLE test.t2; -DROP TABLE test.t3; -DROP TABLE test.t4; -DROP TABLE test.t5; -DROP TABLE test.t6; - -ALTER TABLESPACE table_space1 DROP DATAFILE './table_space1/datafile.dat' ENGINE=NDB; - -ALTER TABLESPACE table_space2 DROP DATAFILE './table_space2/datafile.dat' ENGINE=NDB; - -DROP TABLESPACE table_space1 ENGINE = NDB; - -DROP TABLESPACE table_space2 ENGINE = NDB; - -DROP LOGFILE GROUP log_group1 ENGINE = NDB; - -#End 5.1 test case - - diff --git a/mysql-test/suite/ndb_team/r/ndb_backup_print.result b/mysql-test/suite/ndb_team/r/ndb_backup_print.result new file mode 100644 index 00000000000..7fa073bafb5 --- /dev/null +++ b/mysql-test/suite/ndb_team/r/ndb_backup_print.result @@ -0,0 +1,65 @@ +use test; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +Connected to Management Server at: : +Waiting for completed, this may take several minutes +Node : Backup started from node +Node : Backup started from node completed + StartGCP: StopGCP: + #Records: #LogRecords: + Data: bytes Log: bytes +create table t1 +(pk int key +,a1 BIT(1), a2 BIT(5), a3 BIT(33), a4 BIT(63), a5 BIT(64) +,b1 TINYINT, b2 TINYINT UNSIGNED +,c1 SMALLINT, c2 SMALLINT UNSIGNED +,d1 INT, d2 INT UNSIGNED +,e1 BIGINT, e2 BIGINT UNSIGNED +,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY +,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY +,h1 BINARY(1), h2 BINARY(8), h3 BINARY(255) +,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000) +) engine ndb; +insert into t1 values +(1 +,0x1, 0x17, 0x789a, 0x789abcde, 0xfedc0001 +,127, 255 +,32767, 65535 +,2147483647, 4294967295 +,9223372036854775807, 18446744073709551615 +,'1','12345678901234567890123456789012','123456789' + ,'1','12345678901234567890123456789012','123456789' + ,0x12,0x123456789abcdef0, 0x012345 +,0x12,0x123456789abcdef0, 0x00123450 +); +insert into t1 values +(2 +,0, 0, 0, 0, 0 +,-128, 0 +,-32768, 0 +,-2147483648, 0 +,-9223372036854775808, 0 +,'','','' + ,'','','' + ,0x0,0x0,0x0 +,0x0,0x0,0x0 +); +insert into t1 values +(3 +,NULL,NULL,NULL,NULL,NULL +,NULL,NULL +,NULL,NULL +,NULL,NULL +,NULL,NULL +,NULL,NULL,NULL +,NULL,NULL,NULL +,NULL,NULL,NULL +,NULL,NULL,NULL +); +Connected to Management Server at: : +Waiting for completed, this may take several minutes +Node : Backup started from node +Node : Backup started from node completed + StartGCP: StopGCP: + #Records: #LogRecords: + Data: bytes Log: bytes +drop table t1; diff --git a/mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result b/mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result new file mode 100644 index 00000000000..c82fe560121 --- /dev/null +++ b/mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result @@ -0,0 +1,487 @@ +DROP TABLE IF EXISTS test.t1; +DROP TABLE IF EXISTS test.t2; +DROP TABLE IF EXISTS test.t3; +DROP TABLE IF EXISTS test.t4; +DROP TABLE IF EXISTS test.t5; +DROP TABLE IF EXISTS test.t6; +**** Test 1 Simple DD backup and restore **** +CREATE LOGFILE GROUP log_group1 +ADD UNDOFILE './log_group1/undofile.dat' +INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE table_space1 +ADD DATAFILE './table_space1/datafile.dat' +USE LOGFILE GROUP log_group1 +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 +(pk1 MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 CHAR(50) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL) TABLESPACE table_space1 STORAGE DISK 
ENGINE=NDB; +SELECT COUNT(*) FROM test.t1; +COUNT(*) +500 +SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; +pk1 c2 c3 hex(c4) +1 Sweden 500 1 +2 Sweden 499 1 +3 Sweden 498 1 +4 Sweden 497 1 +5 Sweden 496 1 +CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; +DELETE FROM test.backup_info; +LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; +SELECT @the_backup_id:=backup_id FROM test.backup_info; +@the_backup_id:=backup_id + +DROP TABLE test.backup_info; +DROP TABLE test.t1; +ALTER TABLESPACE table_space1 +DROP DATAFILE './table_space1/datafile.dat' +ENGINE = NDB; +DROP TABLESPACE table_space1 +ENGINE = NDB; +DROP LOGFILE GROUP log_group1 +ENGINE =NDB; +SELECT COUNT(*) FROM test.t1; +COUNT(*) +500 +SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; +pk1 c2 c3 hex(c4) +1 Sweden 500 1 +2 Sweden 499 1 +3 Sweden 498 1 +4 Sweden 497 1 +5 Sweden 496 1 +**** Test 2 Mixed Cluster Test backup and restore **** +CREATE TABLE test.t2 +(pk1 MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 VARCHAR(200) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL)ENGINE=NDB; +CREATE TABLE test.t3 (c1 int not null auto_increment, data LONGBLOB, PRIMARY KEY(c1))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t4 (c1 int not null auto_increment, data LONGBLOB, PRIMARY KEY(c1))ENGINE=NDB; +SELECT COUNT(*) FROM test.t1; +COUNT(*) +500 +SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; +pk1 c2 c3 hex(c4) +1 Sweden 500 1 +2 Sweden 499 1 +3 Sweden 498 1 +4 Sweden 497 1 +5 Sweden 496 1 +SELECT COUNT(*) FROM test.t2; +COUNT(*) +500 +SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY pk1 LIMIT 5; +pk1 c2 c3 hex(c4) +1 Sweden, Texas 500 0 +2 Sweden, Texas 499 0 +3 Sweden, Texas 498 0 +4 Sweden, Texas 497 0 +5 Sweden, Texas 496 0 +SELECT COUNT(*) FROM test.t3; +COUNT(*) +100 +SELECT LENGTH(data) FROM test.t3 WHERE c1 = 1; +LENGTH(data) +1024 +SELECT LENGTH(data) FROM test.t3 WHERE c1 = 2; +LENGTH(data) +16384 +SELECT COUNT(*) FROM test.t4; +COUNT(*) +100 +SELECT LENGTH(data) FROM test.t4 WHERE c1 = 1; +LENGTH(data) +1024 +SELECT LENGTH(data) FROM test.t4 WHERE c1 = 2; +LENGTH(data) +16384 +CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; +DELETE FROM test.backup_info; +LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; +SELECT @the_backup_id:=backup_id FROM test.backup_info; +@the_backup_id:=backup_id + +DROP TABLE test.backup_info; +DROP TABLE test.t1; +DROP TABLE test.t2; +DROP TABLE test.t3; +DROP TABLE test.t4; +ALTER TABLESPACE table_space1 +DROP DATAFILE './table_space1/datafile.dat' +ENGINE = NDB; +DROP TABLESPACE table_space1 +ENGINE = NDB; +DROP LOGFILE GROUP log_group1 +ENGINE =NDB; +SELECT COUNT(*) FROM test.t1; +COUNT(*) +500 +SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; +pk1 c2 c3 hex(c4) +1 Sweden 500 1 +2 Sweden 499 1 +3 Sweden 498 1 +4 Sweden 497 1 +5 Sweden 496 1 +SELECT COUNT(*) FROM test.t2; +COUNT(*) +500 +SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY pk1 LIMIT 5; +pk1 c2 c3 hex(c4) +1 Sweden, Texas 500 0 +2 Sweden, Texas 499 0 +3 Sweden, Texas 498 0 +4 Sweden, Texas 497 0 +5 Sweden, Texas 496 0 +SELECT COUNT(*) FROM test.t3; +COUNT(*) +100 +SELECT LENGTH(data) FROM test.t3 WHERE c1 = 1; +LENGTH(data) +1024 +SELECT LENGTH(data) FROM test.t3 WHERE c1 = 2; +LENGTH(data) +16384 +SELECT COUNT(*) FROM test.t4; +COUNT(*) +100 +SELECT LENGTH(data) FROM test.t4 WHERE c1 = 1; +LENGTH(data) +1024 
+SELECT LENGTH(data) FROM test.t4 WHERE c1 = 2; +LENGTH(data) +16384 +DROP TABLE test.t1; +DROP TABLE test.t2; +DROP TABLE test.t3; +DROP TABLE test.t4; +**** Test 3 Adding partition Test backup and restore **** +CREATE TABLESPACE table_space2 +ADD DATAFILE './table_space2/datafile.dat' +USE LOGFILE GROUP log_group1 +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(150) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 4; +CREATE TABLE test.t4 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(180) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 2; +CREATE TABLE test.t2 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY KEY(c3) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB); +CREATE TABLE test.t5 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY KEY(pk1) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB); +CREATE TABLE test.t3 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(202) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY RANGE (c3) PARTITIONS 3 (PARTITION x1 VALUES LESS THAN (105), PARTITION x2 VALUES LESS THAN (333), PARTITION x3 VALUES LESS THAN (720)); +CREATE TABLE test.t6 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(220) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY RANGE (pk1) PARTITIONS 2 (PARTITION x1 VALUES LESS THAN (333), PARTITION x2 VALUES LESS THAN (720)); +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, + `c2` varchar(150) NOT NULL, + `c3` int(11) NOT NULL, + `c4` bit(1) NOT NULL, + PRIMARY KEY (`pk1`,`c3`) +) /*!50100 TABLESPACE table_space1 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 4 */ +SHOW CREATE TABLE test.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, + `c2` text NOT NULL, + `c3` int(11) NOT NULL, + `c4` bit(1) NOT NULL, + PRIMARY KEY (`pk1`,`c3`) +) /*!50100 TABLESPACE table_space2 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ +SHOW CREATE TABLE test.t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, + `c2` varchar(202) NOT NULL, + `c3` int(11) NOT NULL, + `c4` bit(1) NOT NULL, + PRIMARY KEY (`pk1`,`c3`) +) /*!50100 TABLESPACE table_space2 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) */ +SHOW CREATE TABLE test.t4; +Table Create Table +t4 CREATE TABLE `t4` ( + `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, + `c2` varchar(180) NOT NULL, + `c3` int(11) NOT NULL, + `c4` bit(1) NOT NULL, + PRIMARY KEY (`pk1`,`c3`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 2 */ +SHOW CREATE TABLE test.t5; +Table Create Table +t5 CREATE TABLE `t5` ( + `pk1` 
mediumint(9) NOT NULL AUTO_INCREMENT, + `c2` text NOT NULL, + `c3` int(11) NOT NULL, + `c4` bit(1) NOT NULL, + PRIMARY KEY (`pk1`,`c3`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ +SHOW CREATE TABLE test.t6; +Table Create Table +t6 CREATE TABLE `t6` ( + `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, + `c2` varchar(220) NOT NULL, + `c3` int(11) NOT NULL, + `c4` bit(1) NOT NULL, + PRIMARY KEY (`pk1`,`c3`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */ +SELECT * FROM information_schema.partitions WHERE table_name= 't1'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME +NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 +NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 +NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 +NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 +SELECT * FROM information_schema.partitions WHERE table_name= 't2'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME +NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 +NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 +SELECT * FROM information_schema.partitions WHERE table_name= 't3'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME +NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 +NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 +NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 +SELECT * FROM information_schema.partitions WHERE table_name= 't4'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME +NULL test 
t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL +NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL +SELECT * FROM information_schema.partitions WHERE table_name= 't5'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME +NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL +NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL +SELECT * FROM information_schema.partitions WHERE table_name= 't6'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME +NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL +NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL +SELECT COUNT(*) FROM test.t1; +COUNT(*) +250 +SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5; +pk1 c2 c3 hex(c4) +250 Sweden, Texas 2 0 +249 Sweden, Texas 4 0 +248 Sweden, Texas 6 0 +247 Sweden, Texas 8 0 +246 Sweden, Texas 10 0 +SELECT COUNT(*) FROM test.t2; +COUNT(*) +250 +SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5; +pk1 c2 c3 hex(c4) +250 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 1 1 +249 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 3 1 +248 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 5 1 +247 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 7 1 +246 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 9 1 +SELECT COUNT(*) FROM test.t3; +COUNT(*) +250 +SELECT pk1, c2, c3, hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5; +pk1 c2 c3 hex(c4) +250 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 0 1 +249 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 2 1 +248 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 4 1 +247 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 6 1 +246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1 +SELECT COUNT(*) FROM test.t4; +COUNT(*) +250 +SELECT pk1, c2, c3, hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5; +pk1 c2 c3 hex(c4) +250 Sweden, Texas 2 0 +249 Sweden, Texas 4 0 +248 Sweden, Texas 6 0 +247 Sweden, Texas 8 0 +246 Sweden, Texas 10 0 +SELECT COUNT(*) FROM test.t5; +COUNT(*) +250 +SELECT pk1, c2, c3, hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5; +pk1 c2 c3 hex(c4) +250 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 1 1 +249 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 3 1 +248 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 5 1 +247 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 7 1 +246 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 9 1 +SELECT COUNT(*) FROM test.t6; +COUNT(*) +250 +SELECT pk1, c2, c3, hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5; +pk1 c2 c3 hex(c4) +250 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 0 1 +249 
TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 2 1 +248 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 4 1 +247 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 6 1 +246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1 +CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; +DELETE FROM test.backup_info; +LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; +SELECT @the_backup_id:=backup_id FROM test.backup_info; +@the_backup_id:=backup_id + +DROP TABLE test.backup_info; +DROP TABLE test.t1; +DROP TABLE test.t2; +DROP TABLE test.t3; +DROP TABLE test.t4; +DROP TABLE test.t5; +DROP TABLE test.t6; +ALTER TABLESPACE table_space1 +DROP DATAFILE './table_space1/datafile.dat' +ENGINE = NDB; +ALTER TABLESPACE table_space2 +DROP DATAFILE './table_space2/datafile.dat' +ENGINE = NDB; +DROP TABLESPACE table_space1 +ENGINE = NDB; +DROP TABLESPACE table_space2 +ENGINE = NDB; +DROP LOGFILE GROUP log_group1 +ENGINE =NDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, + `c2` varchar(150) NOT NULL, + `c3` int(11) NOT NULL, + `c4` bit(1) NOT NULL, + PRIMARY KEY (`pk1`,`c3`) +) /*!50100 TABLESPACE table_space1 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 4 */ +SHOW CREATE TABLE test.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, + `c2` text NOT NULL, + `c3` int(11) NOT NULL, + `c4` bit(1) NOT NULL, + PRIMARY KEY (`pk1`,`c3`) +) /*!50100 TABLESPACE table_space2 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ +SHOW CREATE TABLE test.t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, + `c2` varchar(202) NOT NULL, + `c3` int(11) NOT NULL, + `c4` bit(1) NOT NULL, + PRIMARY KEY (`pk1`,`c3`) +) /*!50100 TABLESPACE table_space2 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) */ +SHOW CREATE TABLE test.t4; +Table Create Table +t4 CREATE TABLE `t4` ( + `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, + `c2` varchar(180) NOT NULL, + `c3` int(11) NOT NULL, + `c4` bit(1) NOT NULL, + PRIMARY KEY (`pk1`,`c3`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 2 */ +SHOW CREATE TABLE test.t5; +Table Create Table +t5 CREATE TABLE `t5` ( + `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, + `c2` text NOT NULL, + `c3` int(11) NOT NULL, + `c4` bit(1) NOT NULL, + PRIMARY KEY (`pk1`,`c3`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */ +SHOW CREATE TABLE test.t6; +Table Create Table +t6 CREATE TABLE `t6` ( + `pk1` mediumint(9) NOT NULL AUTO_INCREMENT, + `c2` varchar(220) NOT NULL, + `c3` int(11) NOT NULL, + `c4` bit(1) NOT NULL, + PRIMARY KEY (`pk1`,`c3`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */ +SELECT * FROM information_schema.partitions 
WHERE table_name= 't1'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME +NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 +NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 +NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 +NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space1 +SELECT * FROM information_schema.partitions WHERE table_name= 't2'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME +NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 +NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 +SELECT * FROM information_schema.partitions WHERE table_name= 't3'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME +NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 +NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 +NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default table_space2 +SELECT * FROM information_schema.partitions WHERE table_name= 't4'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME +NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL +NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL +SELECT * FROM information_schema.partitions WHERE table_name= 't5'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME +NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 
NULL +NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL +SELECT * FROM information_schema.partitions WHERE table_name= 't6'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME +NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL +NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default NULL +SELECT COUNT(*) FROM test.t1; +COUNT(*) +250 +SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5; +pk1 c2 c3 hex(c4) +250 Sweden, Texas 2 0 +249 Sweden, Texas 4 0 +248 Sweden, Texas 6 0 +247 Sweden, Texas 8 0 +246 Sweden, Texas 10 0 +SELECT COUNT(*) FROM test.t2; +COUNT(*) +250 +SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5; +pk1 c2 c3 hex(c4) +250 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 1 1 +249 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 3 1 +248 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 5 1 +247 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 7 1 +246 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 9 1 +SELECT COUNT(*) FROM test.t3; +COUNT(*) +250 +SELECT pk1, c2, c3, hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5; +pk1 c2 c3 hex(c4) +250 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 0 1 +249 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 2 1 +248 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 4 1 +247 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 6 1 +246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1 +SELECT COUNT(*) FROM test.t4; +COUNT(*) +250 +SELECT pk1, c2, c3, hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5; +pk1 c2 c3 hex(c4) +250 Sweden, Texas 2 0 +249 Sweden, Texas 4 0 +248 Sweden, Texas 6 0 +247 Sweden, Texas 8 0 +246 Sweden, Texas 10 0 +SELECT COUNT(*) FROM test.t5; +COUNT(*) +250 +SELECT pk1, c2, c3, hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5; +pk1 c2 c3 hex(c4) +250 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 1 1 +249 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 3 1 +248 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 5 1 +247 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 7 1 +246 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 9 1 +SELECT COUNT(*) FROM test.t6; +COUNT(*) +250 +SELECT pk1, c2, c3, hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5; +pk1 c2 c3 hex(c4) +250 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 0 1 +249 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 2 1 +248 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 4 1 +247 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 6 1 +246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1 +DROP TABLE test.t1; +DROP TABLE test.t2; +DROP TABLE test.t3; +DROP TABLE test.t4; +DROP TABLE test.t5; +DROP TABLE test.t6; +ALTER TABLESPACE table_space1 DROP DATAFILE './table_space1/datafile.dat' ENGINE=NDB; +ALTER TABLESPACE table_space2 DROP DATAFILE './table_space2/datafile.dat' ENGINE=NDB; +DROP TABLESPACE table_space1 ENGINE = NDB; +DROP TABLESPACE table_space2 ENGINE = NDB; +DROP LOGFILE GROUP log_group1 ENGINE = NDB; diff --git 
a/mysql-test/suite/ndb_team/t/ndb_backup_print.test b/mysql-test/suite/ndb_team/t/ndb_backup_print.test new file mode 100644 index 00000000000..cf869fd56f5 --- /dev/null +++ b/mysql-test/suite/ndb_team/t/ndb_backup_print.test @@ -0,0 +1,68 @@ +-- source include/have_ndb.inc +-- source include/ndb_default_cluster.inc +-- source include/not_embedded.inc + +--disable_warnings +use test; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +--enable_warnings + +#NO.1 test output of backup +--exec $NDB_MGM --no-defaults -e "start backup" |sed -e 's/[0-9]//g' |sed -e 's/localhost//g' |sed -e 's/\.\.\.*//g' + +create table t1 + (pk int key + ,a1 BIT(1), a2 BIT(5), a3 BIT(33), a4 BIT(63), a5 BIT(64) + ,b1 TINYINT, b2 TINYINT UNSIGNED + ,c1 SMALLINT, c2 SMALLINT UNSIGNED + ,d1 INT, d2 INT UNSIGNED + ,e1 BIGINT, e2 BIGINT UNSIGNED + ,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY + ,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY + ,h1 BINARY(1), h2 BINARY(8), h3 BINARY(255) + ,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000) + ) engine ndb; + +insert into t1 values + (1 + ,0x1, 0x17, 0x789a, 0x789abcde, 0xfedc0001 + ,127, 255 + ,32767, 65535 + ,2147483647, 4294967295 + ,9223372036854775807, 18446744073709551615 + ,'1','12345678901234567890123456789012','123456789' + ,'1','12345678901234567890123456789012','123456789' + ,0x12,0x123456789abcdef0, 0x012345 + ,0x12,0x123456789abcdef0, 0x00123450 + ); + +insert into t1 values + (2 + ,0, 0, 0, 0, 0 + ,-128, 0 + ,-32768, 0 + ,-2147483648, 0 + ,-9223372036854775808, 0 + ,'','','' + ,'','','' + ,0x0,0x0,0x0 + ,0x0,0x0,0x0 + ); + +insert into t1 values + (3 + ,NULL,NULL,NULL,NULL,NULL + ,NULL,NULL + ,NULL,NULL + ,NULL,NULL + ,NULL,NULL + ,NULL,NULL,NULL + ,NULL,NULL,NULL + ,NULL,NULL,NULL + ,NULL,NULL,NULL + ); + +#NO.2 test output of backup after some simple SQL operations +--exec $NDB_MGM --no-defaults -e "start backup" |sed -e 's/[0-9]//g' |sed -e 's/localhost//g' |sed -e 's/\.\.\.*//g' + +drop table t1; diff --git a/mysql-test/suite/ndb_team/t/ndb_dd_backuprestore.test b/mysql-test/suite/ndb_team/t/ndb_dd_backuprestore.test new file mode 100644 index 00000000000..48db8ec3e0b --- /dev/null +++ b/mysql-test/suite/ndb_team/t/ndb_dd_backuprestore.test @@ -0,0 +1,349 @@ +######################################## +# Author: JBM +# Date: 2006-01-24 +# Purpose: Test CDD backup and restore +######################################## + +-- source include/have_ndb.inc +-- source include/ndb_default_cluster.inc +-- source include/not_embedded.inc + +--disable_query_log +set new=on; +--enable_query_log + +--disable_warnings +DROP TABLE IF EXISTS test.t1; +DROP TABLE IF EXISTS test.t2; +DROP TABLE IF EXISTS test.t3; +DROP TABLE IF EXISTS test.t4; +DROP TABLE IF EXISTS test.t5; +DROP TABLE IF EXISTS test.t6; +--enable_warnings + +############ Test 1 Simple DD backup and restore ############# +-- echo **** Test 1 Simple DD backup and restore **** + +CREATE LOGFILE GROUP log_group1 +ADD UNDOFILE './log_group1/undofile.dat' +INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; + +CREATE TABLESPACE table_space1 +ADD DATAFILE './table_space1/datafile.dat' +USE LOGFILE GROUP log_group1 +INITIAL_SIZE 12M +ENGINE NDB; + + +CREATE TABLE test.t1 +(pk1 MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 CHAR(50) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL) TABLESPACE table_space1 STORAGE DISK ENGINE=NDB; + +let $j= 500; +--disable_query_log +while ($j) +{ + eval INSERT INTO test.t1 VALUES (NULL, "Sweden", $j, b'1'); + dec $j; +} 
+--enable_query_log +SELECT COUNT(*) FROM test.t1; +SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; + +-- source include/ndb_backup.inc + +DROP TABLE test.t1; + +ALTER TABLESPACE table_space1 +DROP DATAFILE './table_space1/datafile.dat' +ENGINE = NDB; + +DROP TABLESPACE table_space1 +ENGINE = NDB; + +DROP LOGFILE GROUP log_group1 +ENGINE =NDB; + +-- source include/ndb_restore_master.inc + +SELECT COUNT(*) FROM test.t1; + +SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; + +################# Mixed Cluster Test ############################ +-- echo **** Test 2 Mixed Cluster Test backup and restore **** + +CREATE TABLE test.t2 +(pk1 MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 VARCHAR(200) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL)ENGINE=NDB; + +let $j= 500; +--disable_query_log +while ($j) +{ + eval INSERT INTO test.t2 VALUES (NULL, "Sweden, Texas", $j, b'0'); + dec $j; +} +--enable_query_log + +CREATE TABLE test.t3 (c1 int not null auto_increment, data LONGBLOB, PRIMARY KEY(c1))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB; + +CREATE TABLE test.t4 (c1 int not null auto_increment, data LONGBLOB, PRIMARY KEY(c1))ENGINE=NDB; + +let $j= 50; +--disable_query_log +while ($j) +{ + INSERT INTO test.t3 VALUES (NULL, repeat('a',1*1024)); + INSERT INTO test.t3 VALUES (NULL, repeat('b',16*1024)); + INSERT INTO test.t4 VALUES (NULL, repeat('a',1*1024)); + INSERT INTO test.t4 VALUES (NULL, repeat('b',16*1024)); + dec $j; +} +--enable_query_log + +SELECT COUNT(*) FROM test.t1; + +SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; + +SELECT COUNT(*) FROM test.t2; + +SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY pk1 LIMIT 5; + +SELECT COUNT(*) FROM test.t3; + +SELECT LENGTH(data) FROM test.t3 WHERE c1 = 1; + +SELECT LENGTH(data) FROM test.t3 WHERE c1 = 2; + +SELECT COUNT(*) FROM test.t4; + +SELECT LENGTH(data) FROM test.t4 WHERE c1 = 1; + +SELECT LENGTH(data) FROM test.t4 WHERE c1 = 2; + +-- source include/ndb_backup.inc + +DROP TABLE test.t1; +DROP TABLE test.t2; +DROP TABLE test.t3; +DROP TABLE test.t4; + +ALTER TABLESPACE table_space1 +DROP DATAFILE './table_space1/datafile.dat' +ENGINE = NDB; + +DROP TABLESPACE table_space1 +ENGINE = NDB; + +DROP LOGFILE GROUP log_group1 +ENGINE =NDB; + +-- source include/ndb_restore_master.inc + +SELECT COUNT(*) FROM test.t1; + +SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; + +SELECT COUNT(*) FROM test.t2; + +SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY pk1 LIMIT 5; + +SELECT COUNT(*) FROM test.t3; + +SELECT LENGTH(data) FROM test.t3 WHERE c1 = 1; + +SELECT LENGTH(data) FROM test.t3 WHERE c1 = 2; + +SELECT COUNT(*) FROM test.t4; + +SELECT LENGTH(data) FROM test.t4 WHERE c1 = 1; + +SELECT LENGTH(data) FROM test.t4 WHERE c1 = 2; + +DROP TABLE test.t1; +DROP TABLE test.t2; +DROP TABLE test.t3; +DROP TABLE test.t4; +###################### Adding partition ################################# +-- echo **** Test 3 Adding partition Test backup and restore **** + +CREATE TABLESPACE table_space2 +ADD DATAFILE './table_space2/datafile.dat' +USE LOGFILE GROUP log_group1 +INITIAL_SIZE 12M +ENGINE NDB; + +CREATE TABLE test.t1 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(150) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 4; + +CREATE TABLE test.t4 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(180) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 
2; + +CREATE TABLE test.t2 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY KEY(c3) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB); + +CREATE TABLE test.t5 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY KEY(pk1) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB); + +CREATE TABLE test.t3 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(202) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY RANGE (c3) PARTITIONS 3 (PARTITION x1 VALUES LESS THAN (105), PARTITION x2 VALUES LESS THAN (333), PARTITION x3 VALUES LESS THAN (720)); + +CREATE TABLE test.t6 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(220) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY RANGE (pk1) PARTITIONS 2 (PARTITION x1 VALUES LESS THAN (333), PARTITION x2 VALUES LESS THAN (720)); + +SHOW CREATE TABLE test.t1; + +SHOW CREATE TABLE test.t2; + +SHOW CREATE TABLE test.t3; + +SHOW CREATE TABLE test.t4; + +SHOW CREATE TABLE test.t5; + +SHOW CREATE TABLE test.t6; + +SELECT * FROM information_schema.partitions WHERE table_name= 't1'; + +SELECT * FROM information_schema.partitions WHERE table_name= 't2'; + +SELECT * FROM information_schema.partitions WHERE table_name= 't3'; + +SELECT * FROM information_schema.partitions WHERE table_name= 't4'; + +SELECT * FROM information_schema.partitions WHERE table_name= 't5'; + +SELECT * FROM information_schema.partitions WHERE table_name= 't6'; + + +let $j= 500; +--disable_query_log +while ($j) +{ + eval INSERT INTO test.t1 VALUES (NULL, "Sweden, Texas", $j, b'0'); + eval INSERT INTO test.t4 VALUES (NULL, "Sweden, Texas", $j, b'0'); + dec $j; + eval INSERT INTO test.t2 VALUES (NULL, "Sweden, Texas, ITALY, Kyle, JO, JBM,TU", $j, b'1'); + eval INSERT INTO test.t5 VALUES (NULL, "Sweden, Texas, ITALY, Kyle, JO, JBM,TU", $j, b'1'); + dec $j; + eval INSERT INTO test.t3 VALUES (NULL, "TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU", $j, b'1'); + eval INSERT INTO test.t6 VALUES (NULL, "TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU", $j, b'1'); } --enable_query_log + +SELECT COUNT(*) FROM test.t1; + +SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5; + +SELECT COUNT(*) FROM test.t2; + +SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5; + +SELECT COUNT(*) FROM test.t3; + +SELECT pk1, c2, c3, hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5; + +SELECT COUNT(*) FROM test.t4; + +SELECT pk1, c2, c3, hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5; + +SELECT COUNT(*) FROM test.t5; + +SELECT pk1, c2, c3, hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5; + +SELECT COUNT(*) FROM test.t6; + +SELECT pk1, c2, c3, hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5; + +-- source include/ndb_backup.inc + +DROP TABLE test.t1; +DROP TABLE test.t2; +DROP TABLE test.t3; +DROP TABLE test.t4; +DROP TABLE test.t5; +DROP TABLE test.t6; + +ALTER TABLESPACE table_space1 +DROP DATAFILE './table_space1/datafile.dat' +ENGINE = NDB; + +ALTER TABLESPACE table_space2 +DROP DATAFILE './table_space2/datafile.dat' +ENGINE = NDB; + +DROP TABLESPACE table_space1 +ENGINE = NDB; + +DROP TABLESPACE table_space2 +ENGINE = NDB; + +DROP LOGFILE GROUP log_group1 +ENGINE =NDB; + +-- source include/ndb_restore_master.inc + + +SHOW CREATE TABLE test.t1; + +SHOW 
CREATE TABLE test.t2; + +SHOW CREATE TABLE test.t3; + +SHOW CREATE TABLE test.t4; + +SHOW CREATE TABLE test.t5; + +SHOW CREATE TABLE test.t6; + +SELECT * FROM information_schema.partitions WHERE table_name= 't1'; + +SELECT * FROM information_schema.partitions WHERE table_name= 't2'; + +SELECT * FROM information_schema.partitions WHERE table_name= 't3'; + +SELECT * FROM information_schema.partitions WHERE table_name= 't4'; + +SELECT * FROM information_schema.partitions WHERE table_name= 't5'; + +SELECT * FROM information_schema.partitions WHERE table_name= 't6'; + +SELECT COUNT(*) FROM test.t1; + +SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5; + +SELECT COUNT(*) FROM test.t2; + +SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5; + +SELECT COUNT(*) FROM test.t3; + +SELECT pk1, c2, c3, hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5; + +SELECT COUNT(*) FROM test.t4; + +SELECT pk1, c2, c3, hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5; + +SELECT COUNT(*) FROM test.t5; + +SELECT pk1, c2, c3, hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5; + +SELECT COUNT(*) FROM test.t6; + +SELECT pk1, c2, c3, hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5; + +# Cleanup + +DROP TABLE test.t1; +DROP TABLE test.t2; +DROP TABLE test.t3; +DROP TABLE test.t4; +DROP TABLE test.t5; +DROP TABLE test.t6; + +ALTER TABLESPACE table_space1 DROP DATAFILE './table_space1/datafile.dat' ENGINE=NDB; + +ALTER TABLESPACE table_space2 DROP DATAFILE './table_space2/datafile.dat' ENGINE=NDB; + +DROP TABLESPACE table_space1 ENGINE = NDB; + +DROP TABLESPACE table_space2 ENGINE = NDB; + +DROP LOGFILE GROUP log_group1 ENGINE = NDB; + +#End 5.1 test case + + -- cgit v1.2.1 From 2db4748bf2675a1d0b33039c425175f4e2b39940 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 11 Dec 2007 17:30:42 -0500 Subject: Bug #30651 Problems with thread_handling system variable Changed thread_handling variable to a global only, read only variable, as it is currently used. 
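Illustrative usage (not part of this patch, and assuming the standard 5.1 option spelling): since the variable is now read-only at runtime, the scheduler can only be chosen at server startup, while SQL can still inspect the current value.

# Pick the scheduler when starting the server, e.g.:
#   mysqld --thread-handling=no-threads
# At runtime the value can only be read, not changed:
SELECT @@global.thread_handling;
SHOW GLOBAL VARIABLES LIKE 'thread_handling';

Attempts to SET the variable, or to read it as a session variable, now fail as shown in the test changes below.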
--- mysql-test/r/no-threads.result | 4 ++++ mysql-test/t/no-threads.test | 10 ++++++++++ sql/set_var.cc | 9 ++++++++- sql/set_var.h | 18 ++++++++++++++++++ 4 files changed, 40 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/no-threads.result b/mysql-test/r/no-threads.result index aefecd0f7bc..9bc2dad6e2a 100644 --- a/mysql-test/r/no-threads.result +++ b/mysql-test/r/no-threads.result @@ -7,3 +7,7 @@ select 1+2; SHOW GLOBAL VARIABLES LIKE 'thread_handling'; Variable_name Value thread_handling no-threads +select @@session.thread_handling; +ERROR HY000: Variable 'thread_handling' is a GLOBAL variable +set GLOBAL thread_handling='one-thread'; +ERROR HY000: Variable 'thread_handling' is a read only variable diff --git a/mysql-test/t/no-threads.test b/mysql-test/t/no-threads.test index 31ea6406ee9..fd8365e5678 100644 --- a/mysql-test/t/no-threads.test +++ b/mysql-test/t/no-threads.test @@ -4,3 +4,13 @@ select 1+1; select 1+2; SHOW GLOBAL VARIABLES LIKE 'thread_handling'; + +# +# Bug #30651 Problems with thread_handling system variable +# + +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.thread_handling; + +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +set GLOBAL thread_handling='one-thread'; diff --git a/sql/set_var.cc b/sql/set_var.cc index d408b2de64e..31b5ad6331f 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -384,7 +384,7 @@ static sys_var_thd_ulong sys_trans_alloc_block_size(&vars, "transaction_alloc_bl static sys_var_thd_ulong sys_trans_prealloc_size(&vars, "transaction_prealloc_size", &SV::trans_prealloc_size, 0, fix_trans_mem_root); -sys_var_thd_enum sys_thread_handling(&vars, "thread_handling", +sys_var_enum_const sys_thread_handling(&vars, "thread_handling", &SV::thread_handling, &thread_handling_typelib, NULL); @@ -1183,6 +1183,13 @@ uchar *sys_var_enum::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) return (uchar*) enum_names->type_names[*value]; } + +uchar *sys_var_enum_const::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) +{ + return (uchar*) enum_names->type_names[global_system_variables.*offset]; +} + bool sys_var_thd_ulong::check(THD *thd, set_var *var) { return (sys_var_thd::check(thd, var) || diff --git a/sql/set_var.h b/sql/set_var.h index eb2c893c89e..f25da5dd8be 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -305,6 +305,24 @@ public: }; +class sys_var_enum_const :public sys_var +{ + ulong SV::*offset; + TYPELIB *enum_names; +public: + sys_var_enum_const(sys_var_chain *chain, const char *name_arg, ulong SV::*offset_arg, + TYPELIB *typelib, sys_after_update_func func) + :sys_var(name_arg,func), offset(offset_arg), enum_names(typelib) + { chain_sys_var(chain); } + bool check(THD *thd, set_var *var) { return 1; } + bool update(THD *thd, set_var *var) { return 1; } + SHOW_TYPE show_type() { return SHOW_CHAR; } + bool check_update_type(Item_result type) { return 1; } + bool is_readonly() const { return 1; } + uchar *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); +}; + + class sys_var_thd :public sys_var { public: -- cgit v1.2.1 From 2a0d2fef51219846e51eae8a56b2de45193edc37 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 12 Dec 2007 18:21:01 +0300 Subject: Bug#12713 "Error in a stored function called from a SELECT doesn't cause ROLLBACK of statement", part 1. Review fixes. Do not send OK/EOF packets to the client until we reached the end of the current statement. This is a consolidation, to keep the functionality that is shared by all SQL statements in one place in the server. 
Currently this functionality includes: - close_thread_tables() - log_slow_statement(). After this patch and the subsequent patch for Bug#12713, it shall also include: - ha_autocommit_or_rollback() - net_end_statement() - query_cache_end_of_result(). In future it may also include: - mysql_reset_thd_for_next_command(). include/mysql_com.h: Rename now unused members of NET: no_send_ok, no_send_error, report_error. These were server-specific variables related to the client/server protocol. They have been made obsolete by this patch. Previously the same members of NET were used to store the error message both on the client and on the server. The error message was stored in net.last_error (client: mysql->net.last_error, server: thd->net.last_error). The error code was stored in net.last_errno (client: mysql->net.last_errno, server: thd->net.last_errno). The server error code and message are now stored elsewhere (in the Diagnostics_area), thus NET members are no longer used by the server. Rename last_error to client_last_error, last_errno to client_last_errno to avoid potential bugs introduced by merges. include/mysql_h.ic: Update the ABI file to reflect a rename. Renames do not break the binary compatibility. libmysql/libmysql.c: Rename last_error to client_last_error, last_errno to client_last_errno. This is necessary to ensure no unnoticed bugs introduced by merged changesets. Remove net.report_error, net.no_send_ok, net.no_send_error. libmysql/manager.c: Rename net.last_errno to net.client_last_errno. libmysqld/lib_sql.cc: Rename net.last_errno to net.client_last_errno. Update the embedded implementation of the client-server protocol to reflect the refactoring of protocol.cc. libmysqld/libmysqld.c: Rename net.last_errno to net.client_last_errno. mysql-test/r/events.result: Update to reflect the change in mysql_rm_db(). Now we drop stored routines and events for a given database name only if there is a directory for this database name. ha_drop_database() and query_cache_invalidate() are called likewise. Previously we would attempt to drop routines/events even if database directory was not found (it worked, since routines and events are stored in tables). This fixes Bug 29958 "Weird message on DROP DATABASE if mysql.proc does not exist". The change was done because the previous code used to call send_ok() twice, which led to an assertion failure when asserts against it were added by this patch. mysql-test/r/grant.result: Fix the patch for Bug 16470, now FLUSH PRIVILEGES produces an error if mysql.procs_priv is missing. This fixes the assert that send_ok() must not called after send_error() (the original patch for Bug 16470 was prone to this). mysql-test/suite/rpl/r/rpl_row_tabledefs_2myisam.result: Produce a more detailed error message. mysql-test/suite/rpl/r/rpl_row_tabledefs_3innodb.result: Produce a more detailed error message. mysql-test/t/grant.test: Update the test, now FLUSH PRIVILEGES returns an error if mysql.procs_priv is missing. server-tools/instance-manager/mysql_connection.cc: Rename net.last_errno to net.client_last_errno. sql/ha_ndbcluster_binlog.cc: Add asserts. Use getters to access statement status information. Add a comment why run_query() is broken. Reset the diagnostics area in the end of run_query() to fulfill the invariant that the diagnostics_area is never assigned twice per statement (see the comment in the code when this can happen). 
We still do not clear thd->is_fatal_error and thd->is_slave_error, which may lead to bugs, I consider the whole affair as something to be dealt with separately. sql/ha_partition.cc: fatal_error() doesn't set an error by itself. Perhaps we should remove this method altogether and instead add a flag to my_error to set thd->is_fatal_error property. Meanwhile, this change is a part of inspection made to the entire source code with the goal to ensure that fatal_error() is always accompanied by my_error(). sql/item_func.cc: There is no net.last_error anymore. Remove the obsolete assignment. sql/log_event.cc: Use getters to access statement error status information. sql/log_event_old.cc: Use getters to access statement error status information. sql/mysqld.cc: Previously, if a continue handler for an error was found, my_message_sql() would not set an error in THD. Since the current statement must be aborted in any case, find_handler() had a hack to assign thd->net.report_error to 1. Remove this hack. Set an error in my_message_sql() even if the continue handler is found. The error will be cleared anyway when the handler is executed. This is one action among many in this patch to ensure the invariant that whenever thd->is_error() is TRUE, we have a message in thd->main_da.message(). sql/net_serv.cc: Use a full-blown my_error() in net_serv.cc to report an error, instead of just setting net->last_errno. This ensures the invariant that whenever thd->is_error() returns TRUE, we have a message in thd->main_da.message(). Remove initialization of removed NET members. sql/opt_range.cc: Use my_error() instead of just raising thd->net.report_error. This ensures the invariant that whenever thd->is_error() returns TRUE, there is a message in thd->main_da.message(). sql/opt_sum.cc: Move invocation of fatal_error() right next to the place where we set the error message. That makes it easier to track that whenever fatal_error() is called, there is a message in THD. sql/protocol.cc: Rename send_ok() and send_eof() to net_send_ok() and net_send_eof() respectively. These functions write directly to the network and are not for use anywhere outside the client/server protocol code. Remove the code that was responsible for cases when either there is no error code, or no error message, or both. Instead the calling code ensures that they are always present. Asserts are added to enforce the invariant. Instead of a direct access to thd->server_status and thd->total_warn_count use function parameters, since these from now on don't always come directly from THD. Introduce net_end_statement(), the single-entry-point replacement API for send_ok(), send_eof() and net_send_error(). Implement Protocol::end_partial_result_set to use in select_send::abort() when there is a continue handler. sql/protocol.h: Update declarations. sql/repl_failsafe.cc: Use getters to access statement status information in THD. Rename net.last_error to net.client_last_error. sql/rpl_record.cc: Set an error message in prepare_record() if there is no default value for the field -- later we do print this message to the client. sql/rpl_rli.cc: Use getters to access statement status information in THD. sql/slave.cc: In create_table_from_dump() (a common function that is used in LOAD MASTER TABLE SQL statement and COM_LOAD_MASTER_DATA), instead of hacks with no_send_ok, clear the diagnostics area when mysql_rm_table() succeeded. Update has_temporary_error() to work correctly when no error is set. 
This is the case when Incident_log_event is executed: it always returns an error but does not set an error message. Use getters to access error status information. sql/sp_head.cc: Instead of hacks with no_send_error, work through the diagnostics area interface to suppress sending of OK/ERROR packets to the client. Move query_cache_end_of_result before log_slow_statement(), similarly to how it's done in dispatch_command(). sql/sp_rcontext.cc: Remove hacks with assignment of thd->net.report_error, they are not necessary any more (see the changes in mysqld.cc). sql/sql_acl.cc: Use getters to access error status information in THD. sql/sql_base.cc: Access thd->main_da.sql_errno() only if there is an error. This fixes a bug when auto-discovery, that was effectively disabled under pre-locking. sql/sql_binlog.cc: Remove hacks with no_send_ok/no_send_error, they are not necessary anymore: the caller is responsible for network communication. sql/sql_cache.cc: Disable sending of OK/ERROR/EOF packet in the end of dispatch_command if the response has been served from the query cache. This raises the question whether we should store EOF packet in the query cache at all, or generate it anew for each statement (we should generate it anew), but this is to be addressed separately. sql/sql_class.cc: Implement class Diagnostics_area. Please see comments in sql_class.h for details. Fix a subtle coding mistake in select_send::send_data: when on slave, an error in Item::send() was ignored. The problem became visible due to asserts that the diagnostics area is never double assigned. Remove initialization of removed NET members. In select_send::abort() do not call select_send::send_eof(). This is not inheritance-safe. Even if a stored procedure continue handler is found, the current statement is aborted, not succeeded. Instead introduce a Protocol API to send the required response, Protocol::end_partial_result_set(). This simplifies implementation of select_send::send_eof(). No need to add more asserts that there is no error, there is an assert inside Diagnostics_area::set_ok_status() already. Leave no trace of no_send_* in the code. sql/sql_class.h: Declare class Diagnostics_area. Remove the hack with no_send_ok from Substatement_state. Provide inline implementations of send_ok/send_eof. Add commetns. sql/sql_connect.cc: Remove hacks with no_send_error. Since now an error in THD is always set if net->error, it's not necessary to check both net->error and thd->is_error() in the do_command loop. Use thd->main_da.message() instead of net->last_errno. Remove the hack with is_slave_error in sys_init_connect. Since now we do not reset the diagnostics area in net_send_error (it's reset at the beginning of the next statement), we can access it safely even after execute_init_command. sql/sql_db.cc: Update the code to satisfy the invariant that the diagnostics area is never assigned twice. Incidentally, this fixes Bug 29958 "Weird message on DROP DATABASE if mysql.proc does not exist". sql/sql_delete.cc: Change multi-delete to abort in abort(), as per select_send protocol. Fixes the merge error with the test for Bug 29136 sql/sql_derived.cc: Use getters to access error information. sql/sql_insert.cc: Use getters to access error information. sql-common/client.c: Rename last_error to client_last_error, last_errno to client_last_errno. sql/sql_parse.cc: Remove hacks with no_send_error. Deploy net_end_statement(). The story of COM_SHUTDOWN is interesting. 
Long story short, the server would be on its death's door, and only no_send_ok/no_send_error assigned by send_ok()/net_send_error() would hide its babbling from the client. First of all, COM_QUIT does not require a response. So, the comment saying "Let's send a response to possible COM_QUIT" is not only groundless (even mysqladmin shutdown/mysql_shutdown() doesn't send COM_QUIT after COM_SHUTDOWN), it's plainly incorrect. Secondly, besides this additional 'OK' packet to respond to a hypothetical COM_QUIT, there was the following code in dispatch_command(): if (thd->killed) thd->send_kill_message(); if (thd->is_error()) net_send_error(thd); This worked out really funny for the thread through which COM_SHUTDOWN was delivered: we would get COM_SHUTDOWN, say okay, say okay again, kill everybody, get the kill signal ourselves, and then attempt to say "Server shutdown in progress" to the client that is very likely long gone. This all became visible when asserts were added that the Diagnostics_area is not assigned twice. Move query_cache_end_of_result() to the end of dispatch_command(), since net_send_eof() has been moved there. This is safe: query_cache_end_of_result() is a no-op if there is no started query in the cache. Consistently use the select_send interface to call abort() or send_eof() depending on the operation result. Remove thd->fatal_error() from reset_master(), it was a no-op; no hacks with no_send_error would save us from complete breakage of the client/server protocol. Consistently use select_send::abort() whenever there is an error, and select_send::send_eof() in case of success. The issue became visible due to added asserts. sql/sql_partition.cc: Always set an error in THD whenever there is a call to fatal_error(). sql/sql_prepare.cc: Deploy class Diagnostics_area. Remove the unnecessary juggling with the protocol in Select_fetch_protocol_binary::send_eof(). EOF packet format is protocol-independent. sql/sql_select.cc: Call fatal_error() directly in opt_sum_query. Call my_error() whenever we call thd->fatal_error(). sql/sql_servers.cc: Use getters to access error information in THD. sql/sql_show.cc: Use getters to access error information in THD. Add comments. Call my_error() whenever we call fatal_error(). sql/sql_table.cc: Replace hacks with no_send_ok with the interface of the diagnostics area. Clear the error if there is an ENOENT error in ha_delete_table(). sql/sql_update.cc: Introduce multi_update::abort(), which is the proper way to abort a multi-update. This fixes the merge conflict between this patch and the patch for Bug 29136. sql/table.cc: Use a getter to access error information in THD. sql/tztime.cc: Use a getter to access error information in THD.
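To make the intended contract concrete, here is a minimal standalone sketch (illustrative only; the class and helper names below, such as Diagnostics_area_model and end_statement(), are stand-ins and not the real server code) of the pattern this patch moves towards: each statement records its outcome exactly once in the diagnostics area, the outcome is read back only through getters, and a single end-of-statement hook turns it into one OK or error response before resetting the area for the next statement.

/*
  A minimal, standalone model of the Diagnostics_area contract described
  above (illustrative only -- the real class is declared in sql/sql_class.h
  and carries more state).  It demonstrates the invariants this patch
  relies on: the status is never assigned twice per statement, it is read
  through getters, and it is reset before the next statement.
*/
#include <cassert>
#include <cstdio>
#include <string>

class Diagnostics_area_model {
public:
  enum Status { EMPTY, OK, ERROR };
  Diagnostics_area_model() : m_status(EMPTY), m_affected_rows(0), m_sql_errno(0) {}

  void set_ok_status(unsigned long long affected_rows) {
    assert(m_status == EMPTY);              /* never assigned twice */
    m_status= OK;
    m_affected_rows= affected_rows;
  }
  void set_error_status(unsigned sql_errno, const std::string &msg) {
    assert(m_status == EMPTY);              /* never assigned twice */
    m_status= ERROR;
    m_sql_errno= sql_errno;
    m_message= msg;
  }
  bool is_error() const { return m_status == ERROR; }
  unsigned sql_errno() const { assert(is_error()); return m_sql_errno; }
  const char *message() const { assert(m_status != EMPTY); return m_message.c_str(); }
  void reset_diagnostics_area() { m_status= EMPTY; m_message.clear(); }

private:
  Status m_status;
  unsigned long long m_affected_rows;
  unsigned m_sql_errno;
  std::string m_message;
};

/* Stand-in for net_end_statement(): the single place that sends a response. */
static void end_statement(Diagnostics_area_model &da)
{
  if (da.is_error())
    std::printf("ERROR %u: %s\n", da.sql_errno(), da.message());
  else
    std::printf("OK\n");
  da.reset_diagnostics_area();              /* ready for the next statement */
}

int main()
{
  Diagnostics_area_model da;
  da.set_ok_status(1);                                          /* statement 1 succeeds */
  end_statement(da);
  da.set_error_status(1146, "Table 'test.t1' doesn't exist");   /* statement 2 fails */
  end_statement(da);
  return 0;
}

Keeping the response in one place is what later allows ha_autocommit_or_rollback(), net_end_statement() and query_cache_end_of_result() to be hooked into the same end-of-statement spot, as listed at the top of this message.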
--- include/mysql_com.h | 19 +- include/mysql_h.ic | 10 +- libmysql/libmysql.c | 40 ++-- libmysql/manager.c | 2 +- libmysqld/lib_sql.cc | 87 +++++--- libmysqld/libmysqld.c | 5 +- mysql-test/r/events.result | 3 +- mysql-test/r/grant.result | 4 +- .../suite/rpl/r/rpl_row_tabledefs_2myisam.result | 4 +- .../suite/rpl/r/rpl_row_tabledefs_3innodb.result | 4 +- mysql-test/t/grant.test | 1 + server-tools/instance-manager/mysql_connection.cc | 2 +- sql-common/client.c | 74 +++---- sql/ha_ndbcluster_binlog.cc | 25 ++- sql/ha_partition.cc | 1 + sql/item_func.cc | 1 - sql/log_event.cc | 52 +++-- sql/log_event_old.cc | 16 +- sql/mysqld.cc | 50 +++-- sql/net_serv.cc | 58 +++--- sql/opt_range.cc | 2 +- sql/opt_sum.cc | 2 + sql/protocol.cc | 227 ++++++++++++++------- sql/protocol.h | 5 +- sql/repl_failsafe.cc | 4 +- sql/rpl_record.cc | 5 +- sql/rpl_rli.cc | 4 +- sql/slave.cc | 60 +++--- sql/sp_head.cc | 16 +- sql/sp_rcontext.cc | 22 +- sql/sql_acl.cc | 9 +- sql/sql_base.cc | 33 ++- sql/sql_binlog.cc | 16 -- sql/sql_cache.cc | 1 + sql/sql_class.cc | 155 +++++++++++--- sql/sql_class.h | 159 ++++++++++++++- sql/sql_connect.cc | 33 +-- sql/sql_db.cc | 5 +- sql/sql_delete.cc | 8 + sql/sql_derived.cc | 5 +- sql/sql_insert.cc | 16 +- sql/sql_parse.cc | 126 ++++++------ sql/sql_partition.cc | 1 + sql/sql_prepare.cc | 27 ++- sql/sql_select.cc | 2 +- sql/sql_servers.cc | 2 +- sql/sql_show.cc | 55 ++--- sql/sql_table.cc | 32 ++- sql/sql_update.cc | 4 + sql/table.cc | 27 +-- sql/tztime.cc | 2 +- 51 files changed, 983 insertions(+), 540 deletions(-) diff --git a/include/mysql_com.h b/include/mysql_com.h index 4ae7f66060f..b94a783e839 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -203,14 +203,10 @@ typedef struct st_net { unsigned int *return_status; unsigned char reading_or_writing; char save_char; - my_bool no_send_ok; /* For SPs and other things that do multiple stmts */ + my_bool unused0; /* Please remove with the next incompatible ABI change. */ my_bool unused; /* Please remove with the next incompatible ABI change */ my_bool compress; - /* - Set if OK packet is already sent, and we do not need to send error - messages - */ - my_bool no_send_error; + my_bool unused1; /* Please remove with the next incompatible ABI change. */ /* Pointer to query object in query cache, do not equal NULL (0) for queries in cache that have not stored its results yet @@ -221,11 +217,14 @@ typedef struct st_net { functions and methods to maintain proper locking. */ unsigned char *query_cache_query; - unsigned int last_errno; - unsigned char error; - my_bool report_error; /* We should report error (we have unreported error) */ + unsigned int client_last_errno; + unsigned char error; + my_bool unused2; /* Please remove with the next incompatible ABI change. */ my_bool return_errno; - char last_error[MYSQL_ERRMSG_SIZE], sqlstate[SQLSTATE_LENGTH+1]; + /** Client library error message buffer. Actually belongs to struct MYSQL. */ + char client_last_error[MYSQL_ERRMSG_SIZE]; + /** Client library sqlstate buffer. Set along with the error message. 
*/ + char sqlstate[SQLSTATE_LENGTH+1]; void *extension; } NET; diff --git a/include/mysql_h.ic b/include/mysql_h.ic index 4bedb9e8050..4f138d9a229 100644 --- a/include/mysql_h.ic +++ b/include/mysql_h.ic @@ -537,16 +537,16 @@ struct __attribute__((aligned(__alignof__(void *)), aligned(__alignof__(unsigned unsigned int * return_status; unsigned char reading_or_writing; char save_char; - my_bool no_send_ok; + my_bool unused0; my_bool unused; my_bool compress; - my_bool no_send_error; + my_bool unused1; unsigned char * query_cache_query; - unsigned int last_errno; + unsigned int client_last_errno; unsigned char error; - my_bool report_error; + my_bool unused2; my_bool return_errno; - char last_error[512]; + char client_last_error[512]; char sqlstate[(5 + 1)]; void * extension; }; diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index bf5dd7d63eb..f5ac1c09248 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -440,11 +440,11 @@ static void expand_error(MYSQL* mysql, int error) char tmp[MYSQL_ERRMSG_SIZE]; char *p; uint err_length; - strmake(tmp, mysql->net.last_error, MYSQL_ERRMSG_SIZE-1); - p = strmake(mysql->net.last_error, ER(error), MYSQL_ERRMSG_SIZE-1); - err_length= (uint) (p - mysql->net.last_error); + strmake(tmp, mysql->net.client_last_error, MYSQL_ERRMSG_SIZE-1); + p = strmake(mysql->net.client_last_error, ER(error), MYSQL_ERRMSG_SIZE-1); + err_length= (uint) (p - mysql->net.client_last_error); strmake(p, tmp, MYSQL_ERRMSG_SIZE-1 - err_length); - mysql->net.last_errno = error; + mysql->net.client_last_errno = error; } /* @@ -870,9 +870,10 @@ my_bool handle_local_infile(MYSQL *mysql, const char *net_filename) VOID(my_net_write(net,(const uchar*) "",0)); /* Server needs one packet */ net_flush(net); strmov(net->sqlstate, unknown_sqlstate); - net->last_errno= (*options->local_infile_error)(li_ptr, - net->last_error, - sizeof(net->last_error)-1); + net->client_last_errno= + (*options->local_infile_error)(li_ptr, + net->client_last_error, + sizeof(net->client_last_error)-1); goto err; } @@ -899,9 +900,10 @@ my_bool handle_local_infile(MYSQL *mysql, const char *net_filename) if (readcount < 0) { - net->last_errno= (*options->local_infile_error)(li_ptr, - net->last_error, - sizeof(net->last_error)-1); + net->client_last_errno= + (*options->local_infile_error)(li_ptr, + net->client_last_error, + sizeof(net->client_last_error)-1); goto err; } @@ -1395,7 +1397,7 @@ const char *cli_read_statistics(MYSQL *mysql) if (!mysql->net.read_pos[0]) { set_mysql_error(mysql, CR_WRONG_HOST_INFO, unknown_sqlstate); - return mysql->net.last_error; + return mysql->net.client_last_error; } return (char*) mysql->net.read_pos; } @@ -1406,7 +1408,7 @@ mysql_stat(MYSQL *mysql) { DBUG_ENTER("mysql_stat"); if (simple_command(mysql,COM_STATISTICS,0,0,0)) - DBUG_RETURN(mysql->net.last_error); + DBUG_RETURN(mysql->net.client_last_error); DBUG_RETURN((*mysql->methods->read_statistics)(mysql)); } @@ -1771,7 +1773,7 @@ static my_bool my_realloc_str(NET *net, ulong length) if (res) { strmov(net->sqlstate, unknown_sqlstate); - strmov(net->last_error, ER(net->last_errno)); + strmov(net->client_last_error, ER(net->client_last_errno)); } net->write_pos= net->buff+ buf_length; } @@ -1822,13 +1824,15 @@ void set_stmt_error(MYSQL_STMT * stmt, int errcode, void set_stmt_errmsg(MYSQL_STMT *stmt, NET *net) { DBUG_ENTER("set_stmt_errmsg"); - DBUG_PRINT("enter", ("error: %d/%s '%s'", net->last_errno, net->sqlstate, - net->last_error)); + DBUG_PRINT("enter", ("error: %d/%s '%s'", + net->client_last_errno, + 
net->sqlstate, + net->client_last_error)); DBUG_ASSERT(stmt != 0); - stmt->last_errno= net->last_errno; - if (net->last_error && net->last_error[0]) - strmov(stmt->last_error, net->last_error); + stmt->last_errno= net->client_last_errno; + if (net->client_last_error && net->client_last_error[0]) + strmov(stmt->last_error, net->client_last_error); strmov(stmt->sqlstate, net->sqlstate); DBUG_VOID_RETURN; diff --git a/libmysql/manager.c b/libmysql/manager.c index 53ffffa55c0..27d35758f3e 100644 --- a/libmysql/manager.c +++ b/libmysql/manager.c @@ -160,7 +160,7 @@ MYSQL_MANAGER* STDCALL mysql_manager_connect(MYSQL_MANAGER* con, msg_len=strlen(msg_buf); if (my_net_write(&con->net,(uchar*) msg_buf,msg_len) || net_flush(&con->net)) { - con->last_errno=con->net.last_errno; + con->last_errno=con->net.client_last_errno; strmov(con->last_error,"Write error on socket"); goto err; } diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 4963d7fc978..df6d68c7582 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -61,8 +61,8 @@ void embedded_get_error(MYSQL *mysql, MYSQL_DATA *data) { NET *net= &mysql->net; struct embedded_query_result *ei= data->embedded_info; - net->last_errno= ei->last_errno; - strmake(net->last_error, ei->info, sizeof(net->last_error)); + net->client_last_errno= ei->last_errno; + strmake(net->client_last_error, ei->info, sizeof(net->client_last_error)-1); memcpy(net->sqlstate, ei->sqlstate, sizeof(net->sqlstate)); my_free(data, MYF(0)); } @@ -110,12 +110,11 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command, arg_length= header_length; } - thd->net.no_send_error= 0; result= dispatch_command(command, thd, (char *) arg, arg_length); thd->cur_data= 0; if (!skip_check) - result= thd->net.last_errno ? -1 : 0; + result= thd->is_error() ? -1 : 0; return result; } @@ -370,7 +369,7 @@ static void emb_free_embedded_thd(MYSQL *mysql) static const char * emb_read_statistics(MYSQL *mysql) { THD *thd= (THD*)mysql->thd; - return thd->net.last_error; + return thd->is_error() ? thd->main_da.message() : ""; } @@ -675,8 +674,10 @@ int check_embedded_connection(MYSQL *mysql, const char *db) err: { NET *net= &mysql->net; - memcpy(net->last_error, thd->net.last_error, sizeof(net->last_error)); - memcpy(net->sqlstate, thd->net.sqlstate, sizeof(net->sqlstate)); + strmake(net->client_last_error, thd->main_da.message(), sizeof(net->client_last_error)-1); + memcpy(net->sqlstate, + mysql_errno_to_sqlstate(thd->main_da.sql_errno()), + sizeof(net->sqlstate)-1); } return result; } @@ -699,9 +700,8 @@ void THD::clear_data_list() void THD::clear_error() { - net.last_error[0]= 0; - net.last_errno= 0; - net.report_error= 0; + if (main_da.is_error()) + main_da.reset_diagnostics_area(); } static char *dup_str_aux(MEM_ROOT *root, const char *from, uint length, @@ -764,20 +764,18 @@ MYSQL_DATA *THD::alloc_new_dataset() } -/* - stores server_status and warning_count in the current - query result structures - - SYNOPSIS - write_eof_packet() - thd current thread +/** + Stores server_status and warning_count in the current + query result structures. - NOTES - should be called to after we get the recordset-result + @param thd current thread + @note Should be called after we get the recordset-result. 
*/ -static void write_eof_packet(THD *thd) +static +void +write_eof_packet(THD *thd, uint server_status, uint total_warn_count) { if (!thd->mysql) // bootstrap file handling return; @@ -788,13 +786,13 @@ static void write_eof_packet(THD *thd) */ if (thd->is_fatal_error) thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; - thd->cur_data->embedded_info->server_status= thd->server_status; + thd->cur_data->embedded_info->server_status= server_status; /* Don't send warn count during SP execution, as the warn_list is cleared between substatements, and mysqltest gets confused */ thd->cur_data->embedded_info->warning_count= - (thd->spcont ? 0 : min(thd->total_warn_count, 65535)); + (thd->spcont ? 0 : min(total_warn_count, 65535)); } @@ -950,7 +948,7 @@ bool Protocol::send_fields(List *list, uint flags) } if (flags & SEND_EOF) - write_eof_packet(thd); + write_eof_packet(thd, thd->server_status, thd->total_warn_count); DBUG_RETURN(prepare_for_send(list)); err: @@ -990,17 +988,35 @@ bool Protocol_binary::write() return false; } + +/** + Embedded library implementation of OK response. + + This function is used by the server to write 'OK' packet to + the "network" when the server is compiled as an embedded library. + Since there is no network in the embedded configuration, + a different implementation is necessary. + Instead of marshalling response parameters to a network representation + and then writing it to the socket, here we simply copy the data to the + corresponding client-side connection structures. + + @sa Server implementation of net_send_ok in protocol.cc for + description of the arguments. + + @return The function does not return errors. +*/ + void -send_ok(THD *thd,ha_rows affected_rows,ulonglong id,const char *message) +net_send_ok(THD *thd, + uint server_status, uint total_warn_count, + ha_rows affected_rows, ulonglong id, const char *message) { - DBUG_ENTER("send_ok"); + DBUG_ENTER("emb_net_send_ok"); MYSQL_DATA *data; MYSQL *mysql= thd->mysql; - + if (!mysql) // bootstrap file handling DBUG_VOID_RETURN; - if (thd->net.no_send_ok) // hack for re-parsing queries - DBUG_VOID_RETURN; if (!(data= thd->alloc_new_dataset())) return; data->embedded_info->affected_rows= affected_rows; @@ -1009,15 +1025,24 @@ send_ok(THD *thd,ha_rows affected_rows,ulonglong id,const char *message) strmake(data->embedded_info->info, message, sizeof(data->embedded_info->info)-1); - write_eof_packet(thd); + write_eof_packet(thd, server_status, total_warn_count); thd->cur_data= 0; DBUG_VOID_RETURN; } + +/** + Embedded library implementation of EOF response. + + @sa net_send_ok + + @return This function does not return errors. 
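  Like net_send_ok() above, it is invoked from net_end_statement() once the
  statement status has been recorded in the Diagnostics_area. Roughly (the
  real dispatch is the switch in sql/protocol.cc later in this patch):

    switch (thd->main_da.status()) {
    case Diagnostics_area::DA_EOF:
      net_send_eof(thd,
                   thd->main_da.server_status(),
                   thd->main_da.total_warn_count());
      break;
    // ... DA_OK and DA_ERROR are handled analogously ...
    }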
+*/ + void -send_eof(THD *thd) +net_send_eof(THD *thd, uint server_status, uint total_warn_count) { - write_eof_packet(thd); + write_eof_packet(thd, server_status, total_warn_count); thd->cur_data= 0; } diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c index eb47a045669..a8542f6fca9 100644 --- a/libmysqld/libmysqld.c +++ b/libmysqld/libmysqld.c @@ -209,8 +209,9 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user, DBUG_RETURN(mysql); error: - DBUG_PRINT("error",("message: %u (%s)", mysql->net.last_errno, - mysql->net.last_error)); + DBUG_PRINT("error",("message: %u (%s)", + mysql->net.client_last_errno, + mysql->net.client_last_error)); { /* Free alloced memory */ my_bool free_me=mysql->free_me; diff --git a/mysql-test/r/events.result b/mysql-test/r/events.result index d1ee6d1c5a1..7b68914e219 100644 --- a/mysql-test/r/events.result +++ b/mysql-test/r/events.result @@ -403,9 +403,10 @@ ERROR 42S02: Table 'mysql.event' doesn't exist DROP DATABASE IF EXISTS mysqltest_no_such_database; Warnings: Note 1008 Can't drop database 'mysqltest_no_such_database'; database doesn't exist -Error 1146 Table 'mysql.event' doesn't exist CREATE DATABASE mysqltest_db2; DROP DATABASE mysqltest_db2; +Warnings: +Error 1146 Table 'mysql.event' doesn't exist OK, there is an unnecessary warning about the non-existent table but it's not easy to fix and no one complained about it. A similar warning is printed if mysql.proc is missing. diff --git a/mysql-test/r/grant.result b/mysql-test/r/grant.result index 0d40778d5f2..b07f9c1e9e9 100644 --- a/mysql-test/r/grant.result +++ b/mysql-test/r/grant.result @@ -1229,9 +1229,7 @@ set names default; FLUSH PRIVILEGES without procs_priv table. RENAME TABLE mysql.procs_priv TO mysql.procs_gone; FLUSH PRIVILEGES; -Warnings: -Error 1146 Table 'mysql.procs_priv' doesn't exist -Error 1548 Cannot load from mysql.mysql.procs_priv. The table is probably corrupted +ERROR 42S02: Table 'mysql.procs_priv' doesn't exist Assigning privileges without procs_priv table. CREATE DATABASE mysqltest1; CREATE PROCEDURE mysqltest1.test() SQL SECURITY DEFINER diff --git a/mysql-test/suite/rpl/r/rpl_row_tabledefs_2myisam.result b/mysql-test/suite/rpl/r/rpl_row_tabledefs_2myisam.result index 06dc90f18aa..a8e1c8602e0 100644 --- a/mysql-test/suite/rpl/r/rpl_row_tabledefs_2myisam.result +++ b/mysql-test/suite/rpl/r/rpl_row_tabledefs_2myisam.result @@ -123,7 +123,7 @@ Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno 1364 -Last_Error Error in Write_rows event: error during transaction execution on table test.t1_nodef. +Last_Error Error in Write_rows event: error during transaction execution on table test.t1_nodef. Field 'x' doesn't have a default value Skip_Counter 0 Exec_Master_Log_Pos # Relay_Log_Space # @@ -141,7 +141,7 @@ Master_SSL_Verify_Server_Cert No Last_IO_Errno 0 Last_IO_Error Last_SQL_Errno 1364 -Last_SQL_Error Error in Write_rows event: error during transaction execution on table test.t1_nodef. +Last_SQL_Error Error in Write_rows event: error during transaction execution on table test.t1_nodef. 
Field 'x' doesn't have a default value SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; START SLAVE; INSERT INTO t9 VALUES (2); diff --git a/mysql-test/suite/rpl/r/rpl_row_tabledefs_3innodb.result b/mysql-test/suite/rpl/r/rpl_row_tabledefs_3innodb.result index 3911fe89b7f..12203379269 100644 --- a/mysql-test/suite/rpl/r/rpl_row_tabledefs_3innodb.result +++ b/mysql-test/suite/rpl/r/rpl_row_tabledefs_3innodb.result @@ -123,7 +123,7 @@ Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno 1364 -Last_Error Error in Write_rows event: error during transaction execution on table test.t1_nodef. +Last_Error Error in Write_rows event: error during transaction execution on table test.t1_nodef. Field 'x' doesn't have a default value Skip_Counter 0 Exec_Master_Log_Pos # Relay_Log_Space # @@ -141,7 +141,7 @@ Master_SSL_Verify_Server_Cert No Last_IO_Errno 0 Last_IO_Error Last_SQL_Errno 1364 -Last_SQL_Error Error in Write_rows event: error during transaction execution on table test.t1_nodef. +Last_SQL_Error Error in Write_rows event: error during transaction execution on table test.t1_nodef. Field 'x' doesn't have a default value SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2; START SLAVE; INSERT INTO t9 VALUES (2); diff --git a/mysql-test/t/grant.test b/mysql-test/t/grant.test index 2a1940f4326..84660bbe66b 100644 --- a/mysql-test/t/grant.test +++ b/mysql-test/t/grant.test @@ -1284,6 +1284,7 @@ set names default; # --echo FLUSH PRIVILEGES without procs_priv table. RENAME TABLE mysql.procs_priv TO mysql.procs_gone; +--error ER_NO_SUCH_TABLE FLUSH PRIVILEGES; --echo Assigning privileges without procs_priv table. CREATE DATABASE mysqltest1; diff --git a/server-tools/instance-manager/mysql_connection.cc b/server-tools/instance-manager/mysql_connection.cc index bf08f963aa3..3233b7513a1 100644 --- a/server-tools/instance-manager/mysql_connection.cc +++ b/server-tools/instance-manager/mysql_connection.cc @@ -257,7 +257,7 @@ int Mysql_connection::do_command() return 1; if (thread_registry->is_shutdown()) return 1; - net_send_error(&net, net.last_errno); + net_send_error(&net, net.client_last_errno); net.error= 0; return 0; } diff --git a/sql-common/client.c b/sql-common/client.c index f149442f12e..1a0f9c64d7d 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -289,8 +289,8 @@ void set_mysql_error(MYSQL *mysql, int errcode, const char *sqlstate) DBUG_ASSERT(mysql != 0); net= &mysql->net; - net->last_errno= errcode; - strmov(net->last_error, ER(errcode)); + net->client_last_errno= errcode; + strmov(net->client_last_error, ER(errcode)); strmov(net->sqlstate, sqlstate); DBUG_VOID_RETURN; @@ -304,8 +304,8 @@ void set_mysql_error(MYSQL *mysql, int errcode, const char *sqlstate) void net_clear_error(NET *net) { - net->last_errno= 0; - net->last_error[0]= '\0'; + net->client_last_errno= 0; + net->client_last_error[0]= '\0'; strmov(net->sqlstate, not_error_sqlstate); } @@ -331,9 +331,9 @@ static void set_mysql_extended_error(MYSQL *mysql, int errcode, DBUG_ASSERT(mysql != 0); net= &mysql->net; - net->last_errno= errcode; + net->client_last_errno= errcode; va_start(args, format); - my_vsnprintf(net->last_error, sizeof(net->last_error)-1, + my_vsnprintf(net->client_last_error, sizeof(net->client_last_error)-1, format, args); va_end(args); strmov(net->sqlstate, sqlstate); @@ -667,7 +667,7 @@ cli_safe_read(MYSQL *mysql) return (packet_error); #endif /*MYSQL_SERVER*/ end_server(mysql); - set_mysql_error(mysql, net->last_errno == ER_NET_PACKET_TOO_LARGE ? 
+ set_mysql_error(mysql, net->client_last_errno == ER_NET_PACKET_TOO_LARGE ? CR_NET_PACKET_TOO_LARGE: CR_SERVER_LOST, unknown_sqlstate); return (packet_error); } @@ -676,7 +676,7 @@ cli_safe_read(MYSQL *mysql) if (len > 3) { char *pos=(char*) net->read_pos+1; - net->last_errno=uint2korr(pos); + net->client_last_errno=uint2korr(pos); pos+=2; len-=2; if (protocol_41(mysql) && pos[0] == '#') @@ -684,8 +684,8 @@ cli_safe_read(MYSQL *mysql) strmake(net->sqlstate, pos+1, SQLSTATE_LENGTH); pos+= SQLSTATE_LENGTH+1; } - (void) strmake(net->last_error,(char*) pos, - min((uint) len,(uint) sizeof(net->last_error)-1)); + (void) strmake(net->client_last_error,(char*) pos, + min((uint) len,(uint) sizeof(net->client_last_error)-1)); } else set_mysql_error(mysql, CR_UNKNOWN_ERROR, unknown_sqlstate); @@ -701,7 +701,9 @@ cli_safe_read(MYSQL *mysql) mysql->server_status&= ~SERVER_MORE_RESULTS_EXISTS; DBUG_PRINT("error",("Got error: %d/%s (%s)", - net->last_errno, net->sqlstate, net->last_error)); + net->client_last_errno, + net->sqlstate, + net->client_last_error)); return(packet_error); } return len; @@ -744,7 +746,6 @@ cli_advanced_command(MYSQL *mysql, enum enum_server_command command, } net_clear_error(net); - net->report_error=0; mysql->info=0; mysql->affected_rows= ~(my_ulonglong) 0; /* @@ -759,7 +760,7 @@ cli_advanced_command(MYSQL *mysql, enum enum_server_command command, { DBUG_PRINT("error",("Can't send command to server. Error: %d", socket_errno)); - if (net->last_errno == ER_NET_PACKET_TOO_LARGE) + if (net->client_last_errno == ER_NET_PACKET_TOO_LARGE) { set_mysql_error(mysql, CR_NET_PACKET_TOO_LARGE, unknown_sqlstate); goto end; @@ -836,16 +837,15 @@ static my_bool is_NT(void) #ifdef CHECK_LICENSE -/* +/** Check server side variable 'license'. - If the variable does not exist or does not contain 'Commercial', + + If the variable does not exist or does not contain 'Commercial', we're talking to non-commercial server from commercial client. - SYNOPSIS - check_license() - RETURN VALUE - 0 success - !0 network error or the server is not commercial. - Error code is saved in mysql->net.last_errno. + + @retval 0 success + @retval !0 network error or the server is not commercial. + Error code is saved in mysql->net.client_last_errno. */ static int check_license(MYSQL *mysql) @@ -858,7 +858,7 @@ static int check_license(MYSQL *mysql) if (mysql_real_query(mysql, query, sizeof(query)-1)) { - if (net->last_errno == ER_UNKNOWN_SYSTEM_VARIABLE) + if (net->client_last_errno == ER_UNKNOWN_SYSTEM_VARIABLE) { set_mysql_extended_error(mysql, CR_WRONG_LICENSE, unknown_sqlstate, ER(CR_WRONG_LICENSE), required_license); @@ -873,7 +873,7 @@ static int check_license(MYSQL *mysql) two is ever true for server variables now), or column value mismatch, set wrong license error. 
*/ - if (!net->last_errno && + if (!net->client_last_errno && (!row || !row[0] || strncmp(row[0], required_license, sizeof(required_license)))) { @@ -881,7 +881,7 @@ static int check_license(MYSQL *mysql) ER(CR_WRONG_LICENSE), required_license); } mysql_free_result(res); - return net->last_errno; + return net->client_last_errno; } #endif /* CHECK_LICENSE */ @@ -2090,7 +2090,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, if ((pkt_length=cli_safe_read(mysql)) == packet_error) { - if (mysql->net.last_errno == CR_SERVER_LOST) + if (mysql->net.client_last_errno == CR_SERVER_LOST) set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, ER(CR_SERVER_LOST_EXTENDED), "reading initial communication packet", @@ -2324,7 +2324,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, if ((pkt_length=cli_safe_read(mysql)) == packet_error) { - if (mysql->net.last_errno == CR_SERVER_LOST) + if (mysql->net.client_last_errno == CR_SERVER_LOST) set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, ER(CR_SERVER_LOST_EXTENDED), "reading authorization packet", @@ -2352,7 +2352,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, /* Read what server thinks about out new auth message report */ if (cli_safe_read(mysql) == packet_error) { - if (mysql->net.last_errno == CR_SERVER_LOST) + if (mysql->net.client_last_errno == CR_SERVER_LOST) set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, ER(CR_SERVER_LOST_EXTENDED), "reading final connect information", @@ -2371,7 +2371,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, if (db && mysql_select_db(mysql, db)) { - if (mysql->net.last_errno == CR_SERVER_LOST) + if (mysql->net.client_last_errno == CR_SERVER_LOST) set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, ER(CR_SERVER_LOST_EXTENDED), "Setting intital database", @@ -2415,7 +2415,9 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, error: reset_sigpipe(mysql); DBUG_PRINT("error",("message: %u/%s (%s)", - net->last_errno, net->sqlstate, net->last_error)); + net->client_last_errno, + net->sqlstate, + net->client_last_error)); { /* Free alloced memory */ end_server(mysql); @@ -2473,8 +2475,8 @@ my_bool mysql_reconnect(MYSQL *mysql) mysql->db, mysql->port, mysql->unix_socket, mysql->client_flag | CLIENT_REMEMBER_OPTIONS)) { - mysql->net.last_errno= tmp_mysql.net.last_errno; - strmov(mysql->net.last_error, tmp_mysql.net.last_error); + mysql->net.client_last_errno= tmp_mysql.net.client_last_errno; + strmov(mysql->net.client_last_error, tmp_mysql.net.client_last_error); strmov(mysql->net.sqlstate, tmp_mysql.net.sqlstate); DBUG_RETURN(1); } @@ -2483,8 +2485,8 @@ my_bool mysql_reconnect(MYSQL *mysql) DBUG_PRINT("error", ("mysql_set_character_set() failed")); bzero((char*) &tmp_mysql.options,sizeof(tmp_mysql.options)); mysql_close(&tmp_mysql); - mysql->net.last_errno= tmp_mysql.net.last_errno; - strmov(mysql->net.last_error, tmp_mysql.net.last_error); + mysql->net.client_last_errno= tmp_mysql.net.client_last_errno; + strmov(mysql->net.client_last_error, tmp_mysql.net.client_last_error); strmov(mysql->net.sqlstate, tmp_mysql.net.sqlstate); DBUG_RETURN(1); } @@ -3077,13 +3079,13 @@ unsigned int STDCALL mysql_num_fields(MYSQL_RES *res) uint STDCALL mysql_errno(MYSQL *mysql) { - return mysql->net.last_errno; + return mysql->net.client_last_errno; } const char * STDCALL mysql_error(MYSQL *mysql) { - return mysql->net.last_error; + return 
mysql->net.client_last_error; } @@ -3152,7 +3154,7 @@ int STDCALL mysql_set_character_set(MYSQL *mysql, const char *cs_name) ER(CR_CANT_READ_CHARSET), cs_name, cs_dir_name); } charsets_dir= save_csdir; - return mysql->net.last_errno; + return mysql->net.client_last_errno; } diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index be75eff2575..d6e8df9f0ff 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -257,6 +257,10 @@ static void run_query(THD *thd, char *buf, char *end, thd->options&= ~OPTION_BIN_LOG; DBUG_PRINT("query", ("%s", thd->query)); + + DBUG_ASSERT(!thd->in_sub_stmt); + DBUG_ASSERT(!thd->prelocked_mode); + mysql_parse(thd, thd->query, thd->query_length, &found_semicolon); if (no_print_error && thd->is_slave_error) @@ -265,14 +269,27 @@ static void run_query(THD *thd, char *buf, char *end, Thd_ndb *thd_ndb= get_thd_ndb(thd); for (i= 0; no_print_error[i]; i++) if ((thd_ndb->m_error_code == no_print_error[i]) || - (thd->net.last_errno == (unsigned)no_print_error[i])) + (thd->main_da.sql_errno() == (unsigned) no_print_error[i])) break; if (!no_print_error[i]) sql_print_error("NDB: %s: error %s %d(ndb: %d) %d %d", - buf, thd->net.last_error, thd->net.last_errno, + buf, + thd->main_da.message(), + thd->main_da.sql_errno(), thd_ndb->m_error_code, (int) thd->is_error(), thd->is_slave_error); } + /* + XXX: this code is broken. mysql_parse()/mysql_reset_thd_for_next_command() + can not be called from within a statement, and + run_query() can be called from anywhere, including from within + a sub-statement. + This particular reset is a temporary hack to avoid an assert + for double assignment of the diagnostics area when run_query() + is called from ndbcluster_reset_logs(), which is called from + mysql_flush(). + */ + thd->main_da.reset_diagnostics_area(); thd->options= save_thd_options; thd->query_length= save_query_length; @@ -2301,8 +2318,8 @@ static int open_ndb_binlog_index(THD *thd, TABLE_LIST *tables, if (open_tables(thd, &tables, &counter, MYSQL_LOCK_IGNORE_FLUSH)) { sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'", - thd->net.last_errno, - thd->net.last_error ? thd->net.last_error : ""); + thd->main_da.sql_errno(), + thd->main_da.message()); thd->proc_info= save_proc_info; return -1; } diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 1400d9da753..eca577cb6c8 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -1753,6 +1753,7 @@ partition_element *ha_partition::find_partition_element(uint part_id) return part_elem; } DBUG_ASSERT(0); + my_error(ER_OUT_OF_RESOURCES, MYF(0)); current_thd->fatal_error(); // Abort return NULL; } diff --git a/sql/item_func.cc b/sql/item_func.cc index 101832b58a9..d219987e46b 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2949,7 +2949,6 @@ udf_handler::fix_fields(THD *thd, Item_result_field *func, } } } - thd->net.last_error[0]=0; Udf_func_init init= u_d->func_init; if ((error=(uchar) init(&initid, &f_args, init_msg_buff))) { diff --git a/sql/log_event.cc b/sql/log_event.cc index 2b3037aedcc..00e3dc89f6b 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1611,7 +1611,8 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, if (killed_status_arg == THD::KILLED_NO_VALUE) killed_status_arg= thd_arg->killed; error_code= - (killed_status_arg == THD::NOT_KILLED) ? thd_arg->net.last_errno : + (killed_status_arg == THD::NOT_KILLED) ? + (thd_arg->is_error() ? 
thd_arg->main_da.sql_errno() : 0) : ((thd_arg->system_thread & SYSTEM_THREAD_DELAYED_INSERT) ? 0 : thd_arg->killed_errno()); @@ -2332,7 +2333,7 @@ START SLAVE; . Query: '%s'", expected_error, thd->query); } /* If the query was not ignored, it is printed to the general log */ - if (thd->net.last_errno != ER_SLAVE_IGNORED_TABLE) + if (!thd->is_error() || thd->main_da.sql_errno() != ER_SLAVE_IGNORED_TABLE) general_log_write(thd, COM_QUERY, thd->query, thd->query_length); compare_errors: @@ -2341,9 +2342,10 @@ compare_errors: If we expected a non-zero error code, and we don't get the same error code, and none of them should be ignored. */ - DBUG_PRINT("info",("expected_error: %d last_errno: %d", - expected_error, thd->net.last_errno)); - if ((expected_error != (actual_error= thd->net.last_errno)) && + actual_error= thd->is_error() ? thd->main_da.sql_errno() : 0; + DBUG_PRINT("info",("expected_error: %d sql_errno: %d", + expected_error, actual_error)); + if ((expected_error != actual_error) && expected_error && !ignored_error_code(actual_error) && !ignored_error_code(expected_error)) @@ -2355,7 +2357,7 @@ Error on master: '%s' (%d), Error on slave: '%s' (%d). \ Default database: '%s'. Query: '%s'", ER_SAFE(expected_error), expected_error, - actual_error ? thd->net.last_error: "no error", + actual_error ? thd->main_da.message() : "no error", actual_error, print_slave_db_safe(db), query_arg); thd->is_slave_error= 1; @@ -2377,7 +2379,7 @@ Default database: '%s'. Query: '%s'", { rli->report(ERROR_LEVEL, actual_error, "Error '%s' on query. Default database: '%s'. Query: '%s'", - (actual_error ? thd->net.last_error : + (actual_error ? thd->main_da.message() : "unexpected success or fatal error"), print_slave_db_safe(thd->db), query_arg); thd->is_slave_error= 1; @@ -3720,8 +3722,11 @@ error: /* this err/sql_errno code is copy-paste from net_send_error() */ const char *err; int sql_errno; - if ((err=thd->net.last_error)[0]) - sql_errno=thd->net.last_errno; + if (thd->is_error()) + { + err= thd->main_da.message(); + sql_errno= thd->main_da.sql_errno(); + } else { sql_errno=ER_UNKNOWN_ERROR; @@ -6222,10 +6227,10 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli) Error reporting borrowed from Query_log_event with many excessive simplifications (we don't honour --slave-skip-errors) */ - uint actual_error= thd->net.last_errno; + uint actual_error= thd->main_da.sql_errno(); rli->report(ERROR_LEVEL, actual_error, "Error '%s' in %s event: when locking tables", - (actual_error ? thd->net.last_error : + (actual_error ? thd->main_da.message(): "unexpected success or fatal error"), get_type_str()); thd->is_fatal_error= 1; @@ -6266,10 +6271,10 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli) Error reporting borrowed from Query_log_event with many excessive simplifications (we don't honour --slave-skip-errors) */ - uint actual_error= thd->net.last_errno; + uint actual_error= thd->main_da.sql_errno(); rli->report(ERROR_LEVEL, actual_error, "Error '%s' on reopening tables", - (actual_error ? thd->net.last_error : + (actual_error ? thd->main_da.message() : "unexpected success or fatal error")); thd->is_slave_error= 1; } @@ -6425,10 +6430,11 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli) break; default: - rli->report(ERROR_LEVEL, thd->net.last_errno, + rli->report(ERROR_LEVEL, + thd->is_error() ? thd->main_da.sql_errno() : 0, "Error in %s event: row application failed. %s", get_type_str(), - thd->net.last_error ? thd->net.last_error : ""); + thd->is_error() ? 
thd->main_da.message() : ""); thd->is_slave_error= 1; break; } @@ -6475,12 +6481,13 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli) if (error) { /* error has occured during the transaction */ - rli->report(ERROR_LEVEL, thd->net.last_errno, + rli->report(ERROR_LEVEL, + thd->is_error() ? thd->main_da.sql_errno() : 0, "Error in %s event: error during transaction execution " "on table %s.%s. %s", get_type_str(), table->s->db.str, table->s->table_name.str, - thd->net.last_error ? thd->net.last_error : ""); + thd->is_error() ? thd->main_da.message() : ""); /* If one day we honour --skip-slave-errors in row-based replication, and @@ -7094,10 +7101,10 @@ int Table_map_log_event::do_apply_event(Relay_log_info const *rli) Error reporting borrowed from Query_log_event with many excessive simplifications (we don't honour --slave-skip-errors) */ - uint actual_error= thd->net.last_errno; + uint actual_error= thd->main_da.sql_errno(); rli->report(ERROR_LEVEL, actual_error, "Error '%s' on opening table `%s`.`%s`", - (actual_error ? thd->net.last_error : + (actual_error ? thd->main_da.message() : "unexpected success or fatal error"), table_list->db, table_list->table_name); thd->is_slave_error= 1; @@ -7625,8 +7632,11 @@ Write_rows_log_event::do_exec_row(const Relay_log_info *const rli) DBUG_ASSERT(m_table != NULL); int error= write_row(rli, TRUE /* overwrite */); - if (error && !thd->net.last_errno) - thd->net.last_errno= error; + if (error && !thd->is_error()) + { + DBUG_ASSERT(0); + my_error(ER_UNKNOWN_ERROR, MYF(0)); + } return error; } diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index c6b691ec010..6d5d86e42fe 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -74,10 +74,10 @@ Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli Error reporting borrowed from Query_log_event with many excessive simplifications (we don't honour --slave-skip-errors) */ - uint actual_error= thd->net.last_errno; + uint actual_error= thd->main_da.sql_errno(); rli->report(ERROR_LEVEL, actual_error, "Error '%s' in %s event: when locking tables", - (actual_error ? thd->net.last_error : + (actual_error ? thd->main_da.message() : "unexpected success or fatal error"), ev->get_type_str()); thd->is_fatal_error= 1; @@ -118,10 +118,10 @@ Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli Error reporting borrowed from Query_log_event with many excessive simplifications (we don't honour --slave-skip-errors) */ - uint actual_error= thd->net.last_errno; + uint actual_error= thd->main_da.sql_errno(); rli->report(ERROR_LEVEL, actual_error, "Error '%s' on reopening tables", - (actual_error ? thd->net.last_error : + (actual_error ? thd->main_da.message() : "unexpected success or fatal error")); thd->is_slave_error= 1; } @@ -251,10 +251,10 @@ Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli break; default: - rli->report(ERROR_LEVEL, thd->net.last_errno, + rli->report(ERROR_LEVEL, thd->main_da.sql_errno(), "Error in %s event: row application failed. %s", ev->get_type_str(), - thd->net.last_error ? thd->net.last_error : ""); + thd->is_error() ? 
thd->main_da.message() : ""); thd->is_slave_error= 1; break; } @@ -280,12 +280,12 @@ Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli if (error) { /* error has occured during the transaction */ - rli->report(ERROR_LEVEL, thd->net.last_errno, + rli->report(ERROR_LEVEL, thd->main_da.sql_errno(), "Error in %s event: error during transaction execution " "on table %s.%s. %s", ev->get_type_str(), table->s->db.str, table->s->table_name.str, - thd->net.last_error ? thd->net.last_error : ""); + thd->is_error() ? thd->main_da.message() : ""); /* If one day we honour --skip-slave-errors in row-based replication, and diff --git a/sql/mysqld.cc b/sql/mysqld.cc index fad2e5dcd22..1938602f372 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2587,21 +2587,8 @@ int my_message_sql(uint error, const char *str, myf MyFlags) MYSQL_ERROR::WARN_LEVEL_ERROR)) DBUG_RETURN(0); - if (thd->spcont && - thd->spcont->handle_error(error, MYSQL_ERROR::WARN_LEVEL_ERROR, thd)) - { - DBUG_RETURN(0); - } - thd->is_slave_error= 1; // needed to catch query errors during replication - if (!thd->no_warnings_for_error) - { - thd->no_warnings_for_error= TRUE; - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, error, str); - thd->no_warnings_for_error= FALSE; - } - /* thd->lex->current_select == 0 if lex structure is not inited (not query command (COM_QUERY)) @@ -2618,14 +2605,39 @@ int my_message_sql(uint error, const char *str, myf MyFlags) } else { - NET *net= &thd->net; - net->report_error= 1; - query_cache_abort(net); - if (!net->last_error[0]) // Return only first message + if (! thd->main_da.is_error()) // Return only first message { - strmake(net->last_error, str, sizeof(net->last_error)-1); - net->last_errno= error ? error : ER_UNKNOWN_ERROR; + if (error == 0) + error= ER_UNKNOWN_ERROR; + if (str == NULL) + str= ER(error); + thd->main_da.set_error_status(thd, error, str); } + query_cache_abort(&thd->net); + } + /* + If a continue handler is found, the error message will be cleared + by the stored procedures code. + */ + if (thd->spcont && + thd->spcont->handle_error(error, MYSQL_ERROR::WARN_LEVEL_ERROR, thd)) + { + /* + Do not push any warnings, a handled error must be completely + silenced. + */ + DBUG_RETURN(0); + } + + if (!thd->no_warnings_for_error) + { + /* + Suppress infinite recursion if there a memory allocation error + inside push_warning. + */ + thd->no_warnings_for_error= TRUE; + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, error, str); + thd->no_warnings_for_error= FALSE; } } if (!thd || MyFlags & ME_NOREFRESH) diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 18cf1ebae5b..73eb340fbc0 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -123,20 +123,18 @@ my_bool my_net_init(NET *net, Vio* vio) MYF(MY_WME)))) DBUG_RETURN(1); net->buff_end=net->buff+net->max_packet; - net->no_send_ok= net->no_send_error= 0; net->error=0; net->return_errno=0; net->return_status=0; net->pkt_nr=net->compress_pkt_nr=0; net->write_pos=net->read_pos = net->buff; - net->last_error[0]=0; + net->client_last_error[0]=0; net->compress=0; net->reading_or_writing=0; net->where_b = net->remain_in_buf=0; - net->last_errno=0; + net->client_last_errno=0; #ifdef USE_QUERY_CACHE query_cache_init_query(net); #else net->query_cache_query= 0; #endif - net->report_error= 0; if (vio != 0) /* If real connection */ { @@ -176,9 +174,12 @@ my_bool net_realloc(NET *net, size_t length) { DBUG_PRINT("error", ("Packet too large. Max size: %lu", net->max_packet_size)); + /* @todo: 1 and 2 codes are identical. 
*/ net->error= 1; - net->report_error= 1; - net->last_errno= ER_NET_PACKET_TOO_LARGE; + net->client_last_errno= ER_NET_PACKET_TOO_LARGE; +#ifdef MYSQL_SERVER + my_error(ER_NET_PACKET_TOO_LARGE, MYF(0)); +#endif DBUG_RETURN(1); } pkt_length = (length+IO_SIZE-1) & ~(IO_SIZE-1); @@ -190,9 +191,10 @@ my_bool net_realloc(NET *net, size_t length) NET_HEADER_SIZE + COMP_HEADER_SIZE, MYF(MY_WME)))) { + /* @todo: 1 and 2 codes are identical. */ net->error= 1; - net->report_error= 1; - net->last_errno= ER_OUT_OF_RESOURCES; + net->client_last_errno= ER_OUT_OF_RESOURCES; + /* In the server the error is reported by MY_WME flag. */ DBUG_RETURN(1); } net->buff=net->write_pos=buff; @@ -582,12 +584,9 @@ net_real_write(NET *net,const uchar *packet, size_t len) if (!(b= (uchar*) my_malloc(len + NET_HEADER_SIZE + COMP_HEADER_SIZE, MYF(MY_WME)))) { -#ifdef MYSQL_SERVER - net->last_errno= ER_OUT_OF_RESOURCES; net->error= 2; - /* TODO is it needed to set this variable if we have no socket */ - net->report_error= 1; -#endif + net->client_last_errno= ER_OUT_OF_RESOURCES; + /* In the server, the error is reported by MY_WME flag. */ net->reading_or_writing= 0; DBUG_RETURN(1); } @@ -638,11 +637,11 @@ net_real_write(NET *net,const uchar *packet, size_t len) "%s: my_net_write: fcntl returned error %d, aborting thread\n", my_progname,vio_errno(net->vio)); #endif /* EXTRA_DEBUG */ -#ifdef MYSQL_SERVER - net->last_errno= ER_NET_ERROR_ON_WRITE; -#endif net->error= 2; /* Close socket */ - net->report_error= 1; + net->client_last_errno= ER_NET_PACKET_TOO_LARGE; +#ifdef MYSQL_SERVER + my_error(ER_NET_PACKET_TOO_LARGE, MYF(0)); +#endif goto end; } retry_count=0; @@ -669,10 +668,10 @@ net_real_write(NET *net,const uchar *packet, size_t len) } #endif /* defined(THREAD_SAFE_CLIENT) && !defined(MYSQL_SERVER) */ net->error= 2; /* Close socket */ - net->report_error= 1; + net->client_last_errno= (interrupted ? ER_NET_WRITE_INTERRUPTED : + ER_NET_ERROR_ON_WRITE); #ifdef MYSQL_SERVER - net->last_errno= (interrupted ? ER_NET_WRITE_INTERRUPTED : - ER_NET_ERROR_ON_WRITE); + my_error(net->client_last_errno, MYF(0)); #endif /* MYSQL_SERVER */ break; } @@ -849,9 +848,9 @@ my_real_read(NET *net, size_t *complen) #endif /* EXTRA_DEBUG */ len= packet_error; net->error= 2; /* Close socket */ - net->report_error= 1; + net->client_last_errno= ER_NET_FCNTL_ERROR; #ifdef MYSQL_SERVER - net->last_errno= ER_NET_FCNTL_ERROR; + my_error(ER_NET_FCNTL_ERROR, MYF(0)); #endif goto end; } @@ -881,10 +880,11 @@ my_real_read(NET *net, size_t *complen) remain, vio_errno(net->vio), (long) length)); len= packet_error; net->error= 2; /* Close socket */ - net->report_error= 1; + net->client_last_errno= (vio_was_interrupted(net->vio) ? + ER_NET_READ_INTERRUPTED : + ER_NET_READ_ERROR); #ifdef MYSQL_SERVER - net->last_errno= (vio_was_interrupted(net->vio) ? ER_NET_READ_INTERRUPTED : - ER_NET_READ_ERROR); + my_error(net->client_last_errno, MYF(0)); #endif goto end; } @@ -915,9 +915,9 @@ my_real_read(NET *net, size_t *complen) #endif } len= packet_error; - net->report_error= 1; + /* Not a NET error on the client. XXX: why? 
*/ #ifdef MYSQL_SERVER - net->last_errno=ER_NET_PACKETS_OUT_OF_ORDER; + my_error(ER_NET_PACKETS_OUT_OF_ORDER, MYF(0)); #endif goto end; } @@ -1101,9 +1101,9 @@ my_net_read(NET *net) &complen)) { net->error= 2; /* caller will close socket */ - net->report_error= 1; + net->client_last_errno= ER_NET_UNCOMPRESS_ERROR; #ifdef MYSQL_SERVER - net->last_errno=ER_NET_UNCOMPRESS_ERROR; + my_error(ER_NET_UNCOMPRESS_ERROR, MYF(0)); #endif return packet_error; } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index c67687b2bde..9b169bc8739 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1271,7 +1271,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler) the storage engine calls in question happen to never fail with the existing storage engines. */ - thd->net.report_error= 1; /* purecov: inspected */ + my_error(ER_OUT_OF_RESOURCES, MYF(0)); /* purecov: inspected */ /* Caller will free the memory */ goto failure; /* purecov: inspected */ } diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 12ad504d738..91786ff3f4b 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -170,6 +170,7 @@ int opt_sum_query(TABLE_LIST *tables, List &all_fields,COND *conds) if(error) { tl->table->file->print_error(error, MYF(0)); + tl->table->in_use->fatal_error(); return error; } count*= tl->table->file->stats.records; @@ -418,6 +419,7 @@ int opt_sum_query(TABLE_LIST *tables, List &all_fields,COND *conds) return HA_ERR_KEY_NOT_FOUND; // No rows matching WHERE /* HA_ERR_LOCK_DEADLOCK or some other error */ table->file->print_error(error, MYF(0)); + table->in_use->fatal_error(); return(error); } removed_tables|= table->map; diff --git a/sql/protocol.cc b/sql/protocol.cc index 713f4ed3d25..f7b3a496447 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -23,13 +23,20 @@ #endif #include "mysql_priv.h" -#include "sp_rcontext.h" #include static const unsigned int PACKET_BUFFER_EXTRA_ALLOC= 1024; +/* Declared non-static only because of the embedded library. */ void net_send_error_packet(THD *thd, uint sql_errno, const char *err); +void +net_send_ok(THD *thd, + uint server_status, uint total_warn_count, + ha_rows affected_rows, ulonglong id, const char *message); +void +net_send_eof(THD *thd, uint server_status, uint total_warn_count); #ifndef EMBEDDED_LIBRARY -static void write_eof_packet(THD *thd, NET *net); +static void write_eof_packet(THD *thd, NET *net, + uint server_status, uint total_warn_count); #endif #ifndef EMBEDDED_LIBRARY @@ -68,58 +75,23 @@ bool Protocol_binary::net_store_data(const uchar *from, size_t length) void net_send_error(THD *thd, uint sql_errno, const char *err) { NET *net= &thd->net; - bool generate_warning= thd->killed != THD::KILL_CONNECTION; DBUG_ENTER("net_send_error"); - DBUG_PRINT("enter",("sql_errno: %d err: %s", sql_errno, - err ? err : net->last_error[0] ? 
- net->last_error : "NULL")); DBUG_ASSERT(!thd->spcont); + DBUG_ASSERT(sql_errno); + DBUG_ASSERT(err && err[0]); - if (thd->killed == THD::KILL_QUERY || thd->killed == THD::KILL_BAD_DATA) - { - thd->killed= THD::NOT_KILLED; - thd->mysys_var->abort= 0; - } + DBUG_PRINT("enter",("sql_errno: %d err: %s", sql_errno, err)); - if (net && net->no_send_error) - { - thd->clear_error(); - thd->is_fatal_error= 0; // Error message is given - DBUG_PRINT("info", ("sending error messages prohibited")); - DBUG_VOID_RETURN; - } - - thd->is_slave_error= 1; // needed to catch query errors during replication - if (!err) - { - if (sql_errno) - err=ER(sql_errno); - else - { - if ((err=net->last_error)[0]) - { - sql_errno=net->last_errno; - generate_warning= 0; // This warning has already been given - } - else - { - sql_errno=ER_UNKNOWN_ERROR; - err=ER(sql_errno); /* purecov: inspected */ - } - } - } - - if (generate_warning) - { - /* Error that we have not got with my_error() */ - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, sql_errno, err); - } + /* + It's one case when we can push an error even though there + is an OK or EOF already. + */ + thd->main_da.can_overwrite_status= TRUE; net_send_error_packet(thd, sql_errno, err); - thd->is_fatal_error= 0; // Error message is given - thd->net.report_error= 0; + thd->main_da.can_overwrite_status= FALSE; /* Abort multi-result sets */ thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; @@ -149,23 +121,21 @@ void net_send_error(THD *thd, uint sql_errno, const char *err) warning_count Stored in 2 bytes; New in 4.1 protocol message Stored as packed length (1-9 bytes) + message Is not stored if no message - - If net->no_send_ok return without sending packet */ #ifndef EMBEDDED_LIBRARY void -send_ok(THD *thd, ha_rows affected_rows, ulonglong id, const char *message) +net_send_ok(THD *thd, + uint server_status, uint total_warn_count, + ha_rows affected_rows, ulonglong id, const char *message) { NET *net= &thd->net; uchar buff[MYSQL_ERRMSG_SIZE+10],*pos; DBUG_ENTER("send_ok"); - if (net->no_send_ok || !net->vio) // hack for re-parsing queries + if (! net->vio) // hack for re-parsing queries { - DBUG_PRINT("info", ("no send ok: %s, vio present: %s", - (net->no_send_ok ? "YES" : "NO"), - (net->vio ? 
"YES" : "NO"))); + DBUG_PRINT("info", ("vio present: NO")); DBUG_VOID_RETURN; } @@ -178,28 +148,29 @@ send_ok(THD *thd, ha_rows affected_rows, ulonglong id, const char *message) ("affected_rows: %lu id: %lu status: %u warning_count: %u", (ulong) affected_rows, (ulong) id, - (uint) (thd->server_status & 0xffff), - (uint) thd->total_warn_count)); - int2store(pos,thd->server_status); + (uint) (server_status & 0xffff), + (uint) total_warn_count)); + int2store(pos, server_status); pos+=2; /* We can only return up to 65535 warnings in two bytes */ - uint tmp= min(thd->total_warn_count, 65535); + uint tmp= min(total_warn_count, 65535); int2store(pos, tmp); pos+= 2; } else if (net->return_status) // For 4.0 protocol { - int2store(pos,thd->server_status); + int2store(pos, server_status); pos+=2; } - if (message) + thd->main_da.can_overwrite_status= TRUE; + + if (message && message[0]) pos= net_store_data(pos, (uchar*) message, strlen(message)); VOID(my_net_write(net, buff, (size_t) (pos-buff))); VOID(net_flush(net)); - /* We can't anymore send an error to the client */ - thd->net.report_error= 0; - thd->net.no_send_error= 1; + + thd->main_da.can_overwrite_status= FALSE; DBUG_PRINT("info", ("OK sent, so no more error sending allowed")); DBUG_VOID_RETURN; @@ -211,7 +182,7 @@ static uchar eof_buff[1]= { (uchar) 254 }; /* Marker for end of fields */ Send eof (= end of result set) to the client SYNOPSIS - send_eof() + net_send_eof() thd Thread handler no_flush Set to 1 if there will be more data to the client, like in send_fields(). @@ -230,15 +201,17 @@ static uchar eof_buff[1]= { (uchar) 254 }; /* Marker for end of fields */ */ void -send_eof(THD *thd) +net_send_eof(THD *thd, uint server_status, uint total_warn_count) { NET *net= &thd->net; - DBUG_ENTER("send_eof"); + DBUG_ENTER("net_send_eof"); + /* Set to TRUE if no active vio, to work well in case of --init-file */ if (net->vio != 0) { - write_eof_packet(thd, net); + thd->main_da.can_overwrite_status= TRUE; + write_eof_packet(thd, net, server_status, total_warn_count); VOID(net_flush(net)); - thd->net.no_send_error= 1; + thd->main_da.can_overwrite_status= FALSE; DBUG_PRINT("info", ("EOF sent, so no more error sending allowed")); } DBUG_VOID_RETURN; @@ -250,7 +223,9 @@ send_eof(THD *thd) write it to the network output buffer. */ -static void write_eof_packet(THD *thd, NET *net) +static void write_eof_packet(THD *thd, NET *net, + uint server_status, + uint total_warn_count) { if (thd->client_capabilities & CLIENT_PROTOCOL_41) { @@ -259,7 +234,7 @@ static void write_eof_packet(THD *thd, NET *net) Don't send warn count during SP execution, as the warn_list is cleared between substatements, and mysqltest gets confused */ - uint tmp= (thd->spcont ? 0 : min(thd->total_warn_count, 65535)); + uint tmp= min(total_warn_count, 65535); buff[0]= 254; int2store(buff+1, tmp); /* @@ -268,8 +243,8 @@ static void write_eof_packet(THD *thd, NET *net) other queries (see the if test in dispatch_command / COM_QUERY) */ if (thd->is_fatal_error) - thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; - int2store(buff+3, thd->server_status); + server_status&= ~SERVER_MORE_RESULTS_EXISTS; + int2store(buff + 3, server_status); VOID(my_net_write(net, buff, 5)); } else @@ -360,6 +335,96 @@ static uchar *net_store_length_fast(uchar *packet, uint length) return packet+2; } +/** + Send the status of the current statement execution over network. + + @param thd in fact, carries two parameters, NET for the transport and + Diagnostics_area as the source of status information. 
+ + In MySQL, there are two types of SQL statements: those that return + a result set and those that return status information only. + + If a statement returns a result set, it consists of 3 parts: + - result set meta-data + - variable number of result set rows (can be 0) + - followed and terminated by EOF or ERROR packet + + Once the client has seen the meta-data information, it always + expects an EOF or ERROR to terminate the result set. If ERROR is + received, the result set rows are normally discarded (this is up + to the client implementation, libmysql at least does discard them). + EOF, on the contrary, means "successfully evaluated the entire + result set". Since we don't know how many rows belong to a result + set until it's evaluated, EOF/ERROR is the indicator of the end + of the row stream. Note, that we can not buffer result set rows + on the server -- there may be an arbitrary number of rows. But + we do buffer the last packet (EOF/ERROR) in the Diagnostics_area and + delay sending it till the very end of execution (here), to be able to + change EOF to an ERROR if commit failed or some other error occurred + during the last cleanup steps taken after execution. + + A statement that does not return a result set doesn't send result + set meta-data either. Instead it returns one of: + - OK packet + - ERROR packet. + Similarly to the EOF/ERROR of the previous statement type, OK/ERROR + packet is "buffered" in the diagnostics area and sent to the client + in the end of statement. + + @pre The diagnostics area is assigned or disabled. It can not be empty + -- we assume that every SQL statement or COM_* command + generates OK, ERROR, or EOF status. + + @post The status information is encoded to protocol format and sent to the + client. + + @return We conventionally return void, since the only type of error + that can happen here is a NET (transport) error, and that one + will become visible when we attempt to read from the NET the + next command. + Diagnostics_area::is_sent is set for debugging purposes only. +*/ + +void net_end_statement(THD *thd) +{ + DBUG_ASSERT(! thd->main_da.is_sent); + + /* Can not be true, but do not take chances in production. */ + if (thd->main_da.is_sent) + return; + + switch (thd->main_da.status()) { + case Diagnostics_area::DA_ERROR: + /* The query failed, send error to log and abort bootstrap. */ + net_send_error(thd, + thd->main_da.sql_errno(), + thd->main_da.message()); + break; + case Diagnostics_area::DA_EOF: + net_send_eof(thd, + thd->main_da.server_status(), + thd->main_da.total_warn_count()); + break; + case Diagnostics_area::DA_OK: + net_send_ok(thd, + thd->main_da.server_status(), + thd->main_da.total_warn_count(), + thd->main_da.affected_rows(), + thd->main_da.last_insert_id(), + thd->main_da.message()); + break; + case Diagnostics_area::DA_DISABLED: + break; + case Diagnostics_area::DA_EMPTY: + default: + DBUG_ASSERT(0); + net_send_ok(thd, thd->server_status, thd->total_warn_count, + 0, 0, NULL); + break; + } + thd->main_da.is_sent= TRUE; +} + /**************************************************************************** Functions used by the protocol functions (like send_ok) to store strings @@ -408,6 +473,17 @@ void Protocol::init(THD *thd_arg) #endif } +/** + Finish the result set with EOF packet, as is expected by the client, + if there is an error evaluating the next row and a continue handler + for the error. 
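  In other words: a SELECT inside a stored program has already sent the
  result-set metadata and possibly some rows when an error is raised and
  then caught by a CONTINUE handler; the row stream still has to be
  terminated so that the client's protocol state stays consistent. A
  plausible shape of the calling code (sketch only; the actual call sites
  are in the stored-program execution paths, not shown in this hunk):

    if (thd->is_error() && thd->spcont)
      thd->protocol->end_partial_result_set(thd);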
+*/ + +void Protocol::end_partial_result_set(THD *thd) +{ + net_send_eof(thd, thd->server_status, 0 /* no warnings, we're inside SP */); +} + bool Protocol::flush() { @@ -573,7 +649,14 @@ bool Protocol::send_fields(List *list, uint flags) } if (flags & SEND_EOF) - write_eof_packet(thd, &thd->net); + { + /* + Mark the end of meta-data result set, and store thd->server_status, + to show that there is no cursor. + Send no warning information, as it will be sent at statement end. + */ + write_eof_packet(thd, &thd->net, thd->server_status, thd->total_warn_count); + } DBUG_RETURN(prepare_for_send(list)); err: diff --git a/sql/protocol.h b/sql/protocol.h index 53584326f03..cd58ec93bbb 100644 --- a/sql/protocol.h +++ b/sql/protocol.h @@ -75,6 +75,7 @@ public: return 0; } virtual bool flush(); + virtual void end_partial_result_set(THD *thd); virtual void prepare_for_resend()=0; virtual bool store_null()=0; @@ -173,9 +174,7 @@ public: void send_warning(THD *thd, uint sql_errno, const char *err=0); void net_send_error(THD *thd, uint sql_errno=0, const char *err=0); -void send_ok(THD *thd, ha_rows affected_rows=0L, ulonglong id=0L, - const char *info=0); -void send_eof(THD *thd); +void net_end_statement(THD *thd); bool send_old_password_request(THD *thd); uchar *net_store_data(uchar *to,const uchar *from, size_t length); uchar *net_store_data(uchar *to,int32 from); diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 834d87532af..e294cf8ae2d 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -685,7 +685,7 @@ int connect_to_master(THD *thd, MYSQL* mysql, Master_info* mi) if (!mi->host || !*mi->host) /* empty host */ { - strmov(mysql->net.last_error, "Master is not configured"); + strmov(mysql->net.client_last_error, "Master is not configured"); DBUG_RETURN(1); } mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, (char *) &slave_net_timeout); @@ -880,6 +880,8 @@ bool load_master_data(THD* thd) cleanup_mysql_results(db_res, cur_table_res - 1, table_res); goto err; } + /* Clear the result of mysql_create_db(). 
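     The diagnostics area now asserts if a second status is stored without a
     reset in between, so code that pushes several statements through one THD
     must drop each intermediate OK explicitly. Roughly (a sketch; the create
     call is the one made a few lines above):

       mysql_create_db(thd, ...);                // records DA_OK on success
       thd->main_da.reset_diagnostics_area();    // forget it before the next step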
*/ + thd->main_da.reset_diagnostics_area(); if (mysql_select_db(&mysql, db) || mysql_real_query(&mysql, STRING_WITH_LEN("SHOW TABLES")) || diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc index ed0dc82cf01..eb32897f937 100644 --- a/sql/rpl_record.cc +++ b/sql/rpl_record.cc @@ -338,12 +338,13 @@ int prepare_record(const Slave_reporting_capability *const log, if (check && ((f->flags & mask) == mask)) { DBUG_ASSERT(log); - log->report(ERROR_LEVEL, ER_NO_DEFAULT_FOR_FIELD, + error= ER_NO_DEFAULT_FOR_FIELD; + log->report(ERROR_LEVEL, error, "Field `%s` of table `%s`.`%s` " "has no default value and cannot be NULL", f->field_name, table->s->db.str, table->s->table_name.str); - error = ER_NO_DEFAULT_FOR_FIELD; + my_error(error, MYF(0), f->field_name); } else f->set_default(); diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 15d7d97affd..3467f6fd67c 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -166,7 +166,7 @@ int init_relay_log_info(Relay_log_info* rli, { sql_print_error("Failed to create a new relay log info file (\ file '%s', errno %d)", fname, my_errno); - msg= current_thd->net.last_error; + msg= current_thd->main_da.message(); goto err; } if (init_io_cache(&rli->info_file, info_fd, IO_SIZE*2, READ_CACHE, 0L,0, @@ -174,7 +174,7 @@ file '%s', errno %d)", fname, my_errno); { sql_print_error("Failed to create a cache on relay log info file '%s'", fname); - msg= current_thd->net.last_error; + msg= current_thd->main_da.message(); goto err; } diff --git a/sql/slave.cc b/sql/slave.cc index 0421c567c65..b1f25ee58da 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -981,17 +981,24 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, } thd->query= query; thd->is_slave_error = 0; - thd->net.no_send_ok = 1; bzero((char*) &tables,sizeof(tables)); tables.db = (char*)db; tables.alias= tables.table_name= (char*)table_name; /* Drop the table if 'overwrite' is true */ - if (overwrite && mysql_rm_table(thd,&tables,1,0)) /* drop if exists */ + if (overwrite) { - sql_print_error("create_table_from_dump: failed to drop the table"); - goto err; + if (mysql_rm_table(thd,&tables,1,0)) /* drop if exists */ + { + sql_print_error("create_table_from_dump: failed to drop the table"); + goto err; + } + else + { + /* Clear the OK result of mysql_rm_table(). */ + thd->main_da.reset_diagnostics_area(); + } } /* Create the table. 
We do not want to log the "create table" statement */ @@ -1012,6 +1019,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, if (thd->is_slave_error) goto err; // mysql_parse took care of the error send + thd->main_da.reset_diagnostics_area(); /* cleanup from CREATE_TABLE */ thd->proc_info = "Opening master dump table"; /* Note: If this function starts to fail for MERGE tables, @@ -1055,7 +1063,6 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, err: close_thread_tables(thd); - thd->net.no_send_ok = 0; DBUG_RETURN(error); } @@ -1107,7 +1114,6 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name, error = 0; err: - thd->net.no_send_ok = 0; // Clear up garbage after create_table_from_dump if (!called_connected) mysql_close(mysql); if (errmsg && thd->vio_ok()) @@ -1721,26 +1727,31 @@ static int has_temporary_error(THD *thd) DBUG_ENTER("has_temporary_error"); if (thd->is_fatal_error) - { - DBUG_PRINT("info", ("thd->net.last_errno: %s", ER(thd->net.last_errno))); DBUG_RETURN(0); - } DBUG_EXECUTE_IF("all_errors_are_temporary_errors", - if (thd->net.last_errno) - thd->net.last_errno= ER_LOCK_DEADLOCK;); + if (thd->main_da.is_error()) + { + thd->clear_error(); + my_error(ER_LOCK_DEADLOCK, MYF(0)); + }); + + /* + If there is no message in THD, we can't say if it's a temporary + error or not. This is currently the case for Incident_log_event, + which sets no message. Return FALSE. + */ + if (!thd->is_error()) + DBUG_RETURN(0); /* Temporary error codes: currently, InnoDB deadlock detected by InnoDB or lock wait timeout (innodb_lock_wait_timeout exceeded */ - if (thd->net.last_errno == ER_LOCK_DEADLOCK || - thd->net.last_errno == ER_LOCK_WAIT_TIMEOUT) - { - DBUG_PRINT("info", ("thd->net.last_errno: %s", ER(thd->net.last_errno))); + if (thd->main_da.sql_errno() == ER_LOCK_DEADLOCK || + thd->main_da.sql_errno() == ER_LOCK_WAIT_TIMEOUT) DBUG_RETURN(1); - } #ifdef HAVE_NDB_BINLOG /* @@ -2551,20 +2562,21 @@ Slave SQL thread aborted. Can't execute init_slave query"); */ uint32 const last_errno= rli->last_error().number; - DBUG_PRINT("info", ("thd->net.last_errno=%d; rli->last_error.number=%d", - thd->net.last_errno, last_errno)); - if (thd->net.last_errno != 0) + if (thd->is_error()) { - char const *const errmsg= - thd->net.last_error ? thd->net.last_error : ""; + char const *const errmsg= thd->main_da.message(); + + DBUG_PRINT("info", + ("thd->main_da.sql_errno()=%d; rli->last_error.number=%d", + thd->main_da.sql_errno(), last_errno)); if (last_errno == 0) { - rli->report(ERROR_LEVEL, thd->net.last_errno, errmsg); + rli->report(ERROR_LEVEL, thd->main_da.sql_errno(), errmsg); } - else if (last_errno != thd->net.last_errno) + else if (last_errno != thd->main_da.sql_errno()) { sql_print_error("Slave (additional info): %s Error_code: %d", - errmsg, thd->net.last_errno); + errmsg, thd->main_da.sql_errno()); } } diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 4b432cef5cd..2e36593a126 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -1202,12 +1202,6 @@ sp_head::execute(THD *thd) err_status= i->execute(thd, &ip); - /* - If this SP instruction have sent eof, it has caused no_send_error to be - set. Clear it back to allow the next instruction to send error. 
(multi- - statement execution code clears no_send_error between statements too) - */ - thd->net.no_send_error= 0; if (i->free_list) cleanup_items(i->free_list); @@ -2762,14 +2756,22 @@ sp_instr_stmt::execute(THD *thd, uint *nextp) thd->query, thd->query_length) <= 0) { res= m_lex_keeper.reset_lex_and_exec_core(thd, nextp, FALSE, this); + + if (thd->main_da.is_eof()) + net_end_statement(thd); + + query_cache_end_of_result(thd); + if (!res && unlikely(thd->enable_slow_log)) log_slow_statement(thd); - query_cache_end_of_result(thd); } else *nextp= m_ip+1; thd->query= query; thd->query_length= query_length; + + if (!thd->is_error()) + thd->main_da.reset_diagnostics_area(); } DBUG_RETURN(res); } diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc index 70a043c4fac..8395648689b 100644 --- a/sql/sp_rcontext.cc +++ b/sql/sp_rcontext.cc @@ -287,7 +287,6 @@ sp_rcontext::find_handler(THD *thd, uint sql_errno, sql_errno The error code level Warning level thd The current thread - - thd->net.report_error is an optional output. RETURN TRUE if a handler was found. @@ -298,7 +297,6 @@ sp_rcontext::handle_error(uint sql_errno, MYSQL_ERROR::enum_warning_level level, THD *thd) { - bool handled= FALSE; MYSQL_ERROR::enum_warning_level elevated_level= level; @@ -310,25 +308,7 @@ sp_rcontext::handle_error(uint sql_errno, elevated_level= MYSQL_ERROR::WARN_LEVEL_ERROR; } - if (find_handler(thd, sql_errno, elevated_level)) - { - if (elevated_level == MYSQL_ERROR::WARN_LEVEL_ERROR) - { - /* - Forces to abort the current instruction execution. - NOTE: This code is altering the original meaning of - the net.report_error flag (send an error to the client). - In the context of stored procedures with error handlers, - the flag is reused to cause error propagation, - until the error handler is reached. - No messages will be sent to the client in that context. - */ - thd->net.report_error= 1; - } - handled= TRUE; - } - - return handled; + return find_handler(thd, sql_errno, elevated_level); } void diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 519ca429aa6..9f6442d5c49 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -695,7 +695,7 @@ my_bool acl_reload(THD *thd) if (simple_open_n_lock_tables(thd, tables)) { sql_print_error("Fatal error: Can't open and lock privilege tables: %s", - thd->net.last_error); + thd->main_da.message()); goto end; } @@ -3800,11 +3800,11 @@ my_bool grant_reload(THD *thd) close_thread_tables(thd); /* - It is ok failing to load procs_priv table because we may be + It is OK failing to load procs_priv table because we may be working with 4.1 privilege tables. 
*/ if (grant_reload_procs_priv(thd)) - my_error(ER_CANNOT_LOAD_FROM_TABLE, MYF(0), "mysql.procs_priv"); + return_val= 1; rw_wrlock(&LOCK_grant); grant_version++; @@ -5693,9 +5693,6 @@ bool mysql_drop_user(THD *thd, List &list) if (result) my_error(ER_CANNOT_USER, MYF(0), "DROP USER", wrong_users.c_ptr_safe()); - DBUG_PRINT("info", ("thd->net.last_errno: %d", thd->net.last_errno)); - DBUG_PRINT("info", ("thd->net.last_error: %s", thd->net.last_error)); - write_bin_log(thd, FALSE, thd->query, thd->query_length); rw_unlock(&LOCK_grant); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index ba8b7fc1330..b15082f1f6c 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -491,9 +491,28 @@ static TABLE_SHARE int tmp; DBUG_ENTER("get_table_share_with_create"); - if ((share= get_table_share(thd, table_list, key, key_length, - db_flags, error)) || - thd->net.last_errno != ER_NO_SUCH_TABLE) + share= get_table_share(thd, table_list, key, key_length, db_flags, error); + /* + If share is not NULL, we found an existing share. + + If share is NULL, and there is no error, we're inside + pre-locking, which silences 'ER_NO_SUCH_TABLE' errors + with the intention to silently drop non-existing tables + from the pre-locking list. In this case we still need to try + auto-discover before returning a NULL share. + + If share is NULL and the error is ER_NO_SUCH_TABLE, this is + the same as above, only that the error was not silenced by + pre-locking. Once again, we need to try to auto-discover + the share. + + Finally, if share is still NULL, it's a real error and we need + to abort. + + @todo Rework alternative ways to deal with ER_NO_SUCH TABLE. + */ + if (share || thd->is_error() && thd->main_da.sql_errno() != ER_NO_SUCH_TABLE) + DBUG_RETURN(share); /* Table didn't exist. Check if some engine can provide it */ @@ -502,9 +521,13 @@ static TABLE_SHARE { /* No such table in any engine. - Hide "Table doesn't exist" errors if table belong to view + Hide "Table doesn't exist" errors if the table belongs to a view. + The check for thd->is_error() is necessary to not push an + unwanted error in case of pre-locking, which silences + "no such table" errors. + @todo Rework the alternative ways to deal with ER_NO_SUCH TABLE. 
*/ - if (table_list->belong_to_view) + if (thd->is_error() && table_list->belong_to_view) { TABLE_LIST *view= table_list->belong_to_view; thd->clear_error(); diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc index fa6aa8f5881..77c5155b41b 100644 --- a/sql/sql_binlog.cc +++ b/sql/sql_binlog.cc @@ -40,13 +40,6 @@ void mysql_client_binlog_statement(THD* thd) if (check_global_access(thd, SUPER_ACL)) DBUG_VOID_RETURN; - /* - Temporarily turn off send_ok, since different events handle this - differently - */ - my_bool nsok= thd->net.no_send_ok; - thd->net.no_send_ok= TRUE; - size_t coded_len= thd->lex->comment.length + 1; size_t decoded_len= base64_needed_decoded_length(coded_len); DBUG_ASSERT(coded_len > 0); @@ -193,20 +186,11 @@ void mysql_client_binlog_statement(THD* thd) } } - /* - Restore setting of no_send_ok - */ - thd->net.no_send_ok= nsok; DBUG_PRINT("info",("binlog base64 execution finished successfully")); send_ok(thd); end: - /* - Restore setting of no_send_ok - */ - thd->net.no_send_ok= nsok; - delete desc; my_free(buf, MYF(MY_ALLOW_ZERO_PTR)); DBUG_VOID_RETURN; diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 56b048a4f9d..f1803a329c5 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1403,6 +1403,7 @@ def_week_frmt: %lu", thd->limit_found_rows = query->found_rows(); thd->status_var.last_query_cost= 0.0; + thd->main_da.disable_status(); BLOCK_UNLOCK_RD(query_block); DBUG_RETURN(1); // Result sent to client diff --git a/sql/sql_class.cc b/sql/sql_class.cc index f59a848a242..b68a532a2dc 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -351,6 +351,124 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length, return thd->strmake(str.ptr(), str.length()); } +/** + Clear this diagnostics area. + + Normally called at the end of a statement. +*/ + +void +Diagnostics_area::reset_diagnostics_area() +{ +#ifdef DBUG_OFF + can_overwrite_status= FALSE; + /** Don't take chances in production */ + m_message[0]= '\0'; + m_sql_errno= 0; + m_server_status= 0; + m_affected_rows= 0; + m_last_insert_id= 0; + m_total_warn_count= 0; +#endif + is_sent= FALSE; + /** Tiny reset in debug mode to see garbage right away */ + m_status= DA_EMPTY; +} + + +/** + Set OK status -- ends commands that do not return a + result set, e.g. INSERT/UPDATE/DELETE. +*/ + +void +Diagnostics_area::set_ok_status(THD *thd, ha_rows affected_rows_arg, + ulong last_insert_id_arg, + const char *message_arg) +{ + DBUG_ASSERT(! is_set()); +#ifdef DBUG_OFF + /* In production, refuse to overwrite an error with an OK packet. */ + if (is_error()) + return; +#endif + /** Only allowed to report success if has not yet reported an error */ + + m_server_status= thd->server_status; + m_total_warn_count= thd->total_warn_count; + m_affected_rows= affected_rows_arg; + m_last_insert_id= last_insert_id_arg; + if (message_arg) + strmake(m_message, message_arg, sizeof(m_message)); + else + m_message[0]= '\0'; + m_status= DA_OK; +} + + +/** + Set EOF status. +*/ + +void +Diagnostics_area::set_eof_status(THD *thd) +{ + /** Only allowed to report eof if has not yet reported an error */ + + DBUG_ASSERT(! is_set()); +#ifdef DBUG_OFF + /* In production, refuse to overwrite an error with an EOF packet. */ + if (is_error()) + return; +#endif + + m_server_status= thd->server_status; + /* + If inside a stored procedure, do not return the total + number of warnings, since they are not available to the client + anyway. + */ + m_total_warn_count= thd->spcont ? 
0 : thd->total_warn_count; + + m_status= DA_EOF; +} + +/** + Set ERROR status. +*/ + +void +Diagnostics_area::set_error_status(THD *thd, uint sql_errno_arg, + const char *message_arg) +{ + /* + Only allowed to report error if has not yet reported a success + The only exception is when we flush the message to the client, + an error can happen during the flush. + */ + DBUG_ASSERT(! is_set() || can_overwrite_status); + + m_sql_errno= sql_errno_arg; + strmake(m_message, message_arg, sizeof(m_message)); + + m_status= DA_ERROR; +} + + +/** + Mark the diagnostics area as 'DISABLED'. + + This is used in rare cases when the COM_ command at hand sends a response + in a custom format. One example is the query cache, another is + COM_STMT_PREPARE. +*/ + +void +Diagnostics_area::disable_status() +{ + DBUG_ASSERT(! is_set()); + m_status= DA_DISABLED; +} THD::THD() @@ -431,7 +549,6 @@ THD::THD() net.vio=0; #endif client_capabilities= 0; // minimalistic client - net.last_error[0]=0; // If error on boot #ifdef HAVE_QUERY_CACHE query_cache_init_query(&net); // If error on boot #endif @@ -1324,12 +1441,12 @@ void select_send::abort() { DBUG_ENTER("select_send::abort"); if (is_result_set_started && thd->spcont && - thd->spcont->find_handler(thd, thd->net.last_errno, + thd->spcont->find_handler(thd, thd->main_da.sql_errno(), MYSQL_ERROR::WARN_LEVEL_ERROR)) { /* We're executing a stored procedure, have an open result - set, an SQL exception conditiona and a handler for it. + set, an SQL exception condition and a handler for it. In this situation we must abort the current statement, silence the error and start executing the continue/exit handler. @@ -1337,9 +1454,7 @@ void select_send::abort() otherwise the client will hang due to the violation of the client/server protocol. */ - thd->net.report_error= 0; - send_eof(); - thd->net.report_error= 1; // Abort SP + thd->protocol->end_partial_result_set(thd); } DBUG_VOID_RETURN; } @@ -1391,12 +1506,14 @@ bool select_send::send_data(List &items) } } thd->sent_row_count++; - if (!thd->vio_ok()) - DBUG_RETURN(0); - if (! thd->is_error()) + if (thd->is_error()) + { + protocol->remove_last_row(); + DBUG_RETURN(1); + } + if (thd->vio_ok()) DBUG_RETURN(protocol->write()); - protocol->remove_last_row(); - DBUG_RETURN(1); + DBUG_RETURN(0); } bool select_send::send_eof() @@ -1414,14 +1531,9 @@ bool select_send::send_eof() mysql_unlock_tables(thd, thd->lock); thd->lock=0; } - if (! 
thd->is_error()) - { - ::send_eof(thd); - is_result_set_started= 0; - return 0; - } - else - return 1; + ::send_eof(thd); + is_result_set_started= 0; + return FALSE; } @@ -2701,7 +2813,6 @@ void THD::reset_sub_statement_state(Sub_statement_state *backup, { backup->options= options; backup->in_sub_stmt= in_sub_stmt; - backup->no_send_ok= net.no_send_ok; backup->enable_slow_log= enable_slow_log; backup->limit_found_rows= limit_found_rows; backup->examined_row_count= examined_row_count; @@ -2732,9 +2843,6 @@ void THD::reset_sub_statement_state(Sub_statement_state *backup, cuted_fields= 0; transaction.savepoints= 0; first_successful_insert_id_in_cur_stmt= 0; - - /* Surpress OK packets in case if we will execute statements */ - net.no_send_ok= TRUE; } @@ -2757,7 +2865,6 @@ void THD::restore_sub_statement_state(Sub_statement_state *backup) transaction.savepoints= backup->savepoints; options= backup->options; in_sub_stmt= backup->in_sub_stmt; - net.no_send_ok= backup->no_send_ok; enable_slow_log= backup->enable_slow_log; first_successful_insert_id_in_prev_stmt= backup->first_successful_insert_id_in_prev_stmt; diff --git a/sql/sql_class.h b/sql/sql_class.h index c5b70cfa687..d46557acb4c 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -913,7 +913,6 @@ public: uint in_sub_stmt; bool enable_slow_log; bool last_insert_id_used; - my_bool no_send_ok; SAVEPOINT *savepoints; }; @@ -975,6 +974,123 @@ public: }; +/** + Stores status of the currently executed statement. + Cleared at the beginning of the statement, and then + can hold either OK, ERROR, or EOF status. + Can not be assigned twice per statement. +*/ + +class Diagnostics_area +{ +public: + enum enum_diagnostics_status + { + /** The area is cleared at start of a statement. */ + DA_EMPTY= 0, + /** Set whenever one calls send_ok(). */ + DA_OK, + /** Set whenever one calls send_eof(). */ + DA_EOF, + /** Set whenever one calls my_error() or my_message(). */ + DA_ERROR, + /** Set in case of a custom response, such as one from COM_STMT_PREPARE. */ + DA_DISABLED + }; + /** True if status information is sent to the client. */ + bool is_sent; + /** Set to make set_error_status after set_{ok,eof}_status possible. 
*/ + bool can_overwrite_status; + + void set_ok_status(THD *thd, ha_rows affected_rows_arg, + ulong last_insert_id_arg, + const char *message); + void set_eof_status(THD *thd); + void set_error_status(THD *thd, uint sql_errno_arg, const char *message_arg); + + void disable_status(); + + void reset_diagnostics_area(); + + bool is_set() const { return m_status != DA_EMPTY; } + bool is_error() const { return m_status == DA_ERROR; } + bool is_eof() const { return m_status == DA_EOF; } + bool is_ok() const { return m_status == DA_OK; } + bool is_disabled() const { return m_status == DA_DISABLED; } + enum_diagnostics_status status() const { return m_status; } + + const char *message() const + { DBUG_ASSERT(m_status == DA_ERROR || m_status == DA_OK); return m_message; } + + uint sql_errno() const + { DBUG_ASSERT(m_status == DA_ERROR); return m_sql_errno; } + + uint server_status() const + { + DBUG_ASSERT(m_status == DA_OK || m_status == DA_EOF); + return m_server_status; + } + + ha_rows affected_rows() const + { DBUG_ASSERT(m_status == DA_OK); return m_affected_rows; } + + ulong last_insert_id() const + { DBUG_ASSERT(m_status == DA_OK); return m_last_insert_id; } + + uint total_warn_count() const + { + DBUG_ASSERT(m_status == DA_OK || m_status == DA_EOF); + return m_total_warn_count; + } + + Diagnostics_area() { reset_diagnostics_area(); } + +private: + /** Message buffer. Can be used by OK or ERROR status. */ + char m_message[MYSQL_ERRMSG_SIZE]; + /** + SQL error number. One of ER_ codes from share/errmsg.txt. + Set by set_error_status. + */ + uint m_sql_errno; + + /** + Copied from thd->server_status when the diagnostics area is assigned. + We need this member as some places in the code use the following pattern: + thd->server_status|= ... + send_eof(thd); + thd->server_status&= ~... + Assigned by OK, EOF or ERROR. + */ + uint m_server_status; + /** + The number of rows affected by the last statement. This is + semantically close to thd->row_count_func, but has a different + life cycle. thd->row_count_func stores the value returned by + function ROW_COUNT() and is cleared only by statements that + update its value, such as INSERT, UPDATE, DELETE and few others. + This member is cleared at the beginning of the next statement. + + We could possibly merge the two, but life cycle of thd->row_count_func + can not be changed. + */ + ha_rows m_affected_rows; + /** + Similarly to the previous member, this is a replacement of + thd->first_successful_insert_id_in_prev_stmt, which is used + to implement LAST_INSERT_ID(). + */ + ulong m_last_insert_id; + /** The total number of warnings. */ + uint m_total_warn_count; + enum_diagnostics_status m_status; + /** + @todo: the following THD members belong here: + - warn_list, warn_count, + */ +}; + + /** @class THD For each client connection we create a separate thread with THD serving as @@ -1400,6 +1516,7 @@ public: List warn_list; uint warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_END]; uint total_warn_count; + Diagnostics_area main_da; /* Id of current query. Statement can be reused to execute several queries query_id is global in context of the whole MySQL server. @@ -1714,12 +1831,18 @@ public: CHANGED_TABLE_LIST * changed_table_dup(const char *key, long key_length); int send_explain_fields(select_result *result); #ifndef EMBEDDED_LIBRARY + /** + Clear the current error, if any. + We do not clear is_fatal_error or is_fatal_sub_stmt_error since we + assume this is never called if the fatal error is set. 
+ @todo: To silence an error, one should use Internal_error_handler + mechanism. In future this function will be removed. + */ inline void clear_error() { DBUG_ENTER("clear_error"); - net.last_error[0]= 0; - net.last_errno= 0; - net.report_error= 0; + if (main_da.is_error()) + main_da.reset_diagnostics_area(); is_slave_error= 0; DBUG_VOID_RETURN; } @@ -1728,10 +1851,14 @@ public: void clear_error(); inline bool vio_ok() const { return true; } #endif + /** + Mark the current error as fatal. Warning: this does not + set any error, it sets a property of the error, so must be + followed or prefixed with my_error(). + */ inline void fatal_error() { is_fatal_error= 1; - net.report_error= 1; DBUG_PRINT("error",("Fatal error set")); } /** @@ -1747,7 +1874,7 @@ public: To raise this flag, use my_error(). */ - inline bool is_error() const { return net.report_error; } + inline bool is_error() const { return main_da.is_error(); } inline CHARSET_INFO *charset() { return variables.character_set_client; } void update_charset(); @@ -1971,6 +2098,24 @@ private: }; +/** A short cut for thd->main_da.set_ok_status(). */ + +inline void +send_ok(THD *thd, ha_rows affected_rows= 0, ulonglong id= 0, + const char *message= NULL) +{ + thd->main_da.set_ok_status(thd, affected_rows, id, message); +} + + +/** A short cut for thd->main_da.set_eof_status(). */ + +inline void +send_eof(THD *thd) +{ + thd->main_da.set_eof_status(thd); +} + #define tmp_disable_binlog(A) \ {ulonglong tmp_disable_binlog__save_options= (A)->options; \ (A)->options&= ~OPTION_BIN_LOG @@ -2508,6 +2653,7 @@ public: void send_error(uint errcode,const char *err); int do_deletes(); bool send_eof(); + virtual void abort(); }; @@ -2550,6 +2696,7 @@ public: void send_error(uint errcode,const char *err); int do_updates (bool from_send_error); bool send_eof(); + virtual void abort(); }; class my_var : public Sql_alloc { diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 76237576764..309a1c7ab5d 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -948,19 +948,20 @@ bool setup_connection_thread_globals(THD *thd) bool login_connection(THD *thd) { NET *net= &thd->net; + int error; DBUG_ENTER("login_connection"); DBUG_PRINT("info", ("login_connection called by thread %lu", thd->thread_id)); - net->no_send_error= 0; - /* Use "connect_timeout" value during connection phase */ my_net_set_read_timeout(net, connect_timeout); my_net_set_write_timeout(net, connect_timeout); - if (check_connection(thd)) + error= check_connection(thd); + net_end_statement(thd); + + if (error) { // Wrong permissions - net_send_error(thd); #ifdef __NT__ if (vio_type(net->vio) == VIO_TYPE_NAMEDPIPE) my_sleep(1000); /* must wait after eof() */ @@ -989,13 +990,12 @@ void end_connection(THD *thd) if (thd->user_connect) decrease_user_connections(thd->user_connect); - if (thd->killed || - net->error && net->vio != 0 && thd->is_error()) + if (thd->killed || net->error && net->vio != 0) { statistic_increment(aborted_threads,&LOCK_status); } - if (net->error && net->vio != 0 && thd->is_error()) + if (net->error && net->vio != 0) { if (!thd->killed && thd->variables.log_warnings > 1) { @@ -1005,11 +1005,9 @@ void end_connection(THD *thd) thd->thread_id,(thd->db ? thd->db : "unconnected"), sctx->user ? sctx->user : "unauthenticated", sctx->host_or_ip, - (net->last_errno ? ER(net->last_errno) : + (thd->main_da.is_error() ? 
thd->main_da.message() : ER(ER_UNKNOWN_ERROR))); } - - net_send_error(thd, net->last_errno, NullS); } } @@ -1045,24 +1043,14 @@ static void prepare_new_connection_state(THD* thd) if (sys_init_connect.value_length && !(sctx->master_access & SUPER_ACL)) { execute_init_command(thd, &sys_init_connect, &LOCK_sys_init_connect); - /* - execute_init_command calls net_send_error. - If there was an error during execution of the init statements, - the error at this moment is present in thd->net.last_error and also - thd->is_slave_error and thd->net.report_error are set. - net_send_error sends the contents of thd->net.last_error and - clears thd->net.report_error. It doesn't, however, clean - thd->is_slave_error or thd->net.last_error. Here we make use of this - fact. - */ - if (thd->is_slave_error) + if (thd->is_error()) { thd->killed= THD::KILL_CONNECTION; sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION), thd->thread_id,(thd->db ? thd->db : "unconnected"), sctx->user ? sctx->user : "unauthenticated", sctx->host_or_ip, "init_connect command failed"); - sql_print_warning("%s", thd->net.last_error); + sql_print_warning("%s", thd->main_da.message()); } thd->proc_info=0; thd->set_time(); @@ -1129,7 +1117,6 @@ pthread_handler_t handle_one_connection(void *arg) while (!net->error && net->vio != 0 && !(thd->killed == THD::KILL_CONNECTION)) { - net->no_send_error= 0; if (do_command(thd)) break; } diff --git a/sql/sql_db.cc b/sql/sql_db.cc index ad4e0d803eb..d425510320e 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -921,6 +921,8 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) { ha_drop_database(path); query_cache_invalidate1(db); + (void) sp_drop_db_routines(thd, db); /* @todo Do not ignore errors */ + Events::drop_schema_events(thd, db); error = 0; } } @@ -956,6 +958,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) /* These DDL methods and logging protected with LOCK_mysql_create_db */ mysql_bin_log.write(&qinfo); } + thd->clear_error(); thd->server_status|= SERVER_STATUS_DB_DROPPED; send_ok(thd, (ulong) deleted); thd->server_status&= ~SERVER_STATUS_DB_DROPPED; @@ -999,8 +1002,6 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) } exit: - (void)sp_drop_db_routines(thd, db); /* QQ Ignore errors for now */ - Events::drop_schema_events(thd, db); /* If this database was the client's selected database, we silently change the client's selected database to nothing (to have an empty diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 509e736f6e7..a73963d7f86 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -728,6 +728,14 @@ void multi_delete::send_error(uint errcode,const char *err) /* First send error what ever it is ... 
*/ my_message(errcode, err, MYF(0)); + DBUG_VOID_RETURN; +} + + +void multi_delete::abort() +{ + DBUG_ENTER("multi_delete::abort"); + /* the error was handled or nothing deleted and no side effects return */ if (error_handled || !thd->transaction.stmt.modified_non_trans_table && !deleted) diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index ea7545fe5cb..10b42e11b26 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -147,8 +147,9 @@ exit: /* Hide "Unknown column" or "Unknown function" error */ if (orig_table_list->view) { - if (thd->net.last_errno == ER_BAD_FIELD_ERROR || - thd->net.last_errno == ER_SP_DOES_NOT_EXIST) + if (thd->is_error() && + (thd->main_da.sql_errno() == ER_BAD_FIELD_ERROR || + thd->main_da.sql_errno() == ER_SP_DOES_NOT_EXIST)) { thd->clear_error(); my_error(ER_VIEW_INVALID, MYF(0), orig_table_list->db, diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 425c5a33329..051950a0974 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1916,7 +1916,7 @@ bool delayed_get_table(THD *thd, TABLE_LIST *table_list) main thread. Use of my_message will enable stored procedures continue handlers. */ - my_message(di->thd.net.last_errno, di->thd.net.last_error, + my_message(di->thd.main_da.sql_errno(), di->thd.main_da.message(), MYF(0)); } di->unlock(); @@ -1993,7 +1993,7 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd) goto error; if (dead) { - my_message(thd.net.last_errno, thd.net.last_error, MYF(0)); + my_message(thd.main_da.sql_errno(), thd.main_da.message(), MYF(0)); goto error; } } @@ -2252,7 +2252,9 @@ pthread_handler_t handle_delayed_insert(void *arg) #if !defined( __WIN__) /* Win32 calls this in pthread_create */ if (my_thread_init()) { - strmov(thd->net.last_error,ER(thd->net.last_errno=ER_OUT_OF_RESOURCES)); + /* Can't use my_error since store_globals has not yet been called */ + thd->main_da.set_error_status(thd, ER_OUT_OF_RESOURCES, + ER(ER_OUT_OF_RESOURCES)); goto end; } #endif @@ -2261,8 +2263,10 @@ pthread_handler_t handle_delayed_insert(void *arg) thd->thread_stack= (char*) &thd; if (init_thr_lock() || thd->store_globals()) { + /* Can't use my_error since store_globals has perhaps failed */ + thd->main_da.set_error_status(thd, ER_OUT_OF_RESOURCES, + ER(ER_OUT_OF_RESOURCES)); thd->fatal_error(); - strmov(thd->net.last_error,ER(thd->net.last_errno=ER_OUT_OF_RESOURCES)); goto err; } @@ -2665,7 +2669,7 @@ bool Delayed_insert::handle_inserts(void) { /* This should never happen */ table->file->print_error(error,MYF(0)); - sql_print_error("%s",thd.net.last_error); + sql_print_error("%s", thd.main_da.message()); DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed in loop")); goto err; } @@ -2706,7 +2710,7 @@ bool Delayed_insert::handle_inserts(void) if ((error=table->file->extra(HA_EXTRA_NO_CACHE))) { // This shouldn't happen table->file->print_error(error,MYF(0)); - sql_print_error("%s",thd.net.last_error); + sql_print_error("%s", thd.main_da.message()); DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed after loop")); goto err; } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index df425557bdf..58084bdd067 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -328,7 +328,6 @@ void execute_init_command(THD *thd, sys_var_str *init_command_var, */ save_vio= thd->net.vio; thd->net.vio= 0; - thd->net.no_send_error= 0; dispatch_command(COM_QUERY, thd, init_command_var->value, init_command_var->value_length); @@ -397,8 +396,8 @@ pthread_handler_t handle_bootstrap(void *arg) /* purecov: begin tested */ if (net_realloc(&(thd->net), 2 * 
thd->net.max_packet)) { - net_send_error(thd, ER_NET_PACKET_TOO_LARGE, NullS); - thd->fatal_error(); + net_end_statement(thd); + bootstrap_error= 1; break; } buff= (char*) thd->net.buff; @@ -406,7 +405,7 @@ pthread_handler_t handle_bootstrap(void *arg) length+= (ulong) strlen(buff + length); /* purecov: end */ } - if (thd->is_fatal_error) + if (bootstrap_error) break; /* purecov: inspected */ while (length && (my_isspace(thd->charset(), buff[length-1]) || @@ -433,16 +432,11 @@ pthread_handler_t handle_bootstrap(void *arg) mysql_parse(thd, thd->query, length, & found_semicolon); close_thread_tables(thd); // Free tables - if (thd->is_fatal_error) - break; + bootstrap_error= thd->is_error(); + net_end_statement(thd); - if (thd->is_error()) - { - /* The query failed, send error to log and abort bootstrap */ - net_send_error(thd); - thd->fatal_error(); + if (bootstrap_error) break; - } free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); #ifdef USING_TRANSACTIONS @@ -451,9 +445,6 @@ pthread_handler_t handle_bootstrap(void *arg) } end: - /* Remember the exit code of bootstrap */ - bootstrap_error= thd->is_fatal_error; - net_end(&thd->net); thd->cleanup(); delete thd; @@ -712,7 +703,12 @@ bool do_command(THD *thd) */ my_net_set_read_timeout(net, thd->variables.net_wait_timeout); + /* + XXX: this code is here only to clear possible errors of init_connect. + Consider moving to init_connect() instead. + */ thd->clear_error(); // Clear error message + thd->main_da.reset_diagnostics_area(); net_new_transaction(net); if ((packet_length=my_net_read(net)) == packet_error) @@ -723,10 +719,13 @@ bool do_command(THD *thd) /* Check if we can continue without closing the connection */ + /* The error must be set. */ + DBUG_ASSERT(thd->is_error()); + net_end_statement(thd); + if (net->error != 3) DBUG_RETURN(TRUE); // We have to close it. - net_send_error(thd, net->last_errno, NullS); net->error= 0; DBUG_RETURN(FALSE); } @@ -860,7 +859,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, db.length= db_len; tbl_name= strmake(db.str, packet + 1, db_len)+1; strmake(tbl_name, packet + db_len + 2, tbl_len); - mysql_table_dump(thd, &db, tbl_name); + if (mysql_table_dump(thd, &db, tbl_name) == 0) + thd->main_da.disable_status(); break; } case COM_CHANGE_USER: @@ -1024,7 +1024,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd, while (!thd->killed && found_semicolon && ! thd->is_error()) { char *next_packet= (char*) found_semicolon; - net->no_send_error= 0; + + net_end_statement(thd); + query_cache_end_of_result(thd); /* Multiple queries exits, execute them individually */ @@ -1125,6 +1127,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, /* We don't calculate statistics for this command */ general_log_print(thd, command, NullS); net->error=0; // Don't give 'abort' message + thd->main_da.disable_status(); // Don't send anything back error=TRUE; // End server break; @@ -1241,16 +1244,6 @@ bool dispatch_command(enum enum_server_command command, THD *thd, DBUG_PRINT("quit",("Got shutdown command for level %u", level)); general_log_print(thd, command, NullS); send_eof(thd); -#ifdef __WIN__ - sleep(1); // must wait after eof() -#endif - /* - The client is next going to send a COM_QUIT request (as part of - mysql_close()). 
Make the life simpler for the client by sending - the response for the coming COM_QUIT in advance - */ - send_eof(thd); - close_connection(thd, 0, 1); close_thread_tables(thd); // Free before kill kill_mysql(); error=TRUE; @@ -1263,13 +1256,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, ulong uptime; uint length; ulonglong queries_per_second1000; -#ifndef EMBEDDED_LIBRARY char buff[250]; uint buff_len= sizeof(buff); -#else - char *buff= thd->net.last_error; - uint buff_len= sizeof(thd->net.last_error); -#endif general_log_print(thd, command, NullS); status_var_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS]); @@ -1291,6 +1279,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd, cached_open_tables(), (uint) (queries_per_second1000 / 1000), (uint) (queries_per_second1000 % 1000)); +#ifdef EMBEDDED_LIBRARY + /* Store the buffer in permanent memory */ + send_ok(thd, 0, 0, buff); +#endif #ifdef SAFEMALLOC if (sf_malloc_cur_memory) // Using SAFEMALLOC { @@ -1303,7 +1295,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, #endif #ifndef EMBEDDED_LIBRARY VOID(my_net_write(net, (uchar*) buff, length)); - VOID(net_flush(net)); + VOID(net_flush(net)); + thd->main_da.disable_status(); #endif break; } @@ -1383,10 +1376,19 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->transaction.xid_state.xid.null(); /* report error issued during command execution */ - if (thd->killed_errno() && ! thd->is_error()) - thd->send_kill_message(); - if (thd->is_error()) - net_send_error(thd); + if (thd->killed_errno()) + { + if (! thd->main_da.is_set()) + thd->send_kill_message(); + } + if (thd->killed == THD::KILL_QUERY || thd->killed == THD::KILL_BAD_DATA) + { + thd->killed= THD::NOT_KILLED; + thd->mysys_var->abort= 0; + } + + net_end_statement(thd); + query_cache_end_of_result(thd); log_slow_statement(thd); @@ -1756,7 +1758,6 @@ mysql_execute_command(THD *thd) SELECT_LEX_UNIT *unit= &lex->unit; /* Saved variable value */ DBUG_ENTER("mysql_execute_command"); - thd->net.no_send_error= 0; #ifdef WITH_PARTITION_STORAGE_ENGINE thd->work_part_info= 0; #endif @@ -2992,13 +2993,9 @@ end_with_restore_list: SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK | OPTION_SETUP_TABLES_DONE, del_result, unit, select_lex); - res|= thd->net.report_error; - if (unlikely(res)) - { - /* If we had a another error reported earlier then this will be ignored */ - del_result->send_error(ER_UNKNOWN_ERROR, "Execution of the query failed"); + res|= thd->is_error(); + if (res) del_result->abort(); - } delete del_result; } else @@ -3931,8 +3928,6 @@ create_sp_error: goto error; } - my_bool save_no_send_ok= thd->net.no_send_ok; - thd->net.no_send_ok= TRUE; if (sp->m_flags & sp_head::MULTI_RESULTS) { if (! 
(thd->client_capabilities & CLIENT_MULTI_RESULTS)) @@ -3942,7 +3937,6 @@ create_sp_error: back */ my_error(ER_SP_BADSELECT, MYF(0), sp->m_qname.str); - thd->net.no_send_ok= save_no_send_ok; goto error; } /* @@ -3958,7 +3952,6 @@ create_sp_error: if (check_routine_access(thd, EXECUTE_ACL, sp->m_db.str, sp->m_name.str, TRUE, FALSE)) { - thd->net.no_send_ok= save_no_send_ok; goto error; } #endif @@ -3983,7 +3976,6 @@ create_sp_error: thd->variables.select_limit= select_limit; - thd->net.no_send_ok= save_no_send_ok; thd->server_status&= ~bits_to_be_cleared; if (!res) @@ -4624,7 +4616,10 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_YES, str.ptr()); } - result->send_eof(); + if (res) + result->abort(); + else + result->send_eof(); delete result; } else @@ -5251,6 +5246,7 @@ void mysql_reset_thd_for_next_command(THD *thd) { DBUG_ENTER("mysql_reset_thd_for_next_command"); DBUG_ASSERT(!thd->spcont); /* not for substatements of routines */ + DBUG_ASSERT(! thd->in_sub_stmt); thd->free_list= 0; thd->select_number= 1; /* @@ -5277,18 +5273,18 @@ void mysql_reset_thd_for_next_command(THD *thd) } DBUG_ASSERT(thd->security_ctx== &thd->main_security_ctx); thd->thread_specific_used= FALSE; - if (!thd->in_sub_stmt) + + if (opt_bin_log) { - if (opt_bin_log) - { - reset_dynamic(&thd->user_var_events); - thd->user_var_events_alloc= thd->mem_root; - } - thd->clear_error(); - thd->total_warn_count=0; // Warnings for this query - thd->rand_used= 0; - thd->sent_row_count= thd->examined_row_count= 0; + reset_dynamic(&thd->user_var_events); + thd->user_var_events_alloc= thd->mem_root; } + thd->clear_error(); + thd->main_da.reset_diagnostics_area(); + thd->total_warn_count=0; // Warnings for this query + thd->rand_used= 0; + thd->sent_row_count= thd->examined_row_count= 0; + /* Because we come here only for start of top-statements, binlog format is constant inside a complex statement (using stored functions) etc. 
@@ -5526,7 +5522,6 @@ void mysql_parse(THD *thd, const char *inBuf, uint length, /* Actually execute the query */ lex->set_trg_event_type_for_tables(); mysql_execute_command(thd); - query_cache_end_of_result(thd); } } } @@ -6378,8 +6373,10 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, } if (thd) { - (void)acl_reload(thd); - (void)grant_reload(thd); + if (acl_reload(thd)) + result= 1; + if (grant_reload(thd)) + result= 1; } if (tmp_thd) { @@ -6496,7 +6493,6 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, if (reset_master(thd)) { result=1; - thd->fatal_error(); // Ensure client get error } } #endif diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 64f96f342df..8ded0cacd77 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -2068,6 +2068,7 @@ char *generate_partition_syntax(partition_info *part_info, default: DBUG_ASSERT(0); /* We really shouldn't get here, no use in continuing from here */ + my_error(ER_OUT_OF_RESOURCES, MYF(0)); current_thd->fatal_error(); DBUG_RETURN(NULL); } diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 9d4d62e57b6..52e6fcc5d58 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -230,6 +230,8 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns) NET *net= &stmt->thd->net; uchar buff[12]; uint tmp; + int error; + THD *thd= stmt->thd; DBUG_ENTER("send_prep_stmt"); buff[0]= 0; /* OK packet indicator */ @@ -244,11 +246,16 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns) Send types and names of placeholders to the client XXX: fix this nasty upcast from List to List */ - DBUG_RETURN(my_net_write(net, buff, sizeof(buff)) || - (stmt->param_count && - stmt->thd->protocol_text.send_fields((List *) - &stmt->lex->param_list, - Protocol::SEND_EOF))); + error= my_net_write(net, buff, sizeof(buff)); + if (stmt->param_count && ! error) + { + error= thd->protocol_text.send_fields((List *) + &stmt->lex->param_list, + Protocol::SEND_EOF); + } + /* Flag that a response has already been sent */ + thd->main_da.disable_status(); + DBUG_RETURN(error); } #else static bool send_prep_stmt(Prepared_statement *stmt, @@ -259,6 +266,7 @@ static bool send_prep_stmt(Prepared_statement *stmt, thd->client_stmt_id= stmt->id; thd->client_param_count= stmt->param_count; thd->clear_error(); + thd->main_da.disable_status(); return 0; } @@ -2526,6 +2534,8 @@ void mysql_stmt_close(THD *thd, char *packet) DBUG_ASSERT(! 
(stmt->flags & (uint) Prepared_statement::IS_IN_USE)); (void) stmt->deallocate(); + thd->main_da.disable_status(); + DBUG_VOID_RETURN; } @@ -2590,6 +2600,8 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) DBUG_ENTER("mysql_stmt_get_longdata"); status_var_increment(thd->status_var.com_stmt_send_long_data); + + thd->main_da.disable_status(); #ifndef EMBEDDED_LIBRARY /* Minimal size of long data packet is 6 bytes */ if (packet_length < MYSQL_LONG_DATA_HEADER) @@ -2664,11 +2676,7 @@ bool Select_fetch_protocol_binary::send_fields(List &list, uint flags) bool Select_fetch_protocol_binary::send_eof() { - Protocol *save_protocol= thd->protocol; - - thd->protocol= &protocol; ::send_eof(thd); - thd->protocol= save_protocol; return FALSE; } @@ -3097,7 +3105,6 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) thd->query_length) <= 0) { error= mysql_execute_command(thd); - query_cache_end_of_result(thd); } } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 952b94e6b6d..cadc27638bd 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -898,7 +898,6 @@ JOIN::optimize() } if (res > 1) { - thd->fatal_error(); error= res; DBUG_PRINT("error",("Error from opt_sum_query")); DBUG_RETURN(1); @@ -14454,6 +14453,7 @@ calc_group_buffer(JOIN *join,ORDER *group) default: /* This case should never be choosen */ DBUG_ASSERT(0); + my_error(ER_OUT_OF_RESOURCES, MYF(0)); join->thd->fatal_error(); } } diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc index 602c289a605..8203ca92eed 100644 --- a/sql/sql_servers.cc +++ b/sql/sql_servers.cc @@ -239,7 +239,7 @@ bool servers_reload(THD *thd) if (simple_open_n_lock_tables(thd, tables)) { sql_print_error("Can't open and lock privilege tables: %s", - thd->net.last_error); + thd->main_da.message()); goto end; } diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 9a7d7c59af3..36f9cc780bd 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -578,7 +578,8 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) /* Only one table for now, but VIEW can involve several tables */ if (open_normal_and_derived_tables(thd, table_list, 0)) { - if (!table_list->view || thd->net.last_errno != ER_VIEW_INVALID) + if (!table_list->view || + thd->is_error() && thd->main_da.sql_errno() != ER_VIEW_INVALID) DBUG_RETURN(TRUE); /* @@ -786,10 +787,9 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) } restore_record(table, s->default_values); // Get empty record table->use_all_columns(); - if (thd->protocol->send_fields(&field_list, Protocol::SEND_DEFAULTS | - Protocol::SEND_EOF)) + if (thd->protocol->send_fields(&field_list, Protocol::SEND_DEFAULTS)) DBUG_VOID_RETURN; - thd->protocol->flush(); + send_eof(thd); DBUG_VOID_RETURN; } @@ -2919,7 +2919,7 @@ static int fill_schema_table_names(THD *thd, TABLE *table, default: DBUG_ASSERT(0); } - if (thd->net.last_errno == ER_NO_SUCH_TABLE) + if (thd->is_error() && thd->main_da.sql_errno() == ER_NO_SUCH_TABLE) { thd->clear_error(); return 0; @@ -3267,8 +3267,16 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) res= open_normal_and_derived_tables(thd, show_table_list, MYSQL_LOCK_IGNORE_FLUSH); lex->sql_command= save_sql_command; - - if (thd->net.last_errno == ER_NO_SUCH_TABLE) + /* + XXX: show_table_list has a flag i_is_requested, + and when it's set, open_normal_and_derived_tables() + can return an error without setting an error message + in THD, which is a hack. 
This is why we have to + check for res, then for thd->is_error() only then + for thd->main_da.sql_errno(). + */ + if (res && thd->is_error() && + thd->main_da.sql_errno() == ER_NO_SUCH_TABLE) { /* Hide error for not existing table. @@ -3422,7 +3430,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, /* there was errors during opening tables */ - const char *error= thd->net.last_error; + const char *error= thd->main_da.message(); if (tables->view) table->field[3]->store(STRING_WITH_LEN("VIEW"), cs); else if (tables->schema_table) @@ -3624,7 +3632,7 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables, rather than in SHOW COLUMNS */ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->net.last_errno, thd->net.last_error); + thd->main_da.sql_errno(), thd->main_da.message()); thd->clear_error(); res= 0; } @@ -4098,9 +4106,9 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables, I.e. we are in SELECT FROM INFORMATION_SCHEMA.STATISTICS rather than in SHOW KEYS */ - if (thd->net.last_errno) + if (thd->is_error()) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->net.last_errno, thd->net.last_error); + thd->main_da.sql_errno(), thd->main_da.message()); thd->clear_error(); res= 0; } @@ -4290,9 +4298,9 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables, if (schema_table_store_record(thd, table)) DBUG_RETURN(1); - if (res && thd->net.last_errno) + if (res && thd->is_error()) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->net.last_errno, thd->net.last_error); + thd->main_da.sql_errno(), thd->main_da.message()); } if (res) thd->clear_error(); @@ -4323,9 +4331,9 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables, DBUG_ENTER("get_schema_constraints_record"); if (res) { - if (thd->net.last_errno) + if (thd->is_error()) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->net.last_errno, thd->net.last_error); + thd->main_da.sql_errno(), thd->main_da.message()); thd->clear_error(); DBUG_RETURN(0); } @@ -4428,9 +4436,9 @@ static int get_schema_triggers_record(THD *thd, TABLE_LIST *tables, */ if (res) { - if (thd->net.last_errno) + if (thd->is_error()) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->net.last_errno, thd->net.last_error); + thd->main_da.sql_errno(), thd->main_da.message()); thd->clear_error(); DBUG_RETURN(0); } @@ -4511,9 +4519,9 @@ static int get_schema_key_column_usage_record(THD *thd, DBUG_ENTER("get_schema_key_column_usage_record"); if (res) { - if (thd->net.last_errno) + if (thd->is_error()) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->net.last_errno, thd->net.last_error); + thd->main_da.sql_errno(), thd->main_da.message()); thd->clear_error(); DBUG_RETURN(0); } @@ -4706,9 +4714,9 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables, if (res) { - if (thd->net.last_errno) + if (thd->is_error()) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->net.last_errno, thd->net.last_error); + thd->main_da.sql_errno(), thd->main_da.message()); thd->clear_error(); DBUG_RETURN(0); } @@ -4751,6 +4759,7 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables, break; default: DBUG_ASSERT(0); + my_error(ER_OUT_OF_RESOURCES, MYF(0)); current_thd->fatal_error(); DBUG_RETURN(1); } @@ -5243,9 +5252,9 @@ get_referential_constraints_record(THD *thd, TABLE_LIST *tables, if (res) { - if (thd->net.last_errno) + if (thd->is_error()) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->net.last_errno, thd->net.last_error); + thd->main_da.sql_errno(), 
thd->main_da.message()); thd->clear_error(); DBUG_RETURN(0); } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index c618d170fb7..7bcb79f8429 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1662,7 +1662,10 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, !dont_log_query); if ((error == ENOENT || error == HA_ERR_NO_SUCH_TABLE) && (if_exists || table_type == NULL)) + { error= 0; + thd->clear_error(); + } if (error == HA_ERR_ROW_IS_REFERENCED) { /* the table is referenced by a foreign key constraint */ @@ -4191,18 +4194,22 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, (table->table->file->ha_check_for_upgrade(check_opt) == HA_ADMIN_NEEDS_ALTER)) { - my_bool save_no_send_ok= thd->net.no_send_ok; DBUG_PRINT("admin", ("recreating table")); ha_autocommit_or_rollback(thd, 1); close_thread_tables(thd); tmp_disable_binlog(thd); // binlogging is done by caller if wanted - thd->net.no_send_ok= TRUE; result_code= mysql_recreate_table(thd, table); - thd->net.no_send_ok= save_no_send_ok; reenable_binlog(thd); + /* + mysql_recreate_table() can push OK or ERROR. + Clear 'OK' status. If there is an error, keep it: + we will store the error message in a result set row + and then clear. + */ + if (thd->main_da.is_ok()) + thd->main_da.reset_diagnostics_area(); goto send_result; } - } DBUG_PRINT("admin", ("calling operator_func '%s'", operator_name)); @@ -4296,7 +4303,6 @@ send_result_message: case HA_ADMIN_TRY_ALTER: { - my_bool save_no_send_ok= thd->net.no_send_ok; /* This is currently used only by InnoDB. ha_innobase::optimize() answers "try with alter", so here we close the table, do an ALTER TABLE, @@ -4308,10 +4314,16 @@ send_result_message: *save_next_global= table->next_global; table->next_local= table->next_global= 0; tmp_disable_binlog(thd); // binlogging is done by caller if wanted - thd->net.no_send_ok= TRUE; result_code= mysql_recreate_table(thd, table); - thd->net.no_send_ok= save_no_send_ok; reenable_binlog(thd); + /* + mysql_recreate_table() can push OK or ERROR. + Clear 'OK' status. If there is an error, keep it: + we will store the error message in a result set row + and then clear. + */ + if (thd->main_da.is_ok()) + thd->main_da.reset_diagnostics_area(); ha_autocommit_or_rollback(thd, 0); close_thread_tables(thd); if (!result_code) // recreation went ok @@ -4322,9 +4334,10 @@ send_result_message: } if (result_code) // either mysql_recreate_table or analyze failed { - const char *err_msg; - if ((err_msg= thd->net.last_error)) + DBUG_ASSERT(thd->is_error()); + if (thd->is_error()) { + const char *err_msg= thd->main_da.message(); if (!thd->vio_ok()) { sql_print_error(err_msg); @@ -4340,6 +4353,7 @@ send_result_message: protocol->store(table_name, system_charset_info); protocol->store(operator_name, system_charset_info); } + thd->clear_error(); } } result_code= result_code ? HA_ADMIN_FAILED : HA_ADMIN_OK; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index ecb7acda61b..1e9b7da1c81 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1696,7 +1696,11 @@ void multi_update::send_error(uint errcode,const char *err) { /* First send error what ever it is ... 
*/ my_error(errcode, MYF(0), err); +} + +void multi_update::abort() +{ /* the error was handled or nothing deleted and no side effects return */ if (error_handled || !thd->transaction.stmt.modified_non_trans_table && !updated) diff --git a/sql/table.cc b/sql/table.cc index 2143faaff5c..133b08ba703 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -3275,31 +3275,32 @@ bool TABLE_LIST::prep_check_option(THD *thd, uint8 check_opt_type) } -/* +/** Hide errors which show view underlying table information - SYNOPSIS - TABLE_LIST::hide_view_error() - thd thread handler + @param[in,out] thd thread handler + @pre This method can be called only if there is an error. */ void TABLE_LIST::hide_view_error(THD *thd) { /* Hide "Unknown column" or "Unknown function" error */ - if (thd->net.last_errno == ER_BAD_FIELD_ERROR || - thd->net.last_errno == ER_SP_DOES_NOT_EXIST || - thd->net.last_errno == ER_PROCACCESS_DENIED_ERROR || - thd->net.last_errno == ER_COLUMNACCESS_DENIED_ERROR || - thd->net.last_errno == ER_TABLEACCESS_DENIED_ERROR || - thd->net.last_errno == ER_TABLE_NOT_LOCKED || - thd->net.last_errno == ER_NO_SUCH_TABLE) + DBUG_ASSERT(thd->is_error()); + + if (thd->main_da.sql_errno() == ER_BAD_FIELD_ERROR || + thd->main_da.sql_errno() == ER_SP_DOES_NOT_EXIST || + thd->main_da.sql_errno() == ER_PROCACCESS_DENIED_ERROR || + thd->main_da.sql_errno() == ER_COLUMNACCESS_DENIED_ERROR || + thd->main_da.sql_errno() == ER_TABLEACCESS_DENIED_ERROR || + thd->main_da.sql_errno() == ER_TABLE_NOT_LOCKED || + thd->main_da.sql_errno() == ER_NO_SUCH_TABLE) { TABLE_LIST *top= top_table(); - thd->clear_error(); + thd->clear_error(); my_error(ER_VIEW_INVALID, MYF(0), top->view_db.str, top->view_name.str); } - else if (thd->net.last_errno == ER_NO_DEFAULT_FOR_FIELD) + else if (thd->main_da.sql_errno() == ER_NO_DEFAULT_FOR_FIELD) { TABLE_LIST *top= top_table(); thd->clear_error(); diff --git a/sql/tztime.cc b/sql/tztime.cc index 920f8e87d13..f080c61e243 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1642,7 +1642,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) if (open_system_tables_for_read(thd, tz_tables, &open_tables_state_backup)) { sql_print_warning("Can't open and lock time zone table: %s " - "trying to live without them", thd->net.last_error); + "trying to live without them", thd->main_da.message()); /* We will try emulate that everything is ok */ return_val= time_zone_tables_exist= 0; goto end_with_setting_default_tz; -- cgit v1.2.1 From 4fbdff0fc843381e78dba31e3a81e15d863f3993 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 12 Dec 2007 13:05:32 -0500 Subject: Bug #29993 Default value of log_output should be 'FILE', not 'TABLE' Ensure default logging is "file" logging, for additional ways the option can be turned on. Verified on OS X, but not tested automatically. 
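The hunks below change two things: the compiled-in default of log_output_str becomes "FILE", and the --log-output handler in mysqld_get_one_option() falls back to file logging when the option is given without a value. A minimal stand-alone sketch of the resulting behaviour (the enum values and helper below are illustrative stand-ins, not the server's definitions):

  #include <cstdio>

  /* Stand-ins for the server's log destination flags (values assumed). */
  enum { LOG_NONE= 1, LOG_FILE= 2, LOG_TABLE= 4 };

  static unsigned long log_output_options= LOG_FILE;  /* was LOG_TABLE */
  static const char *log_output_str= "FILE";          /* was "TABLE"   */

  /* --log-output with an empty or missing value now also means FILE. */
  static void set_log_output(const char *argument)
  {
    if (!argument || !argument[0])
    {
      log_output_options= LOG_FILE;
      log_output_str= "FILE";
    }
    /* else: parse the comma-separated destination list as before */
  }

  int main()
  {
    set_log_output(NULL);
    printf("log_output=%s\n", log_output_str);   /* prints FILE */
    return 0;
  }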
--- sql/mysqld.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index fad2e5dcd22..172a6442c0f 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -478,7 +478,7 @@ ulong thread_id=1L,current_pid; ulong slow_launch_threads = 0, sync_binlog_period; ulong expire_logs_days = 0; ulong rpl_recovery_rank=0; -const char *log_output_str= "TABLE"; +const char *log_output_str= "FILE"; time_t server_start_time; @@ -7483,7 +7483,7 @@ mysqld_get_one_option(int optid, { if (!argument || !argument[0]) { - log_output_options= LOG_TABLE; + log_output_options= LOG_FILE; log_output_str= log_output_typelib.type_names[1]; } else -- cgit v1.2.1 From e5a397e28f7db0b356d1eddd669591b76fa1c1f8 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 12 Dec 2007 19:44:14 -0200 Subject: Bug#32395 Alter table under a impending global read lock causes a server crash The problem is that some DDL statements (ALTER TABLE, CREATE TRIGGER, FLUSH TABLES, ...) when under LOCK TABLES need to momentarily drop the lock, reopen the table and grab the write lock again (using reopen_tables). When grabbing the lock again, reopen_tables doesn't pass a flag to mysql_lock_tables in order to ignore the impending global read lock, which causes a assertion because LOCK_open is being hold. Also dropping the lock must not signal to any threads that the table has been relinquished (related to the locking/flushing protocol). The solution is to correct the way the table is reopenned and the locks grabbed. When reopening the table and under LOCK TABLES, the table version should be set to 0 so other threads have to wait for the table. When grabbing the lock, any other flush should be ignored because it's theoretically a atomic operation. The chosen solution also fixes a potential discrepancy between binlog and GRL (global read lock) because table placeholders were being ignored, now a FLUSH TABLES WITH READ LOCK will properly for table with open placeholders. It's also important to mention that this patch doesn't fix a potential deadlock if one uses two GRLs under LOCK TABLES concurrently. mysql-test/r/lock_multi.result: Add test case result for Bug#32395 mysql-test/r/trigger_notembedded.result: Add test case result for Bug#32395 mysql-test/t/lock_multi.test: Add test case for Bug#32395 mysql-test/t/trigger_notembedded.test: Enable test case for Bug#32395 sql/ha_ndbcluster.cc: Update close_cached_tables usage. sql/ha_ndbcluster_binlog.cc: Update close_cached_tables usage. sql/mysql_priv.h: Update close_cache_tables prototype. sql/set_var.cc: Update close_cached_tables usage and set flag to wait for tables with placeholders. This is one of the places where a GRL can be obtained. sql/sql_base.cc: Preserve old version for write locked tables and ignore pending flushes and update close_cache_tables to take into account name locked tables. sql/sql_parse.cc: Update close_cached_tables usage and pass flag so that name locked tables are waited for. sql/sql_table.cc: Protect the table against a impending GRL if under LOCK TABLES. 
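The server-side hunks further below (sql_base.cc) rely on two mechanisms that are easy to miss in the diff: reopen_tables() re-acquires the write lock with MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK and MYSQL_LOCK_IGNORE_FLUSH so the drop/reopen/relock sequence behaves as one atomic step, and close_cached_tables() leaves the version of write-locked tables at 0 so an impending FLUSH TABLES WITH READ LOCK keeps waiting for them. A self-contained toy model of that version rule (all types, constants and values here are assumptions for illustration, not the server's definitions):

  #include <cstdio>

  /* Assumed stand-ins for flags from mysql_priv.h / thr_lock.h. */
  enum
  {
    MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN=   1 << 0,
    MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK= 1 << 1,
    MYSQL_LOCK_IGNORE_FLUSH=            1 << 2
  };
  static const int TL_WRITE_ALLOW_WRITE= 4;   /* assumed relative ordering */
  static unsigned long refresh_version= 10;

  struct TableModel
  {
    unsigned long version;   /* 0 means other threads must wait for it */
    int lock_type;
  };

  /*
    What close_cached_tables() now does for each table opened by the
    locking thread: only tables that are not write locked get the new
    version, so the pending global read lock keeps waiting for the
    write-locked ones until LOCK TABLES is released.
  */
  static void refresh_open_table(TableModel *t)
  {
    if (t->lock_type < TL_WRITE_ALLOW_WRITE)
      t->version= refresh_version;
  }

  int main()
  {
    TableModel write_locked= { 0, TL_WRITE_ALLOW_WRITE };
    TableModel read_locked=  { 0, 1 };
    refresh_open_table(&write_locked);
    refresh_open_table(&read_locked);
    printf("write locked: version=%lu\n", write_locked.version);  /* 0  */
    printf("read locked:  version=%lu\n", read_locked.version);   /* 10 */
    printf("reopen_tables lock flags: 0x%x\n",
           (unsigned) (MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN |
                       MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK |
                       MYSQL_LOCK_IGNORE_FLUSH));
    return 0;
  }

Running the sketch shows the write-locked table keeping version 0 while the read-locked one is refreshed, which mirrors the behaviour the new lock_multi.test scenarios exercise.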
--- mysql-test/r/lock_multi.result | 30 ++++++++++ mysql-test/r/trigger_notembedded.result | 14 +++++ mysql-test/t/lock_multi.test | 98 ++++++++++++++++++++++++++++++++- mysql-test/t/trigger_notembedded.test | 6 +- sql/ha_ndbcluster.cc | 4 +- sql/ha_ndbcluster_binlog.cc | 12 ++-- sql/mysql_priv.h | 3 +- sql/set_var.cc | 2 +- sql/sql_base.cc | 69 +++++++++++++++-------- sql/sql_parse.cc | 7 ++- sql/sql_table.cc | 2 +- 11 files changed, 206 insertions(+), 41 deletions(-) diff --git a/mysql-test/r/lock_multi.result b/mysql-test/r/lock_multi.result index 4a0f70a7b88..9c4f1b17dcc 100644 --- a/mysql-test/r/lock_multi.result +++ b/mysql-test/r/lock_multi.result @@ -113,4 +113,34 @@ handler t1 open; ERROR HY000: Table storage engine for 't1' doesn't have this option --> client 1 drop table t1; +drop table if exists t1; +create table t1 (i int); +connection: default +lock tables t1 write; +connection: flush +flush tables with read lock;; +connection: default +alter table t1 add column j int; +connection: insert +insert into t1 values (1,2);; +connection: default +unlock tables; +connection: flush +select * from t1; +i j +unlock tables; +select * from t1; +i j +1 2 +drop table t1; +drop table if exists t1; +create table t1 (i int); +connection: default +lock tables t1 write; +connection: flush +flush tables with read lock;; +connection: default +flush tables; +unlock tables; +drop table t1; End of 5.1 tests diff --git a/mysql-test/r/trigger_notembedded.result b/mysql-test/r/trigger_notembedded.result index d56f83993a6..87e8f68da38 100644 --- a/mysql-test/r/trigger_notembedded.result +++ b/mysql-test/r/trigger_notembedded.result @@ -448,4 +448,18 @@ DROP TABLE t1; DROP DATABASE mysqltest_db1; USE test; End of 5.0 tests. +drop table if exists t1; +create table t1 (i int); +connection: default +lock tables t1 write; +connection: flush +flush tables with read lock;; +connection: default +create trigger t1_bi before insert on t1 for each row begin end; +unlock tables; +connection: flush +unlock tables; +select * from t1; +i +drop table t1; End of 5.1 tests. diff --git a/mysql-test/t/lock_multi.test b/mysql-test/t/lock_multi.test index b2266c9bff1..0d36b79df78 100644 --- a/mysql-test/t/lock_multi.test +++ b/mysql-test/t/lock_multi.test @@ -150,7 +150,7 @@ send SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1; connection locker; let $wait_condition= select count(*) = 1 from information_schema.processlist - where state = "Locked" and info = + where state = "Waiting for table" and info = "SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1"; --source include/wait_condition.inc # Make test case independent from earlier grants. 
@@ -343,4 +343,100 @@ handler t1 open; connection default; drop table t1; +# +# Bug#32395 Alter table under a impending global read lock causes a server crash +# + +# +# Test ALTER TABLE under LOCK TABLES and FLUSH TABLES WITH READ LOCK +# + +--disable_warnings +drop table if exists t1; +--enable_warnings +create table t1 (i int); +connect (flush,localhost,root,,test,,); +connection default; +--echo connection: default +lock tables t1 write; +connection flush; +--echo connection: flush +--send flush tables with read lock; +connection default; +--echo connection: default +let $wait_condition= + select count(*) = 1 from information_schema.processlist + where state = "Flushing tables"; +--source include/wait_condition.inc +alter table t1 add column j int; +connect (insert,localhost,root,,test,,); +connection insert; +--echo connection: insert +let $wait_condition= + select count(*) = 1 from information_schema.processlist + where state = "Flushing tables"; +--source include/wait_condition.inc +--send insert into t1 values (1,2); +--echo connection: default +connection default; +let $wait_condition= + select count(*) = 1 from information_schema.processlist + where state = "Waiting for release of readlock"; +--source include/wait_condition.inc +unlock tables; +connection flush; +--echo connection: flush +--reap +let $wait_condition= + select count(*) = 1 from information_schema.processlist + where state = "Waiting for release of readlock"; +--source include/wait_condition.inc +select * from t1; +unlock tables; +connection insert; +--reap +connection default; +select * from t1; +drop table t1; +disconnect flush; +disconnect insert; + +# +# Test that FLUSH TABLES under LOCK TABLES protects write locked tables +# from a impending FLUSH TABLES WITH READ LOCK +# + +--disable_warnings +drop table if exists t1; +--enable_warnings +create table t1 (i int); +connect (flush,localhost,root,,test,,); +connection default; +--echo connection: default +lock tables t1 write; +connection flush; +--echo connection: flush +--send flush tables with read lock; +connection default; +--echo connection: default +let $wait_condition= + select count(*) = 1 from information_schema.processlist + where state = "Flushing tables"; +--source include/wait_condition.inc +flush tables; +let $wait_condition= + select count(*) = 1 from information_schema.processlist + where state = "Flushing tables"; +--source include/wait_condition.inc +unlock tables; +let $wait_condition= + select count(*) = 0 from information_schema.processlist + where state = "Flushing tables"; +--source include/wait_condition.inc +connection flush; +--reap +connection default; +disconnect flush; +drop table t1; + --echo End of 5.1 tests diff --git a/mysql-test/t/trigger_notembedded.test b/mysql-test/t/trigger_notembedded.test index 748ae6e1c27..5d2ab84adaf 100644 --- a/mysql-test/t/trigger_notembedded.test +++ b/mysql-test/t/trigger_notembedded.test @@ -880,8 +880,9 @@ USE test; # Bug#23713 LOCK TABLES + CREATE TRIGGER + FLUSH TABLES WITH READ LOCK = deadlock # -# Test temporarily disable due to Bug#32395 ---disable_parsing +--disable_warnings +drop table if exists t1; +--enable_warnings create table t1 (i int); connect (flush,localhost,root,,test,,); connection default; @@ -906,6 +907,5 @@ connection default; select * from t1; drop table t1; disconnect flush; ---enable_parsing --echo End of 5.1 tests. 
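The server part of the patch starts below with a changed close_cached_tables() interface: the parameter order moves from (thd, wait_for_refresh, tables, have_lock) to (thd, tables, have_lock, wait_for_refresh, wait_for_placeholders), and every caller in the following hunks is rewritten accordingly. A hypothetical shim, only to make the argument mapping explicit (the patch adds no such wrapper):

  /* Forward declarations; the real types live in the server headers. */
  class THD;
  struct TABLE_LIST;

  /* New prototype, as declared in the mysql_priv.h hunk below. */
  bool close_cached_tables(THD *thd, TABLE_LIST *tables, bool have_lock,
                           bool wait_for_refresh, bool wait_for_placeholders);

  /*
    Old call shape close_cached_tables(thd, wait_for_refresh, tables,
    have_lock) expressed against the new interface.  Per the commit
    message, only the global read lock paths (the set_var.cc hunk below
    and sql_parse.cc) pass wait_for_placeholders= TRUE; the NDB and
    sql_base callers pass FALSE.
  */
  inline bool close_cached_tables_compat(THD *thd, bool wait_for_refresh,
                                         TABLE_LIST *tables,
                                         bool have_lock= false)
  {
    return close_cached_tables(thd, tables, have_lock, wait_for_refresh,
                               false /* wait_for_placeholders */);
  }

The new wait_for_placeholders argument is what lets FLUSH TABLES WITH READ LOCK wait for tables that are only open as name-lock placeholders, instead of ignoring them as before.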
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index bf2b19bfc9c..2555bb23825 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -577,7 +577,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) bzero((char*) &table_list,sizeof(table_list)); table_list.db= m_dbname; table_list.alias= table_list.table_name= m_tabname; - close_cached_tables(thd, 0, &table_list); + close_cached_tables(thd, &table_list, FALSE, FALSE, FALSE); break; } default: @@ -8064,7 +8064,7 @@ int handle_trailing_share(NDB_SHARE *share) table_list.db= share->db; table_list.alias= table_list.table_name= share->table_name; safe_mutex_assert_owner(&LOCK_open); - close_cached_tables(thd, 0, &table_list, TRUE); + close_cached_tables(thd, &table_list, TRUE, FALSE, FALSE); pthread_mutex_lock(&ndbcluster_mutex); /* ndb_share reference temporary free */ diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index be75eff2575..a9d1615a1b4 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -883,7 +883,7 @@ int ndbcluster_setup_binlog_table_shares(THD *thd) { if (ndb_extra_logging) sql_print_information("NDB Binlog: ndb tables writable"); - close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, TRUE); + close_cached_tables(NULL, NULL, TRUE, FALSE, FALSE); } pthread_mutex_unlock(&LOCK_open); /* Signal injector thread that all is setup */ @@ -1683,7 +1683,7 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp, bzero((char*) &table_list,sizeof(table_list)); table_list.db= (char *)dbname; table_list.alias= table_list.table_name= (char *)tabname; - close_cached_tables(thd, 0, &table_list, TRUE); + close_cached_tables(thd, &table_list, TRUE, FALSE, FALSE); if ((error= ndbcluster_binlog_open_table(thd, share, table_share, table, 1))) @@ -1789,7 +1789,7 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp, bzero((char*) &table_list,sizeof(table_list)); table_list.db= (char *)dbname; table_list.alias= table_list.table_name= (char *)tabname; - close_cached_tables(thd, 0, &table_list); + close_cached_tables(thd, &table_list, FALSE, FALSE, FALSE); /* ndb_share reference create free */ DBUG_PRINT("NDB_SHARE", ("%s create free use_count: %u", share->key, share->use_count)); @@ -1908,7 +1908,7 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, bzero((char*) &table_list,sizeof(table_list)); table_list.db= schema->db; table_list.alias= table_list.table_name= schema->name; - close_cached_tables(thd, 0, &table_list, FALSE); + close_cached_tables(thd, &table_list, FALSE, FALSE, FALSE); } /* ndb_share reference temporary free */ if (share) @@ -2032,7 +2032,7 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, pthread_mutex_unlock(&ndb_schema_share_mutex); /* end protect ndb_schema_share */ - close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, FALSE); + close_cached_tables(NULL, NULL, FALSE, FALSE, FALSE); // fall through case NDBEVENT::TE_ALTER: ndb_handle_schema_change(thd, ndb, pOp, tmp_share); @@ -2189,7 +2189,7 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd, bzero((char*) &table_list,sizeof(table_list)); table_list.db= schema->db; table_list.alias= table_list.table_name= schema->name; - close_cached_tables(thd, 0, &table_list, FALSE); + close_cached_tables(thd, &table_list, FALSE, FALSE, FALSE); } if (schema_type != SOT_ALTER_TABLE) break; diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index cafb7487e35..46e08191480 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1595,7 +1595,8 @@ TABLE 
*open_performance_schema_table(THD *thd, TABLE_LIST *one_table, Open_tables_state *backup); void close_performance_schema_table(THD *thd, Open_tables_state *backup); -bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables, bool have_lock = FALSE); +bool close_cached_tables(THD *thd, TABLE_LIST *tables, bool have_lock, + bool wait_for_refresh, bool wait_for_placeholders); bool close_cached_connection_tables(THD *thd, bool wait_for_refresh, LEX_STRING *connect_string, bool have_lock = FALSE); diff --git a/sql/set_var.cc b/sql/set_var.cc index d408b2de64e..e397eccbe8b 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -3685,7 +3685,7 @@ bool sys_var_opt_readonly::update(THD *thd, set_var *var) can cause to wait on a read lock, it's required for the client application to unlock everything, and acceptable for the server to wait on all locks. */ - if (result= close_cached_tables(thd, true, NULL, false)) + if (result= close_cached_tables(thd, NULL, FALSE, TRUE, TRUE)) goto end_with_read_lock; if (result= make_global_read_lock_block_commit(thd)) diff --git a/sql/sql_base.cc b/sql/sql_base.cc index ba8b7fc1330..fd8e107b85a 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -130,7 +130,7 @@ void table_cache_free(void) DBUG_ENTER("table_cache_free"); if (table_def_inited) { - close_cached_tables((THD*) 0,0,(TABLE_LIST*) 0); + close_cached_tables(NULL, NULL, FALSE, FALSE, FALSE); if (!open_cache.records) // Safety first hash_free(&open_cache); } @@ -885,16 +885,24 @@ void free_io_cache(TABLE *table) /* Close all tables which aren't in use by any thread - THD can be NULL, but then if_wait_for_refresh must be FALSE - and tables must be NULL. + @param thd Thread context + @param tables List of tables to remove from the cache + @param have_lock If LOCK_open is locked + @param wait_for_refresh Wait for a impending flush + @param wait_for_placeholders Wait for tables being reopened so that the GRL + won't proceed while write-locked tables are being reopened by other + threads. + + @remark THD can be NULL, but then wait_for_refresh must be FALSE + and tables must be NULL. */ -bool close_cached_tables(THD *thd, bool if_wait_for_refresh, - TABLE_LIST *tables, bool have_lock) +bool close_cached_tables(THD *thd, TABLE_LIST *tables, bool have_lock, + bool wait_for_refresh, bool wait_for_placeholders) { bool result=0; DBUG_ENTER("close_cached_tables"); - DBUG_ASSERT(thd || (!if_wait_for_refresh && !tables)); + DBUG_ASSERT(thd || (!wait_for_refresh && !tables)); if (!have_lock) VOID(pthread_mutex_lock(&LOCK_open)); @@ -918,7 +926,7 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, } DBUG_PRINT("tcache", ("incremented global refresh_version to: %lu", refresh_version)); - if (if_wait_for_refresh) + if (wait_for_refresh) { /* Other threads could wait in a loop in open_and_lock_tables(), @@ -975,13 +983,13 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, found=1; } if (!found) - if_wait_for_refresh=0; // Nothing to wait for + wait_for_refresh=0; // Nothing to wait for } #ifndef EMBEDDED_LIBRARY if (!tables) kill_delayed_threads(); #endif - if (if_wait_for_refresh) + if (wait_for_refresh) { /* If there is any table that has a lower refresh_version, wait until @@ -1004,6 +1012,9 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, for (uint idx=0 ; idx < open_cache.records ; idx++) { TABLE *table=(TABLE*) hash_element(&open_cache,idx); + /* Avoid a self-deadlock. 
*/ + if (table->in_use == thd) + continue; /* Note that we wait here only for tables which are actually open, and not for placeholders with TABLE::open_placeholder set. Waiting for @@ -1018,7 +1029,8 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, are employed by CREATE TABLE as in this case table simply does not exist yet. */ - if (table->needs_reopen_or_name_lock() && table->db_stat) + if (table->needs_reopen_or_name_lock() && (table->db_stat || + (table->open_placeholder && wait_for_placeholders))) { found=1; DBUG_PRINT("signal", ("Waiting for COND_refresh")); @@ -1037,11 +1049,18 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, thd->in_lock_tables=0; /* Set version for table */ for (TABLE *table=thd->open_tables; table ; table= table->next) - table->s->version= refresh_version; + { + /* + Preserve the version (0) of write locked tables so that a impending + global read lock won't sneak in. + */ + if (table->reginfo.lock_type < TL_WRITE_ALLOW_WRITE) + table->s->version= refresh_version; + } } if (!have_lock) VOID(pthread_mutex_unlock(&LOCK_open)); - if (if_wait_for_refresh) + if (wait_for_refresh) { pthread_mutex_lock(&thd->mysys_var->mutex); thd->mysys_var->current_mutex= 0; @@ -1068,10 +1087,10 @@ bool close_cached_connection_tables(THD *thd, bool if_wait_for_refresh, DBUG_ASSERT(thd); bzero(&tmp, sizeof(TABLE_LIST)); - + if (!have_lock) VOID(pthread_mutex_lock(&LOCK_open)); - + for (idx= 0; idx < table_def_cache.records; idx++) { TABLE_SHARE *share= (TABLE_SHARE *) hash_element(&table_def_cache, idx); @@ -1100,11 +1119,11 @@ bool close_cached_connection_tables(THD *thd, bool if_wait_for_refresh, } if (tables) - result= close_cached_tables(thd, FALSE, tables, TRUE); - + result= close_cached_tables(thd, tables, TRUE, FALSE, FALSE); + if (!have_lock) VOID(pthread_mutex_unlock(&LOCK_open)); - + if (if_wait_for_refresh) { pthread_mutex_lock(&thd->mysys_var->mutex); @@ -2204,7 +2223,7 @@ void wait_for_condition(THD *thd, pthread_mutex_t *mutex, pthread_cond_t *cond) current thread. @param thd current thread context - @param tables able list containing one table to open. + @param tables table list containing one table to open. @return FALSE on success, TRUE otherwise. */ @@ -3272,8 +3291,8 @@ static bool reattach_merge(THD *thd, TABLE **err_tables_p) @param thd Thread context @param get_locks Should we get locks after reopening tables ? - @param in_refresh Are we in FLUSH TABLES ? TODO: It seems that - we can remove this parameter. + @param mark_share_as_old Mark share as old to protect from a impending + global read lock. @note Since this function can't properly handle prelocking and create placeholders it should be used in very special @@ -3287,13 +3306,17 @@ static bool reattach_merge(THD *thd, TABLE **err_tables_p) @return FALSE in case of success, TRUE - otherwise. */ -bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) +bool reopen_tables(THD *thd, bool get_locks, bool mark_share_as_old) { TABLE *table,*next,**prev; TABLE **tables,**tables_ptr; // For locks TABLE *err_tables= NULL; bool error=0, not_used; bool merge_table_found= FALSE; + const uint flags= MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN | + MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK | + MYSQL_LOCK_IGNORE_FLUSH; + DBUG_ENTER("reopen_tables"); if (!thd->open_tables) @@ -3354,7 +3377,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) /* Do not handle locks of MERGE children. 
*/ if (get_locks && !db_stat && !table->parent) *tables_ptr++= table; // need new lock on this - if (in_refresh) + if (mark_share_as_old) { table->s->version=0; table->open_placeholder= 0; @@ -3387,7 +3410,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) */ thd->some_tables_deleted=0; if ((lock= mysql_lock_tables(thd, tables, (uint) (tables_ptr - tables), - MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN, ¬_used))) + flags, ¬_used))) { thd->locked_tables=mysql_lock_merge(thd->locked_tables,lock); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 6e84551716d..de8698d7bca 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6530,8 +6530,8 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, tmp_write_to_binlog= 0; if (lock_global_read_lock(thd)) return 1; // Killed - result=close_cached_tables(thd,(options & REFRESH_FAST) ? 0 : 1, - tables); + result= close_cached_tables(thd, tables, FALSE, (options & REFRESH_FAST) ? + FALSE : TRUE, TRUE); if (make_global_read_lock_block_commit(thd)) // Killed { /* Don't leave things in a half-locked state */ @@ -6540,7 +6540,8 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, } } else - result=close_cached_tables(thd,(options & REFRESH_FAST) ? 0 : 1, tables); + result= close_cached_tables(thd, tables, FALSE, (options & REFRESH_FAST) ? + FALSE : TRUE, FALSE); my_dbopt_cleanup(); } if (options & REFRESH_HOSTS) diff --git a/sql/sql_table.cc b/sql/sql_table.cc index c618d170fb7..e12edebb95d 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -6665,7 +6665,7 @@ view_err: if (thd->locked_tables && new_name == table_name && new_db == db) { thd->in_lock_tables= 1; - error= reopen_tables(thd, 1, 0); + error= reopen_tables(thd, 1, 1); thd->in_lock_tables= 0; if (error) goto err_with_placeholders; -- cgit v1.2.1 From 9e7abc2c71103ac59d6475a90eb77d6abc08063e Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 04:19:23 +0300 Subject: Fix a compilation error. --- sql/protocol.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/sql/protocol.cc b/sql/protocol.cc index f7b3a496447..c1fc1ce8643 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -74,7 +74,6 @@ bool Protocol_binary::net_store_data(const uchar *from, size_t length) */ void net_send_error(THD *thd, uint sql_errno, const char *err) { - NET *net= &thd->net; DBUG_ENTER("net_send_error"); DBUG_ASSERT(!thd->spcont); -- cgit v1.2.1 From ec206b4caea4b031056822f43cfe0ecf0fab86ea Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 04:33:54 +0300 Subject: Silence a new warning printed by rpl_row_tabledefs_3innodb test execution. 
--- mysql-test/lib/mtr_report.pl | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/lib/mtr_report.pl b/mysql-test/lib/mtr_report.pl index a0a796dddf2..b1f15e5e187 100644 --- a/mysql-test/lib/mtr_report.pl +++ b/mysql-test/lib/mtr_report.pl @@ -304,6 +304,7 @@ sub mtr_report_stats ($) { /Slave: Error .*Unknown table/ or /Slave: Error in Write_rows event: / or /Slave: Field .* of table .* has no default value/ or + /Slave: Field .* doesn't have a default value/ or /Slave: Query caused different errors on master and slave/ or /Slave: Table .* doesn't exist/ or /Slave: Table width mismatch/ or -- cgit v1.2.1 From a3a0d4359fef580a30704797ceeed613600deedc Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 05:37:38 +0300 Subject: Fix a compilation warning and a subtle bug with truncation of the last_insert_id introduced by the patch for Bug#12713 (part 1) --- sql/sql_class.cc | 2 +- sql/sql_class.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sql/sql_class.cc b/sql/sql_class.cc index b68a532a2dc..3d3f6e4a965 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -383,7 +383,7 @@ Diagnostics_area::reset_diagnostics_area() void Diagnostics_area::set_ok_status(THD *thd, ha_rows affected_rows_arg, - ulong last_insert_id_arg, + ulonglong last_insert_id_arg, const char *message_arg) { DBUG_ASSERT(! is_set()); diff --git a/sql/sql_class.h b/sql/sql_class.h index 0df6fd47dca..3808fb68a01 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1003,7 +1003,7 @@ public: bool can_overwrite_status; void set_ok_status(THD *thd, ha_rows affected_rows_arg, - ulong last_insert_id_arg, + ulonglong last_insert_id_arg, const char *message); void set_eof_status(THD *thd); void set_error_status(THD *thd, uint sql_errno_arg, const char *message_arg); @@ -1034,7 +1034,7 @@ public: ha_rows affected_rows() const { DBUG_ASSERT(m_status == DA_OK); return m_affected_rows; } - ulong last_insert_id() const + ulonglong last_insert_id() const { DBUG_ASSERT(m_status == DA_OK); return m_last_insert_id; } uint total_warn_count() const @@ -1080,7 +1080,7 @@ private: thd->first_successful_insert_id_in_prev_stmt, which is used to implement LAST_INSERT_ID(). */ - ulong m_last_insert_id; + ulonglong m_last_insert_id; /** The total number of warnings. 
*/ uint m_total_warn_count; enum_diagnostics_status m_status; -- cgit v1.2.1 From c6675cd1876a90468ca2e2cb10471af93728aa6c Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 13:38:22 +0300 Subject: BUG#32198: Comparison of DATE with DATETIME still not using indexes correctly - Make conditions like "date_col $CMP$ 'datetime-const'" range-sargable mysql-test/r/range.result: BUG#32198: Comparison of DATE with DATETIME still not using indexes correctly - Testcase mysql-test/t/range.test: BUG#32198: Comparison of DATE with DATETIME still not using indexes correctly - Testcase sql/field.cc: BUG#32198: Comparison of DATE with DATETIME still not using indexes correctly - Added comments --- mysql-test/r/range.result | 18 ++++++++++++++++++ mysql-test/t/range.test | 20 ++++++++++++++++++++ sql/field.cc | 3 +++ sql/opt_range.cc | 27 ++++++++++++++++++++++++++- 4 files changed, 67 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index 58f8d077ac5..e0084b53320 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -1135,3 +1135,21 @@ item started price A1 2005-11-01 08:00:00 1000.000 A1 2005-11-15 00:00:00 2000.000 DROP TABLE t1; + +BUG#32198 "Comparison of DATE with DATETIME still not using indexes correctly" + +CREATE TABLE t1 ( +id int(11) NOT NULL auto_increment, +dateval date default NULL, +PRIMARY KEY (id), +KEY dateval (dateval) +) AUTO_INCREMENT=173; +INSERT INTO t1 VALUES +(1,'2007-01-01'),(2,'2007-01-02'),(3,'2007-01-03'),(4,'2007-01-04'), +(5,'2007-01-05'),(6,'2007-01-06'),(7,'2007-01-07'),(8,'2007-01-08'), +(9,'2007-01-09'),(10,'2007-01-10'),(11,'2007-01-11'); +This must use range access: +explain select * from t1 where dateval >= '2007-01-01 00:00:00' and dateval <= '2007-01-02 23:59:59'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range dateval dateval 4 NULL 2 Using where +drop table t1; diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test index b8d4b91f03d..87ba3510326 100644 --- a/mysql-test/t/range.test +++ b/mysql-test/t/range.test @@ -935,4 +935,24 @@ SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-02 00:00:00'; DROP TABLE t1; +--echo +--echo BUG#32198 "Comparison of DATE with DATETIME still not using indexes correctly" +--echo +CREATE TABLE t1 ( + id int(11) NOT NULL auto_increment, + dateval date default NULL, + PRIMARY KEY (id), + KEY dateval (dateval) +) AUTO_INCREMENT=173; + +INSERT INTO t1 VALUES +(1,'2007-01-01'),(2,'2007-01-02'),(3,'2007-01-03'),(4,'2007-01-04'), +(5,'2007-01-05'),(6,'2007-01-06'),(7,'2007-01-07'),(8,'2007-01-08'), +(9,'2007-01-09'),(10,'2007-01-10'),(11,'2007-01-11'); + +--echo This must use range access: +explain select * from t1 where dateval >= '2007-01-01 00:00:00' and dateval <= '2007-01-02 23:59:59'; + +drop table t1; + # End of 5.0 tests diff --git a/sql/field.cc b/sql/field.cc index 86853389c64..955694933a0 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -5259,6 +5259,9 @@ void Field_date::sql_type(String &res) const 1 Value was cut during conversion 2 Wrong date string 3 Datetime value that was cut (warning level NOTE) + This is used by opt_range.cc:get_mm_leaf(). Note that there is a + nearly-identical class Field_date doesn't ever return 3 from its + store function. 
*/ int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs) diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 7a51dbbe76c..5d8bba69422 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -4414,6 +4414,7 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, { tree= new (alloc) SEL_ARG(field, 0, 0); tree->type= SEL_ARG::IMPOSSIBLE; + goto end; } else { @@ -4422,8 +4423,32 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, for the cases like int_field > 999999999999999999999999 as well. */ tree= 0; + if (err == 3 && field->type() == FIELD_TYPE_DATE && + (type == Item_func::GT_FUNC || type == Item_func::GE_FUNC || + type == Item_func::LT_FUNC || type == Item_func::LE_FUNC) ) + { + /* + We were saving DATETIME into a DATE column, the conversion went ok + but a non-zero time part was cut off. + + In MySQL's SQL dialect, DATE and DATETIME are compared as datetime + values. Index over a DATE column uses DATE comparison. Changing + from one comparison to the other is possible: + + datetime(date_col)< '2007-12-10 12:34:55' -> date_col<='2007-12-10' + datetime(date_col)<='2007-12-10 12:34:55' -> date_col<='2007-12-10' + + datetime(date_col)> '2007-12-10 12:34:55' -> date_col>='2007-12-10' + datetime(date_col)>='2007-12-10 12:34:55' -> date_col>='2007-12-10' + + but we'll need to convert '>' to '>=' and '<' to '<='. This will + be done together with other types at the end of this function + (grep for field_is_equal_to_item) + */ + } + else + goto end; } - goto end; } if (err < 0) { -- cgit v1.2.1 From cdca0af70d8da157ba86773aa03bb335fab1d43f Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 12:03:11 +0100 Subject: Bug #33206 signal not reset properly in ndb --- ndb/src/kernel/vm/SimulatedBlock.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ndb/src/kernel/vm/SimulatedBlock.cpp b/ndb/src/kernel/vm/SimulatedBlock.cpp index b58e1feed9d..5e23d95bce2 100644 --- a/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/ndb/src/kernel/vm/SimulatedBlock.cpp @@ -1012,6 +1012,7 @@ SimulatedBlock::assembleFragments(Signal * signal){ /** * Don't release allocated segments */ + signal->header.m_fragmentInfo = 0; signal->header.m_noOfSections = 0; return false; } @@ -1039,6 +1040,7 @@ SimulatedBlock::assembleFragments(Signal * signal){ * fragInfo = 2 */ if(fragInfo == 2){ + signal->header.m_fragmentInfo = 0; signal->header.m_noOfSections = 0; return false; } -- cgit v1.2.1 From f1d980542233ac18d3e4acc6911b8adfc432bb09 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 15:47:23 +0400 Subject: after merge fix --- mysql-test/r/delayed.result | 16 ---------------- mysql-test/r/func_misc.result | 2 +- mysql-test/t/func_misc.test | 3 +-- 3 files changed, 2 insertions(+), 19 deletions(-) diff --git a/mysql-test/r/delayed.result b/mysql-test/r/delayed.result index e7ee23b3e2a..9f6ebea7e1b 100644 --- a/mysql-test/r/delayed.result +++ b/mysql-test/r/delayed.result @@ -109,20 +109,12 @@ c1 DROP TABLE t1; SET @@auto_increment_offset= @bug20627_old_auto_increment_offset; -Warnings: -Warning 1292 Truncated incorrect auto-increment-offset value: '0' SET @@auto_increment_increment= @bug20627_old_auto_increment_increment; -Warnings: -Warning 1292 Truncated incorrect auto-increment-increment value: '0' SET @@session.auto_increment_offset= @bug20627_old_session_auto_increment_offset; -Warnings: -Warning 1292 Truncated incorrect auto-increment-offset value: '0' SET @@session.auto_increment_increment= 
@bug20627_old_session_auto_increment_increment; -Warnings: -Warning 1292 Truncated incorrect auto-increment-increment value: '0' SET @bug20830_old_auto_increment_offset= @@auto_increment_offset; SET @bug20830_old_auto_increment_increment= @@ -245,20 +237,12 @@ SUM(c1) DROP TABLE t1; SET @@auto_increment_offset= @bug20830_old_auto_increment_offset; -Warnings: -Warning 1292 Truncated incorrect auto-increment-offset value: '0' SET @@auto_increment_increment= @bug20830_old_auto_increment_increment; -Warnings: -Warning 1292 Truncated incorrect auto-increment-increment value: '0' SET @@session.auto_increment_offset= @bug20830_old_session_auto_increment_offset; -Warnings: -Warning 1292 Truncated incorrect auto-increment-offset value: '0' SET @@session.auto_increment_increment= @bug20830_old_session_auto_increment_increment; -Warnings: -Warning 1292 Truncated incorrect auto-increment-increment value: '0' CREATE TABLE t1(a BIT); INSERT DELAYED INTO t1 VALUES(1); FLUSH TABLE t1; diff --git a/mysql-test/r/func_misc.result b/mysql-test/r/func_misc.result index 4ba5ee674a9..4f6b6d3a0d8 100644 --- a/mysql-test/r/func_misc.result +++ b/mysql-test/r/func_misc.result @@ -210,7 +210,7 @@ test CREATE TABLE t1 (a int); INSERT INTO t1 VALUES (5), (2); SELECT NAME_CONST(x,2) FROM (SELECT a x FROM t1) t; -ERROR HY000: The 'NAME_CONST' syntax is reserved for purposes internal to the MySQL server +ERROR HY000: Incorrect arguments to NAME_CONST DROP TABLE t1; CREATE TABLE t1(a INT); INSERT INTO t1 VALUES (), (), (); diff --git a/mysql-test/t/func_misc.test b/mysql-test/t/func_misc.test index 82db9ddbd67..ccb59df5677 100644 --- a/mysql-test/t/func_misc.test +++ b/mysql-test/t/func_misc.test @@ -212,12 +212,11 @@ SELECT NAME_CONST('test', 'test'); CREATE TABLE t1 (a int); INSERT INTO t1 VALUES (5), (2); ---error ER_RESERVED_SYNTAX +--error ER_WRONG_ARGUMENTS SELECT NAME_CONST(x,2) FROM (SELECT a x FROM t1) t; DROP TABLE t1; ---echo End of 5.0 tests # # Bug #32559: connection hangs on query with name_const -- cgit v1.2.1 From f7f96cd28dcbbe222df80c65abc98dbaac71fbaf Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 16:43:38 +0400 Subject: after merge fix --- mysql-test/r/range.result | 28 ++++++++++++++-------------- tests/mysql_client_test.c | 2 ++ 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index 005969c7f18..92db6d8429f 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -1135,20 +1135,6 @@ item started price A1 2005-11-01 08:00:00 1000.000 A1 2005-11-15 00:00:00 2000.000 DROP TABLE t1; -create table t1 (a int); -insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t2 (a int, b int, filler char(100)); -insert into t2 select A.a + 10 * (B.a + 10 * C.a), 10, 'filler' from t1 A, -t1 B, t1 C where A.a < 5; -insert into t2 select 1000, b, 'filler' from t2; -alter table t2 add index (a,b); -select 'In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)' Z; -Z -In following EXPLAIN the access method should be ref, #rows~=500 (and not 2) -explain select * from t2 where a=1000 and b<11; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ref a a 5 const 502 Using where -drop table t1, t2; BUG#32198 "Comparison of DATE with DATETIME still not using indexes correctly" @@ -1167,3 +1153,17 @@ explain select * from t1 where dateval >= '2007-01-01 00:00:00' and dateval <= ' id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE 
t1 range dateval dateval 4 NULL 2 Using where drop table t1; +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t2 (a int, b int, filler char(100)); +insert into t2 select A.a + 10 * (B.a + 10 * C.a), 10, 'filler' from t1 A, +t1 B, t1 C where A.a < 5; +insert into t2 select 1000, b, 'filler' from t2; +alter table t2 add index (a,b); +select 'In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)' Z; +Z +In following EXPLAIN the access method should be ref, #rows~=500 (and not 2) +explain select * from t2 where a=1000 and b<11; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref a a 5 const 502 Using where +drop table t1, t2; diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 6042ff4a2bf..9284b2182b1 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -16903,7 +16903,9 @@ static void test_bug20023() int sql_big_selects_4; int sql_big_selects_5; +#if NOT_USED char query_buffer[MAX_TEST_QUERY_LENGTH]; +#endif /* Create a new connection. */ -- cgit v1.2.1 From 0cc269da52f939ffc6a6272370d113aac9ab5369 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 18:26:27 +0400 Subject: after merge fix --- sql/sql_parse.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 2435c16819f..7c00ac6d1c9 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -487,7 +487,7 @@ end: (CREATE TABLE, ALTER TABLE ... UNION=(...)). Set TL_WRITE for every child. Set 'db' for every child if not present. */ - +#ifndef NO_EMBEDDED_ACCESS_CHECKS static bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *table_list) { @@ -508,7 +508,7 @@ static bool check_merge_table_access(THD *thd, char *db, } return error; } - +#endif /* This works because items are allocated with sql_alloc() */ -- cgit v1.2.1 From e51e6097b98eb4faae8fec4704feab251d8031dc Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 21:13:08 +0400 Subject: valgrind error fix --- sql/ha_blackhole.cc | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc index 93d085bbc88..f098ef71a43 100644 --- a/sql/ha_blackhole.cc +++ b/sql/ha_blackhole.cc @@ -296,13 +296,21 @@ static byte* blackhole_get_key(st_blackhole_share *share, uint *length, } +static void blackhole_free_key(st_blackhole_share *share) +{ + thr_lock_delete(&share->lock); + my_free((byte*) share, MYF(0)); +} + + bool blackhole_db_init() { DBUG_ENTER("blackhole_db_init"); if (pthread_mutex_init(&blackhole_mutex, MY_MUTEX_INIT_FAST)) goto error; if (hash_init(&blackhole_open_tables, &my_charset_bin, 32, 0, 0, - (hash_get_key) blackhole_get_key, 0, 0)) + (hash_get_key) blackhole_get_key, + (hash_free_key) blackhole_free_key, 0)) { VOID(pthread_mutex_destroy(&blackhole_mutex)); } -- cgit v1.2.1 From f61f5f72f55b44d33ff77de00432649134eab25c Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 21:34:05 +0400 Subject: additional after merge fix mysql-test/r/partition_range.result: removed fix for bug#30573 mysql-test/suite/parts/r/rpl_partition.result: updated result file mysql-test/t/partition_range.test: removed test case for bug#30573 sql/ha_partition.cc: removed fix for bug#30573 --- mysql-test/r/partition_range.result | 20 -------------- mysql-test/suite/parts/r/rpl_partition.result | 2 +- mysql-test/t/partition_range.test | 40 +++++++++++++-------------- sql/ha_partition.cc | 2 +- 4 files changed, 22 
insertions(+), 42 deletions(-) diff --git a/mysql-test/r/partition_range.result b/mysql-test/r/partition_range.result index f75cbbf5470..4da79704ec3 100644 --- a/mysql-test/r/partition_range.result +++ b/mysql-test/r/partition_range.result @@ -709,23 +709,3 @@ WHERE (a >= '2004-07-01' AND a <= '2004-09-30') OR id select_type table partitions type possible_keys key key_len ref rows Extra 1 SIMPLE t1 p407,p408,p409,p507,p508,p509 ALL NULL NULL NULL NULL 18 Using where DROP TABLE t1; -create table t1 (a int); -insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -CREATE TABLE t2 ( -defid int(10) unsigned NOT NULL, -day int(10) unsigned NOT NULL, -count int(10) unsigned NOT NULL, -filler char(200), -KEY (defid,day) -) -PARTITION BY RANGE (day) ( -PARTITION p7 VALUES LESS THAN (20070401) , -PARTITION p8 VALUES LESS THAN (20070501)); -insert into t2 select 20, 20070311, 1, 'filler' from t1 A, t1 B; -insert into t2 select 20, 20070411, 1, 'filler' from t1 A, t1 B; -insert into t2 values(52, 20070321, 123, 'filler') ; -insert into t2 values(52, 20070322, 456, 'filler') ; -select sum(count) from t2 ch where ch.defid in (50,52) and ch.day between 20070320 and 20070401 group by defid; -sum(count) -579 -drop table t1, t2; diff --git a/mysql-test/suite/parts/r/rpl_partition.result b/mysql-test/suite/parts/r/rpl_partition.result index 79a95fd613b..bd77d4317bd 100644 --- a/mysql-test/suite/parts/r/rpl_partition.result +++ b/mysql-test/suite/parts/r/rpl_partition.result @@ -121,7 +121,7 @@ Create Table CREATE TABLE `t3` ( `fkid` mediumint(9) DEFAULT NULL, `filler` varchar(255) DEFAULT NULL, PRIMARY KEY (`id`) -) ENGINE=MyISAM AUTO_INCREMENT=9 DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (id) SUBPARTITION BY HASH (id) SUBPARTITIONS 2 (PARTITION pa1 VALUES LESS THAN (10) ENGINE = MyISAM, PARTITION pa2 VALUES LESS THAN (20) ENGINE = MyISAM, PARTITION pa3 VALUES LESS THAN (30) ENGINE = MyISAM, PARTITION pa4 VALUES LESS THAN (40) ENGINE = MyISAM, PARTITION pa5 VALUES LESS THAN (50) ENGINE = MyISAM, PARTITION pa6 VALUES LESS THAN (60) ENGINE = MyISAM, PARTITION pa7 VALUES LESS THAN (70) ENGINE = MyISAM, PARTITION pa8 VALUES LESS THAN (80) ENGINE = MyISAM, PARTITION pa9 VALUES LESS THAN (90) ENGINE = MyISAM, PARTITION pa10 VALUES LESS THAN (100) ENGINE = MyISAM, PARTITION pa11 VALUES LESS THAN MAXVALUE ENGINE = MyISAM) */ +) ENGINE=MyISAM AUTO_INCREMENT=1001 DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (id) SUBPARTITION BY HASH (id) SUBPARTITIONS 2 (PARTITION pa1 VALUES LESS THAN (10) ENGINE = MyISAM, PARTITION pa2 VALUES LESS THAN (20) ENGINE = MyISAM, PARTITION pa3 VALUES LESS THAN (30) ENGINE = MyISAM, PARTITION pa4 VALUES LESS THAN (40) ENGINE = MyISAM, PARTITION pa5 VALUES LESS THAN (50) ENGINE = MyISAM, PARTITION pa6 VALUES LESS THAN (60) ENGINE = MyISAM, PARTITION pa7 VALUES LESS THAN (70) ENGINE = MyISAM, PARTITION pa8 VALUES LESS THAN (80) ENGINE = MyISAM, PARTITION pa9 VALUES LESS THAN (90) ENGINE = MyISAM, PARTITION pa10 VALUES LESS THAN (100) ENGINE = MyISAM, PARTITION pa11 VALUES LESS THAN MAXVALUE ENGINE = MyISAM) */ show slave status; Slave_IO_State # Master_Host 127.0.0.1 diff --git a/mysql-test/t/partition_range.test b/mysql-test/t/partition_range.test index a9f6d410fa3..6ed3abab46a 100644 --- a/mysql-test/t/partition_range.test +++ b/mysql-test/t/partition_range.test @@ -762,24 +762,24 @@ DROP TABLE t1; # # BUG#30573: get wrong result with "group by" on PARTITIONed table # -create table t1 (a int); -insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -CREATE 
TABLE t2 ( - defid int(10) unsigned NOT NULL, - day int(10) unsigned NOT NULL, - count int(10) unsigned NOT NULL, - filler char(200), - KEY (defid,day) -) -PARTITION BY RANGE (day) ( - PARTITION p7 VALUES LESS THAN (20070401) , - PARTITION p8 VALUES LESS THAN (20070501)); - -insert into t2 select 20, 20070311, 1, 'filler' from t1 A, t1 B; -insert into t2 select 20, 20070411, 1, 'filler' from t1 A, t1 B; -insert into t2 values(52, 20070321, 123, 'filler') ; -insert into t2 values(52, 20070322, 456, 'filler') ; - -select sum(count) from t2 ch where ch.defid in (50,52) and ch.day between 20070320 and 20070401 group by defid; -drop table t1, t2; +#create table t1 (a int); +#insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +#CREATE TABLE t2 ( +# defid int(10) unsigned NOT NULL, +# day int(10) unsigned NOT NULL, +# count int(10) unsigned NOT NULL, +# filler char(200), +# KEY (defid,day) +#) +#PARTITION BY RANGE (day) ( +# PARTITION p7 VALUES LESS THAN (20070401) , +# PARTITION p8 VALUES LESS THAN (20070501)); + +#insert into t2 select 20, 20070311, 1, 'filler' from t1 A, t1 B; +#insert into t2 select 20, 20070411, 1, 'filler' from t1 A, t1 B; +#insert into t2 values(52, 20070321, 123, 'filler') ; +#insert into t2 values(52, 20070322, 456, 'filler') ; + +#select sum(count) from t2 ch where ch.defid in (50,52) and ch.day between 20070320 and 20070401 group by defid; +#drop table t1, t2; diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index a792d5c4161..d251e056c3e 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3853,7 +3853,7 @@ int ha_partition::read_range_first(const key_range *start_key, start_key->key, start_key->keypart_map, start_key->flag); } - DBUG_RETURN (error? error: compare_key(end_range) <= 0 ? 0 : HA_ERR_END_OF_FILE); + DBUG_RETURN(error); } -- cgit v1.2.1 From 12504d906bfabdd64938e313d3ac2d88b490834d Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 23:46:29 +0400 Subject: additional fix --- sql/ha_blackhole.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc index f098ef71a43..e477686d18e 100644 --- a/sql/ha_blackhole.cc +++ b/sql/ha_blackhole.cc @@ -299,7 +299,7 @@ static byte* blackhole_get_key(st_blackhole_share *share, uint *length, static void blackhole_free_key(st_blackhole_share *share) { thr_lock_delete(&share->lock); - my_free((byte*) share, MYF(0)); + my_free((gptr) share, MYF(0)); } -- cgit v1.2.1 From 291656044ff77ddee66629fd5eb5008b76242143 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 21:31:38 +0100 Subject: Bug #33168 Incorrectly handled parameters can make the TC crash during node failure - during commit deadlock timeout needs to be at least 5 times db hearbeat --- ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 1 + ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp index 6fb03fa2407..0c5ee128ce0 100644 --- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp +++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp @@ -1674,6 +1674,7 @@ private: UintR cfailure_nr; UintR coperationsize; UintR ctcTimer; + UintR cDbHbInterval; ApiConnectRecordPtr tmpApiConnectptr; UintR tcheckGcpId; diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 40d5dcf7407..22fba0bd82c 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -643,6 +643,10 @@ void 
Dbtc::execREAD_CONFIG_REQ(Signal* signal) ndb_mgm_get_int_parameter(p, CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT, &val); set_timeout_value(val); + val = 1500; + ndb_mgm_get_int_parameter(p, CFG_DB_HEARTBEAT_INTERVAL, &val); + cDbHbInterval = (val < 10) ? 10 : val; + val = 3000; ndb_mgm_get_int_parameter(p, CFG_DB_TRANSACTION_INACTIVE_TIMEOUT, &val); set_appl_timeout_value(val); @@ -6390,6 +6394,7 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr, Uint32 errCode) // conditions should get us here. We ignore it. /*------------------------------------------------------------------*/ case CS_PREPARE_TO_COMMIT: + { jam(); /*------------------------------------------------------------------*/ /* WE ARE WAITING FOR DIH TO COMMIT THE TRANSACTION. WE SIMPLY*/ @@ -6398,12 +6403,16 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr, Uint32 errCode) // To ensure against strange bugs we crash the system if we have passed // time-out period by a factor of 10 and it is also at least 5 seconds. /*------------------------------------------------------------------*/ - if (((ctcTimer - getApiConTimer(apiConnectptr.i)) > (10 * ctimeOutValue)) && - ((ctcTimer - getApiConTimer(apiConnectptr.i)) > 500)) { - jam(); - systemErrorLab(signal, __LINE__); + Uint32 time_passed = ctcTimer - getApiConTimer(apiConnectptr.i); + if (time_passed > 500 && + time_passed > (5 * cDbHbInterval) && + time_passed > (10 * ctimeOutValue)) + { + jam(); + systemErrorLab(signal, __LINE__); }//if break; + } case CS_COMMIT_SENT: jam(); /*------------------------------------------------------------------*/ -- cgit v1.2.1 From 37a2c4590a7453d5c8e5b67c2f512162323ac738 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 13 Dec 2007 23:58:55 +0300 Subject: Fix broken embedded build (broken by the patch for Bug#12713, first part). client/mysqltest.c: Remove a call to my_message() (server call) in case of a client error. It asserts in the embedded build now, since is called after send_ok(). libmysqld/emb_qcache.cc: Use net_send_eof (embedded implementation) in embedded query cache/ send result to client, since send_eof currently does not send anything. libmysqld/emb_qcache.h: Declare net_send_eof. libmysqld/lib_sql.cc: Fix the main loop of the embedded server to use the new Diagnostics_area API. sql/log.cc: Silence errors of open_performance_schema_table. This function is called for general logging, and it happens after the error has been sent to the client, and thus triggers an assert. storage/myisam/ha_myisam.cc: Remove an old hack that broke repair.test in embedded build: unless we clear an error here, the server attempts to send OK after ERROR. This currently asserts. 
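The lib_sql.cc hunks below move the embedded server onto the statement-status flow introduced by the Bug#12713 work: the diagnostics area is reset before a command runs and the accumulated OK/EOF/error status is flushed once it finishes. A rough sketch of that per-command sequence, assuming the main_da member and net_end_statement() call used in the hunks that follow; dispatch_embedded_command() is a placeholder, not a function in the tree:

  #include "mysql_priv.h"

  /* Placeholder for the real dispatch logic in emb_advanced_command(). */
  static void dispatch_embedded_command(THD *thd)
  {
    (void) thd;                              /* real work elided in this sketch  */
  }

  static void run_one_embedded_command(THD *thd)
  {
    thd->clear_error();                      /* drop leftover error state        */
    thd->main_da.reset_diagnostics_area();   /* start from an empty status area  */

    dispatch_embedded_command(thd);          /* execute the command              */

    net_end_statement(thd);                  /* report OK/EOF/error exactly once */
  }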
--- client/mysqltest.c | 1 - libmysqld/emb_qcache.cc | 2 +- libmysqld/emb_qcache.h | 1 + libmysqld/lib_sql.cc | 2 ++ sql/log.cc | 69 ++++++++++++++++++++++++++------------------- storage/myisam/ha_myisam.cc | 2 -- 6 files changed, 44 insertions(+), 33 deletions(-) diff --git a/client/mysqltest.c b/client/mysqltest.c index cca2420d498..1876e2e0144 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -8018,7 +8018,6 @@ REPLACE *init_replace(char * *from, char * *to,uint count, if (!len) { errno=EINVAL; - my_message(0,"No to-string for last from-string",MYF(ME_BELL)); DBUG_RETURN(0); } states+=len+1; diff --git a/libmysqld/emb_qcache.cc b/libmysqld/emb_qcache.cc index 17549bfa96b..07b7d82a0cc 100644 --- a/libmysqld/emb_qcache.cc +++ b/libmysqld/emb_qcache.cc @@ -447,7 +447,7 @@ int emb_load_querycache_result(THD *thd, Querycache_stream *src) *prev_row= NULL; data->embedded_info->prev_ptr= prev_row; return_ok: - send_eof(thd); + net_send_eof(thd, thd->server_status, thd->total_warn_count); DBUG_RETURN(0); err: DBUG_RETURN(1); diff --git a/libmysqld/emb_qcache.h b/libmysqld/emb_qcache.h index 6e320fbd967..e1c219ec461 100644 --- a/libmysqld/emb_qcache.h +++ b/libmysqld/emb_qcache.h @@ -79,3 +79,4 @@ public: uint emb_count_querycache_size(THD *thd); int emb_load_querycache_result(THD *thd, Querycache_stream *src); void emb_store_querycache_result(Querycache_stream *dst, THD* thd); +void net_send_eof(THD *thd, uint server_status, uint total_warn_count); diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index df6d68c7582..f13129cc52d 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -87,6 +87,7 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command, /* Clear result variables */ thd->clear_error(); + thd->main_da.reset_diagnostics_area(); mysql->affected_rows= ~(my_ulonglong) 0; mysql->field_count= 0; net_clear_error(net); @@ -625,6 +626,7 @@ int check_embedded_connection(MYSQL *mysql, const char *db) strmake(sctx->priv_host, (char*) my_localhost, MAX_HOSTNAME-1); sctx->priv_user= sctx->user= my_strdup(mysql->user, MYF(0)); result= check_user(thd, COM_CONNECT, NULL, 0, db, true); + net_end_statement(thd); emb_read_query_result(mysql); return result; } diff --git a/sql/log.cc b/sql/log.cc index 9fdede9ef2c..0cea59eed98 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -280,29 +280,34 @@ void Log_to_csv_event_handler::cleanup() /* log event handlers */ -/* +/** Log command to the general log table - SYNOPSIS - log_general() - - event_time command start timestamp - user_host the pointer to the string with user@host info - user_host_len length of the user_host string. this is computed once - and passed to all general log event handlers - thread_id Id of the thread, issued a query - command_type the type of the command being logged - command_type_len the length of the string above - sql_text the very text of the query being executed - sql_text_len the length of sql_text string - - DESCRIPTION - - Log given command to the general log table - - RETURN - FALSE - OK - TRUE - error occured + Log given command to the general log table. + + @param event_time command start timestamp + @param user_host the pointer to the string with user@host info + @param user_host_len length of the user_host string. 
this is computed + once and passed to all general log event handlers + @param thread_id Id of the thread, issued a query + @param command_type the type of the command being logged + @param command_type_len the length of the string above + @param sql_text the very text of the query being executed + @param sql_text_len the length of sql_text string + + + @return This function attempts to never call my_error(). This is + necessary, because general logging happens already after a statement + status has been sent to the client, so the client can not see the + error anyway. Besides, the error is not related to the statement + being executed and is internal, and thus should be handled + internally (@todo: how?). + If a write to the table has failed, the function attempts to + write to a short error message to the file. The failure is also + indicated in the return value. + + @retval FALSE OK + @retval TRUE error occured */ bool Log_to_csv_event_handler:: @@ -342,6 +347,20 @@ bool Log_to_csv_event_handler:: table_list.db= MYSQL_SCHEMA_NAME.str; table_list.db_length= MYSQL_SCHEMA_NAME.length; + /* + 1) open_performance_schema_table generates an error of the + table can not be opened or is corrupted. + 2) "INSERT INTO general_log" can generate warning sometimes. + + Suppress these warnings and errors, they can't be dealt with + properly anyway. + + QQ: this problem needs to be studied in more detail. + Comment this 2 lines and run "cast.test" to see what's happening. + */ + thd->push_internal_handler(& error_handler); + need_pop= TRUE; + if (!(table= open_performance_schema_table(thd, & table_list, & open_tables_backup))) goto err; @@ -357,14 +376,6 @@ bool Log_to_csv_event_handler:: /* Honor next number columns if present */ table->next_number_field= table->found_next_number_field; - /* - "INSERT INTO general_log" can generate warning sometimes. - QQ: this problem needs to be studied in more details. - Comment this 2 lines and run "cast.test" to see what's happening: - */ - thd->push_internal_handler(& error_handler); - need_pop= TRUE; - /* NOTE: we do not call restore_record() here, as all fields are filled by the Logger (=> no need to load default ones). diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index a557890a7b9..7710660f29e 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -1407,10 +1407,8 @@ int ha_myisam::enable_indexes(uint mode) might have been set by the first repair. They can still be seen with SHOW WARNINGS then. */ -#ifndef EMBEDDED_LIBRARY if (! error) thd->clear_error(); -#endif /* EMBEDDED_LIBRARY */ } info(HA_STATUS_CONST); thd->proc_info=save_proc_info; -- cgit v1.2.1 From a6eec72e7bcf1ef04dd4569e51fa8845cb192fc1 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 14 Dec 2007 02:23:42 +0300 Subject: Remove an unused argument of sql_update::do_updates(). sql/sql_class.h: Remove an unused argument. sql/sql_update.cc: Remove an unused argument. 
--- sql/sql_class.h | 2 +- sql/sql_update.cc | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/sql/sql_class.h b/sql/sql_class.h index 3808fb68a01..7a5e3963a9c 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2692,7 +2692,7 @@ public: bool send_data(List &items); bool initialize_tables (JOIN *join); void send_error(uint errcode,const char *err); - int do_updates (bool from_send_error); + int do_updates(); bool send_eof(); virtual void abort(); }; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 1e9b7da1c81..e2dfd89aa32 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1729,7 +1729,7 @@ void multi_update::abort() todo/fixme: do_update() is never called with the arg 1. should it change the signature to become argless? */ - VOID(do_updates(0)); + VOID(do_updates()); } } if (thd->transaction.stmt.modified_non_trans_table) @@ -1760,7 +1760,7 @@ void multi_update::abort() } -int multi_update::do_updates(bool from_send_error) +int multi_update::do_updates() { TABLE_LIST *cur_table; int local_error= 0; @@ -1907,7 +1907,6 @@ int multi_update::do_updates(bool from_send_error) DBUG_RETURN(0); err: - if (!from_send_error) { thd->fatal_error(); prepare_record_for_error_message(local_error, table); @@ -1949,7 +1948,7 @@ bool multi_update::send_eof() Does updates for the last n - 1 tables, returns 0 if ok; error takes into account killed status gained in do_updates() */ - int local_error = (table_count) ? do_updates(0) : 0; + int local_error = (table_count) ? do_updates() : 0; /* if local_error is not set ON until after do_updates() then later carried out killing should not affect binlogging. -- cgit v1.2.1 From bb9950a8d7c976ad7d46f1d227f3c010d22d96fb Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 14 Dec 2007 16:58:12 +0100 Subject: Raise version number after cloning 5.0.54 --- configure.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configure.in b/configure.in index ead2f85afb7..63d3976aa3c 100644 --- a/configure.in +++ b/configure.in @@ -7,7 +7,7 @@ AC_INIT(sql/mysqld.cc) AC_CANONICAL_SYSTEM # The Docs Makefile.am parses this line! # remember to also change ndb version below and update version.c in ndb -AM_INIT_AUTOMAKE(mysql, 5.0.54) +AM_INIT_AUTOMAKE(mysql, 5.0.56) AM_CONFIG_HEADER([include/config.h:config.h.in]) PROTOCOL_VERSION=10 @@ -23,7 +23,7 @@ NDB_SHARED_LIB_VERSION=$NDB_SHARED_LIB_MAJOR_VERSION:0:0 # ndb version NDB_VERSION_MAJOR=5 NDB_VERSION_MINOR=0 -NDB_VERSION_BUILD=54 +NDB_VERSION_BUILD=56 NDB_VERSION_STATUS="" # Set all version vars based on $VERSION. How do we do this more elegant ? 
-- cgit v1.2.1 From 0a578711a2e44dcb6bfe6c1491b3b8e799ae13b8 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 14 Dec 2007 17:46:24 -0700 Subject: WL#4165 (Prepared statements: validation) Adding the tests cases alone for WL#4165, disabled mysql-test/t/disabled.def: WL#4165 (Prepared statements: validation) mysql-test/r/ps_ddl.result: WL#4165 (Prepared statements: validation) mysql-test/t/ps_ddl.test: WL#4165 (Prepared statements: validation) --- mysql-test/r/ps_ddl.result | 2329 ++++++++++++++++++++++++++++++++++++++++++++ mysql-test/t/disabled.def | 2 +- mysql-test/t/ps_ddl.test | 1851 +++++++++++++++++++++++++++++++++++ 3 files changed, 4181 insertions(+), 1 deletion(-) create mode 100644 mysql-test/r/ps_ddl.result create mode 100644 mysql-test/t/ps_ddl.test diff --git a/mysql-test/r/ps_ddl.result b/mysql-test/r/ps_ddl.result new file mode 100644 index 00000000000..0987e765265 --- /dev/null +++ b/mysql-test/r/ps_ddl.result @@ -0,0 +1,2329 @@ +===================================================================== +Testing 1: NOTHING -> TABLE transitions +===================================================================== +drop table if exists t1; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +prepare stmt from 'select * from t1'; +ERROR 42S02: Table 'test.t1' doesn't exist +===================================================================== +Testing 2: NOTHING -> TEMPORARY TABLE transitions +===================================================================== +===================================================================== +Testing 3: NOTHING -> VIEW transitions +===================================================================== +===================================================================== +Testing 4: TABLE -> NOTHING transitions +===================================================================== +drop table if exists t4; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t4(a int); +prepare stmt from 'select * from t4'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t4; +execute stmt; +ERROR 42S02: Table 'test.t4' doesn't exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +ERROR 42S02: Table 'test.t4' doesn't exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +===================================================================== +Testing 5: TABLE -> TABLE (DDL) transitions +===================================================================== +drop table if exists t5; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t5(a int); +prepare stmt from 'select * from t5'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from 
+INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +alter table t5 add column (b int); +execute stmt; +a b +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +a b +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +drop table t5; +===================================================================== +Testing 6: TABLE -> TABLE (TRIGGER) transitions +===================================================================== +drop table if exists t6; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t6(a int); +prepare stmt from 'insert into t6(a) value (?)'; +set @val=1; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +set @val=2; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +create trigger t6_bi before insert on t6 for each row +begin +set @message= "t6_bi"; +end +$$ +set @message="none"; +set @val=3; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +select @message; +@message +t6_bi +set @val=4; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +select @message; +@message +t6_bi +prepare stmt from 'insert into t6(a) value (?)'; +set @message="none"; +set @val=5; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +select @message; +@message +t6_bi +set @message="none"; +set @val=6; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +select @message; +@message +t6_bi +create trigger t6_bd before delete on t6 for each row +begin +set @message= "t6_bd"; +end +$$ +set @message="none"; +set @val=7; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +select @message; +@message +t6_bi +set @message="none"; +set @val=8; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +select @message; +@message +t6_bi +prepare stmt from 'insert into t6(a) value (?)'; +set @message="none"; +set @val=9; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +select @message; +@message +t6_bi +set @message="none"; +set @val=10; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +select @message; +@message +t6_bi +drop trigger t6_bi; +create trigger t6_bi before 
insert on t6 for each row +begin +set @message= "t6_bi (2)"; +end +$$ +set @message="none"; +set @val=11; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +t6_bi (2) +set @val=12; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +t6_bi (2) +prepare stmt from 'insert into t6(a) value (?)'; +set @message="none"; +set @val=13; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +t6_bi (2) +set @message="none"; +set @val=14; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +t6_bi (2) +drop trigger t6_bd; +create trigger t6_bd before delete on t6 for each row +begin +set @message= "t6_bd (2)"; +end +$$ +set @message="none"; +set @val=15; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +t6_bi (2) +set @message="none"; +set @val=16; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +t6_bi (2) +prepare stmt from 'insert into t6(a) value (?)'; +set @message="none"; +set @val=17; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +t6_bi (2) +set @message="none"; +set @val=18; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +t6_bi (2) +drop trigger t6_bi; +set @message="none"; +set @val=19; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +none +set @val=20; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +none +prepare stmt from 'insert into t6(a) value (?)'; +set @message="none"; +set @val=21; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +none +set @val=22; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +none +drop trigger t6_bd; +set @val=23; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +none +set @val=24; +execute stmt using 
@val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +select @message; +@message +none +select * from t6 order by a; +a +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +drop table t6; +===================================================================== +Testing 7: TABLE -> TABLE (TRIGGER dependencies) transitions +===================================================================== +drop table if exists t7_proc; +drop table if exists t7_func; +drop table if exists t7_view; +drop table if exists t7_table; +drop table if exists t7_dependent_table; +drop table if exists t7_table_trigger; +drop table if exists t7_audit; +drop procedure if exists audit_proc; +drop function if exists audit_func; +drop view if exists audit_view; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t7_proc(a int); +create table t7_func(a int); +create table t7_view(a int); +create table t7_table(a int); +create table t7_table_trigger(a int); +create table t7_audit(old_a int, new_a int, reason varchar(50)); +create table t7_dependent_table(old_a int, new_a int, reason varchar(50)); +create procedure audit_proc(a int) +insert into t7_audit values (NULL, a, "proc v1"); +create function audit_func() returns varchar(50) +return "func v1"; +create view audit_view as select "view v1" as reason from dual; +create trigger t7_proc_bi before insert on t7_proc for each row +call audit_proc(NEW.a); +create trigger t7_func_bi before insert on t7_func for each row +insert into t7_audit values (NULL, NEW.a, audit_func()); +create trigger t7_view_bi before insert on t7_view for each row +insert into t7_audit values (NULL, NEW.a, (select reason from audit_view)); +create trigger t7_table_bi before insert on t7_table for each row +insert into t7_dependent_table values (NULL, NEW.a, "dependent table"); +create trigger t7_table_trigger_bi before insert on t7_dependent_table +for each row set NEW.reason="trigger v1"; +prepare stmt_proc from 'insert into t7_proc(a) value (?)'; +set @val=101; +execute stmt_proc using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +set @val=102; +execute stmt_proc using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop procedure audit_proc; +create procedure audit_proc(a int) +insert into t7_audit values (NULL, a, "proc v2"); +set @val=103; +execute stmt_proc using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +set @val=104; +execute stmt_proc using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +prepare stmt_func from 'insert into t7_func(a) value (?)'; +set @val=201; +execute stmt_func using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +set @val=202; +execute stmt_func using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop function 
audit_func; +create function audit_func() returns varchar(50) +return "func v2"; +set @val=203; +execute stmt_func using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +set @val=204; +execute stmt_func using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +prepare stmt_view from 'insert into t7_view(a) value (?)'; +set @val=301; +execute stmt_view using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +set @val=302; +execute stmt_view using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop view audit_view; +create view audit_view as select "view v2" as reason from dual; +set @val=303; +execute stmt_view using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +set @val=304; +execute stmt_view using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +prepare stmt_table from 'insert into t7_table(a) value (?)'; +set @val=401; +execute stmt_table using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +set @val=402; +execute stmt_table using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +alter table t7_dependent_table add column comments varchar(100) default NULL; +set @val=403; +execute stmt_table using @val; +ERROR 21S01: Column count doesn't match value count at row 1 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +set @val=404; +execute stmt_table using @val; +ERROR 21S01: Column count doesn't match value count at row 1 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +alter table t7_dependent_table drop column comments; +set @val=405; +execute stmt_table using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +3 +set @val=406; +execute stmt_table using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +3 +prepare stmt_table_trigger from 'insert into t7_table(a) value (?)'; +set @val=501; +execute stmt_table_trigger using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +3 +set @val=502; +execute stmt_table_trigger using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +3 +drop trigger t7_table_trigger_bi; +create trigger t7_table_trigger_bi before insert on t7_dependent_table +for each row set NEW.reason="trigger v2"; +set @val=503; +execute 
stmt_table_trigger using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +4 +set @val=504; +execute stmt_table_trigger using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +4 +select * from t7_audit order by new_a; +old_a new_a reason +NULL 101 proc v1 +NULL 102 proc v1 +NULL 103 proc v2 +NULL 104 proc v2 +NULL 201 func v1 +NULL 202 func v1 +NULL 203 func v2 +NULL 204 func v2 +NULL 301 view v1 +NULL 302 view v1 +NULL 303 view v1 +NULL 304 view v1 +select * from t7_dependent_table order by new_a; +old_a new_a reason +NULL 401 trigger v1 +NULL 402 trigger v1 +NULL 405 trigger v1 +NULL 406 trigger v1 +NULL 501 trigger v1 +NULL 502 trigger v1 +NULL 503 trigger v2 +NULL 504 trigger v2 +drop table t7_proc; +drop table t7_func; +drop table t7_view; +drop table t7_table; +drop table t7_dependent_table; +drop table t7_table_trigger; +drop table t7_audit; +drop procedure audit_proc; +drop function audit_func; +drop view audit_view; +===================================================================== +Testing 8: TABLE -> TEMPORARY TABLE transitions +===================================================================== +drop table if exists t8; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t8(a int); +prepare stmt from 'select * from t8'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t8; +create temporary table t8(a int); +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +drop table t8; +===================================================================== +Testing 9: TABLE -> VIEW transitions +===================================================================== +drop table if exists t9; +drop table if exists t9_b; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t9(a int); +create table t9_b(a int); +prepare stmt from 'select * from t9'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t9; +create view t9 as select * from t9_b; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +drop view t9; +drop table t9_b; 
+===================================================================== +Testing 10: TEMPORARY TABLE -> NOTHING transitions +===================================================================== +drop temporary table if exists t10; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create temporary table t10(a int); +prepare stmt from 'select * from t10'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop temporary table t10; +execute stmt; +ERROR 42S02: Table 'test.t10' doesn't exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +ERROR 42S02: Table 'test.t10' doesn't exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +===================================================================== +Testing 11: TEMPORARY TABLE -> TABLE transitions +===================================================================== +drop table if exists t11; +drop temporary table if exists t11; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t11(a int); +insert into t11(a) value (1); +create temporary table t11(a int); +prepare stmt from 'select * from t11'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop temporary table t11; +execute stmt; +a +1 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +a +1 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +select * from t11; +a +1 +drop table t11; +===================================================================== +Testing 12: TEMPORARY TABLE -> TEMPORARY TABLE (DDL) transitions +===================================================================== +drop temporary table if exists t12; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create temporary table t12(a int); +prepare stmt from 'select * from t12'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop temporary table t12; +create temporary table t12(a int, b int); +execute stmt; +a b +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +a b +SELECT VARIABLE_VALUE - @base_count AS 
REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +select * from t12; +a b +drop table t12; +===================================================================== +Testing 13: TEMPORARY TABLE -> VIEW transitions +===================================================================== +drop temporary table if exists t13; +drop table if exists t13_b; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create temporary table t13(a int); +create table t13_b(a int); +prepare stmt from 'select * from t13'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop temporary table t13; +create view t13 as select * from t13_b; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +drop view t13; +drop table t13_b; +===================================================================== +Testing 14: VIEW -> NOTHING transitions +===================================================================== +drop view if exists t14; +drop table if exists t14_b; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t14_b(a int); +create view t14 as select * from t14_b; +prepare stmt from 'select * from t14'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop view t14; +execute stmt; +ERROR 42S02: Table 'test.t14' doesn't exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +ERROR 42S02: Table 'test.t14' doesn't exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +drop table t14_b; +===================================================================== +Testing 15: VIEW -> TABLE transitions +===================================================================== +drop view if exists t15; +drop table if exists t15_b; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t15_b(a int); +create view t15 as select * from t15_b; +prepare stmt from 'select * from t15'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop view t15; +create table t15(a int); +execute stmt; +a +SELECT VARIABLE_VALUE - 
@base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +drop table t15_b; +drop table t15; +===================================================================== +Testing 16: VIEW -> TEMPORARY TABLE transitions +===================================================================== +drop view if exists t16; +drop table if exists t16_b; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t16_b(a int); +create view t16 as select * from t16_b; +prepare stmt from 'select * from t16'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop view t16; +create temporary table t16(a int); +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +drop table t16_b; +drop temporary table t16; +===================================================================== +Testing 17: VIEW -> VIEW (DDL) transitions +===================================================================== +drop view if exists t17; +drop table if exists t17_b; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t17_b(a int); +insert into t17_b values (10), (20), (30); +create view t17 as select a, 2*a as b, 3*a as c from t17_b; +select * from t17; +a b c +10 20 30 +20 40 60 +30 60 90 +prepare stmt from 'select * from t17'; +execute stmt; +a b c +10 20 30 +20 40 60 +30 60 90 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a b c +10 20 30 +20 40 60 +30 60 90 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop view t17; +create view t17 as select a, 2*a as b, 10*a as c from t17_b; +select * from t17; +a b c +10 20 100 +20 40 200 +30 60 300 +execute stmt; +a b c +10 20 100 +20 40 200 +30 60 300 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +a b c +10 20 100 +20 40 200 +30 60 300 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +drop table t17_b; +drop view t17; +===================================================================== +Testing 18: VIEW -> VIEW (VIEW dependencies) transitions +===================================================================== +drop table if exists t18; +drop table if exists t18_dependent_table; +drop view if exists t18_func; +drop view if exists t18_view; +drop view if exists t18_table; +drop function if exists view_func; 
+drop view if exists view_view; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t18(a int); +insert into t18 values (1), (2), (3); +create function view_func(x int) returns int +return x+1; +create view view_view as select "view v1" as reason from dual; +create table t18_dependent_table(a int); +create view t18_func as select a, view_func(a) as b from t18; +create view t18_view as select a, reason as b from t18, view_view; +create view t18_table as select * from t18; +prepare stmt_func from 'select * from t18_func'; +execute stmt_func; +a b +1 2 +2 3 +3 4 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt_func; +a b +1 2 +2 3 +3 4 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop function view_func; +create function view_func(x int) returns int +return x*x; +execute stmt_func; +a b +1 1 +2 4 +3 9 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt_func; +a b +1 1 +2 4 +3 9 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +prepare stmt_view from 'select * from t18_view'; +execute stmt_view; +a b +1 view v1 +2 view v1 +3 view v1 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt_view; +a b +1 view v1 +2 view v1 +3 view v1 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop view view_view; +create view view_view as select "view v2" as reason from dual; +execute stmt_view; +a b +1 view v2 +2 view v2 +3 view v2 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt_view; +a b +1 view v2 +2 view v2 +3 view v2 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +prepare stmt_table from 'select * from t18_table'; +execute stmt_table; +a +1 +2 +3 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt_table; +a +1 +2 +3 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +alter table t18 add column comments varchar(50) default NULL; +execute stmt_table; +a +1 +2 +3 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +execute stmt_table; +a +1 +2 +3 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +drop table t18; +drop table t18_dependent_table; +drop view t18_func; +drop view t18_view; +drop view t18_table; +drop function view_func; +drop view view_view; +===================================================================== +Testing 19: Special tables (INFORMATION_SCHEMA) 
+===================================================================== +drop procedure if exists proc_19; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +prepare stmt from +'select ROUTINE_SCHEMA, ROUTINE_NAME, ROUTINE_TYPE + from INFORMATION_SCHEMA.ROUTINES where + routine_name=\'proc_19\''; +create procedure proc_19() select "hi there"; +execute stmt; +ROUTINE_SCHEMA ROUTINE_NAME ROUTINE_TYPE +test proc_19 PROCEDURE +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +ROUTINE_SCHEMA ROUTINE_NAME ROUTINE_TYPE +test proc_19 PROCEDURE +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop procedure proc_19; +create procedure proc_19() select "hi there, again"; +execute stmt; +ROUTINE_SCHEMA ROUTINE_NAME ROUTINE_TYPE +test proc_19 PROCEDURE +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +ROUTINE_SCHEMA ROUTINE_NAME ROUTINE_TYPE +test proc_19 PROCEDURE +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop procedure proc_19; +===================================================================== +Testing 20: Special tables (log tables) +===================================================================== +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +prepare stmt from +'select * from mysql.general_log where argument=\'IMPOSSIBLE QUERY STRING\''; +execute stmt; +event_time user_host thread_id server_id command_type argument +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +event_time user_host thread_id server_id command_type argument +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +event_time user_host thread_id server_id command_type argument +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +event_time user_host thread_id server_id command_type argument +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +===================================================================== +Testing 21: Special tables (system tables) +===================================================================== +drop procedure if exists proc_21; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +prepare stmt from +'select type, db, name from mysql.proc where name=\'proc_21\''; +create procedure proc_21() select "hi there"; +execute stmt; +type db name +PROCEDURE test proc_21 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +type db name +PROCEDURE test proc_21 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from 
+INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop procedure proc_21; +create procedure proc_21() select "hi there, again"; +execute stmt; +type db name +PROCEDURE test proc_21 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +type db name +PROCEDURE test proc_21 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop procedure proc_21; +===================================================================== +Testing 22: Special tables (views temp tables) +===================================================================== +drop table if exists t22_b; +drop view if exists t22; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t22_b(a int); +create algorithm=temptable view t22 as select a*a as a2 from t22_b; +show create view t22; +View Create View character_set_client collation_connection +t22 CREATE ALGORITHM=TEMPTABLE DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `t22` AS select (`t22_b`.`a` * `t22_b`.`a`) AS `a2` from `t22_b` latin1 latin1_swedish_ci +prepare stmt from 'select * from t22'; +insert into t22_b values (1), (2), (3); +execute stmt; +a2 +1 +4 +9 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a2 +1 +4 +9 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +insert into t22_b values (4), (5), (6); +execute stmt; +a2 +1 +4 +9 +16 +25 +36 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a2 +1 +4 +9 +16 +25 +36 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t22_b; +drop view t22; +===================================================================== +Testing 23: Special tables (internal join tables) +===================================================================== +drop table if exists t23_a; +drop table if exists t23_b; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t23_a(a int); +create table t23_b(b int); +prepare stmt from 'select * from t23_a join t23_b'; +insert into t23_a values (1), (2), (3); +insert into t23_b values (10), (20), (30); +execute stmt; +a b +1 10 +2 10 +3 10 +1 20 +2 20 +3 20 +1 30 +2 30 +3 30 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a b +1 10 +2 10 +3 10 +1 20 +2 20 +3 20 +1 30 +2 30 +3 30 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +insert into t23_a values (4); +insert into t23_b values (40); +execute stmt; +a b +1 10 +2 10 +3 10 +4 10 +1 20 +2 20 +3 20 +4 20 +1 30 +2 30 +3 30 +4 30 +1 40 +2 40 +3 40 +4 40 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED 
+0 +execute stmt; +a b +1 10 +2 10 +3 10 +4 10 +1 20 +2 20 +3 20 +4 20 +1 30 +2 30 +3 30 +4 30 +1 40 +2 40 +3 40 +4 40 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t23_a; +drop table t23_b; +===================================================================== +Testing 24: Special statements +===================================================================== +drop table if exists t24_alter; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t24_alter(a int); +prepare stmt from 'alter table t24_alter add column b int'; +execute stmt; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t24_alter; +create table t24_alter(a1 int, a2 int); +execute stmt; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +alter table t24_alter drop column b; +execute stmt; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +alter table t24_alter drop column b; +execute stmt; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t24_alter; +drop table if exists t24_repair; +create table t24_repair(a int); +insert into t24_repair values (1), (2), (3); +prepare stmt from 'repair table t24_repair'; +execute stmt; +Table Op Msg_type Msg_text +test.t24_repair repair status OK +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t24_repair; +create table t24_repair(a1 int, a2 int); +insert into t24_repair values (1, 10), (2, 20), (3, 30); +execute stmt; +Table Op Msg_type Msg_text +test.t24_repair repair status OK +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +alter table t24_repair add column b varchar(50) default NULL; +execute stmt; +Table Op Msg_type Msg_text +test.t24_repair repair status OK +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +alter table t24_repair drop column b; +execute stmt; +Table Op Msg_type Msg_text +test.t24_repair repair status OK +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t24_repair; +drop table if exists t24_analyze; +create table t24_analyze(a int); +insert into t24_analyze values (1), (2), (3); +prepare stmt from 'analyze table t24_analyze'; +execute stmt; +Table Op Msg_type Msg_text +test.t24_analyze analyze status OK +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t24_analyze; +create table t24_analyze(a1 int, a2 int); +insert into t24_analyze values (1, 10), (2, 20), (3, 30); +execute stmt; +Table Op Msg_type Msg_text +test.t24_analyze analyze status OK +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS 
where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +alter table t24_analyze add column b varchar(50) default NULL; +execute stmt; +Table Op Msg_type Msg_text +test.t24_analyze analyze status OK +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +alter table t24_analyze drop column b; +execute stmt; +Table Op Msg_type Msg_text +test.t24_analyze analyze status OK +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t24_analyze; +drop table if exists t24_optimize; +create table t24_optimize(a int); +insert into t24_optimize values (1), (2), (3); +prepare stmt from 'optimize table t24_optimize'; +execute stmt; +Table Op Msg_type Msg_text +test.t24_optimize optimize status OK +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t24_optimize; +create table t24_optimize(a1 int, a2 int); +insert into t24_optimize values (1, 10), (2, 20), (3, 30); +execute stmt; +Table Op Msg_type Msg_text +test.t24_optimize optimize status OK +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +alter table t24_optimize add column b varchar(50) default NULL; +execute stmt; +Table Op Msg_type Msg_text +test.t24_optimize optimize status OK +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +alter table t24_optimize drop column b; +execute stmt; +Table Op Msg_type Msg_text +test.t24_optimize optimize status OK +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t24_optimize; +drop procedure if exists changing_proc; +prepare stmt from 'show create procedure changing_proc'; +execute stmt; +ERROR 42000: PROCEDURE changing_proc does not exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +ERROR 42000: PROCEDURE changing_proc does not exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +create procedure changing_proc() begin end; +execute stmt; +Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation +changing_proc CREATE DEFINER=`root`@`localhost` PROCEDURE `changing_proc`() +begin end latin1 latin1_swedish_ci latin1_swedish_ci +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation +changing_proc CREATE DEFINER=`root`@`localhost` PROCEDURE `changing_proc`() +begin end latin1 latin1_swedish_ci latin1_swedish_ci +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop procedure changing_proc; +create procedure changing_proc(x int, y int) begin end; +execute stmt; +Procedure sql_mode Create Procedure character_set_client collation_connection Database 
Collation +changing_proc CREATE DEFINER=`root`@`localhost` PROCEDURE `changing_proc`(x int, y int) +begin end latin1 latin1_swedish_ci latin1_swedish_ci +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation +changing_proc CREATE DEFINER=`root`@`localhost` PROCEDURE `changing_proc`(x int, y int) +begin end latin1 latin1_swedish_ci latin1_swedish_ci +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop procedure changing_proc; +execute stmt; +ERROR 42000: PROCEDURE changing_proc does not exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +ERROR 42000: PROCEDURE changing_proc does not exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop function if exists changing_func; +prepare stmt from 'show create function changing_func'; +execute stmt; +ERROR 42000: FUNCTION changing_func does not exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +ERROR 42000: FUNCTION changing_func does not exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +create function changing_func() returns int return 0; +execute stmt; +Function sql_mode Create Function character_set_client collation_connection Database Collation +changing_func CREATE DEFINER=`root`@`localhost` FUNCTION `changing_func`() RETURNS int(11) +return 0 latin1 latin1_swedish_ci latin1_swedish_ci +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +Function sql_mode Create Function character_set_client collation_connection Database Collation +changing_func CREATE DEFINER=`root`@`localhost` FUNCTION `changing_func`() RETURNS int(11) +return 0 latin1 latin1_swedish_ci latin1_swedish_ci +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop function changing_func; +create function changing_func(x int, y int) returns int return x+y; +execute stmt; +Function sql_mode Create Function character_set_client collation_connection Database Collation +changing_func CREATE DEFINER=`root`@`localhost` FUNCTION `changing_func`(x int, y int) RETURNS int(11) +return x+y latin1 latin1_swedish_ci latin1_swedish_ci +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +Function sql_mode Create Function character_set_client collation_connection Database Collation +changing_func CREATE DEFINER=`root`@`localhost` FUNCTION `changing_func`(x int, y int) RETURNS int(11) +return x+y latin1 latin1_swedish_ci latin1_swedish_ci +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop function changing_func; +execute stmt; +ERROR 
42000: FUNCTION changing_func does not exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +ERROR 42000: FUNCTION changing_func does not exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table if exists t24_trigger; +create table t24_trigger(a int); +prepare stmt from 'show create trigger t24_bi;'; +execute stmt; +ERROR HY000: Trigger does not exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +ERROR HY000: Trigger does not exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +create trigger t24_bi before insert on t24_trigger for each row +begin +set @message= "t24_bi"; +end +$$ +execute stmt; +Trigger sql_mode SQL Original Statement character_set_client collation_connection Database Collation +t24_bi CREATE DEFINER=`root`@`localhost` trigger t24_bi before insert on t24_trigger for each row +begin +set @message= "t24_bi"; +end latin1 latin1_swedish_ci latin1_swedish_ci +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +Trigger sql_mode SQL Original Statement character_set_client collation_connection Database Collation +t24_bi CREATE DEFINER=`root`@`localhost` trigger t24_bi before insert on t24_trigger for each row +begin +set @message= "t24_bi"; +end latin1 latin1_swedish_ci latin1_swedish_ci +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop trigger t24_bi; +create trigger t24_bi before insert on t24_trigger for each row +begin +set @message= "t24_bi (2)"; +end +$$ +execute stmt; +Trigger sql_mode SQL Original Statement character_set_client collation_connection Database Collation +t24_bi CREATE DEFINER=`root`@`localhost` trigger t24_bi before insert on t24_trigger for each row +begin +set @message= "t24_bi (2)"; +end latin1 latin1_swedish_ci latin1_swedish_ci +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +Trigger sql_mode SQL Original Statement character_set_client collation_connection Database Collation +t24_bi CREATE DEFINER=`root`@`localhost` trigger t24_bi before insert on t24_trigger for each row +begin +set @message= "t24_bi (2)"; +end latin1 latin1_swedish_ci latin1_swedish_ci +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop trigger t24_bi; +execute stmt; +ERROR HY000: Trigger does not exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +ERROR HY000: Trigger does not exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t24_trigger; +===================================================================== +Testing 25: Testing the strength of TABLE_SHARE version 
+===================================================================== +drop table if exists t25_num_col; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t25_num_col(a int); +prepare stmt from 'select * from t25_num_col'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +alter table t25_num_col add column b varchar(50) default NULL; +execute stmt; +a b +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +a b +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +drop table t25_num_col; +drop table if exists t25_col_name; +create table t25_col_name(a int); +prepare stmt from 'select * from t25_col_name'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +alter table t25_col_name change a b int; +execute stmt; +b +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +execute stmt; +b +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +drop table t25_col_name; +drop table if exists t25_col_type; +create table t25_col_type(a int); +prepare stmt from 'select * from t25_col_type'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +alter table t25_col_type change a a varchar(10); +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +3 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +3 +drop table t25_col_type; +drop table if exists t25_col_type_length; +create table t25_col_type_length(a varchar(10)); +prepare stmt from 'select * from t25_col_type_length'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +3 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +3 +alter table t25_col_type_length change a a varchar(20); +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +4 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count 
AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +4 +drop table t25_col_type_length; +drop table if exists t25_col_null; +create table t25_col_null(a varchar(10)); +prepare stmt from 'select * from t25_col_null'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +4 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +4 +alter table t25_col_null change a a varchar(10) NOT NULL; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +5 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +5 +drop table t25_col_null; +drop table if exists t25_col_default; +create table t25_col_default(a int, b int DEFAULT 10); +prepare stmt from 'insert into t25_col_default(a) values (?)'; +set @val=1; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +5 +set @val=2; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +5 +alter table t25_col_default change b b int DEFAULT 20; +set @val=3; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +5 +set @val=4; +execute stmt using @val; +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +5 +select * from t25_col_default; +a b +1 10 +2 10 +3 20 +4 20 +drop table t25_col_default; +drop table if exists t25_index; +create table t25_index(a varchar(10)); +prepare stmt from 'select * from t25_index'; +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +5 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +5 +create index i1 on t25_index(a); +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +6 +execute stmt; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +6 +drop table t25_index; +drop table if exists t25_index_unique; +create table t25_index_unique(a varchar(10), b varchar(10)); +create index i1 on t25_index_unique(a, b); +show create table t25_index_unique; +Table Create Table +t25_index_unique CREATE TABLE `t25_index_unique` ( + `a` varchar(10) DEFAULT NULL, + `b` varchar(10) DEFAULT NULL, + KEY `i1` (`a`,`b`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +prepare stmt from 'select * from t25_index_unique'; +execute stmt; +a b +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +6 +execute stmt; +a b 
+SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +6 +alter table t25_index_unique drop index i1; +create unique index i1 on t25_index_unique(a, b); +show create table t25_index_unique; +Table Create Table +t25_index_unique CREATE TABLE `t25_index_unique` ( + `a` varchar(10) DEFAULT NULL, + `b` varchar(10) DEFAULT NULL, + UNIQUE KEY `i1` (`a`,`b`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +execute stmt; +a b +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +7 +execute stmt; +a b +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +7 +drop table t25_index_unique; +===================================================================== +Testing reported bugs +===================================================================== +drop table if exists table_12093; +drop function if exists func_12093; +drop function if exists func_12093_unrelated; +drop procedure if exists proc_12093; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table table_12093(a int); +create function func_12093() +returns int +begin +return (select count(*) from table_12093); +end// +create procedure proc_12093(a int) +begin +select * from table_12093; +end// +create function func_12093_unrelated() returns int return 2; +create procedure proc_12093_unrelated() begin end; +prepare stmt_sf from 'select func_12093();'; +prepare stmt_sp from 'call proc_12093(func_12093())'; +execute stmt_sf; +func_12093() +0 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt_sp; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop function func_12093_unrelated; +drop procedure proc_12093_unrelated; +execute stmt_sf; +func_12093() +0 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt_sp; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt_sf; +func_12093() +0 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt_sp; +a +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +deallocate prepare stmt_sf; +deallocate prepare stmt_sp; +drop table table_12093; +drop function func_12093; +drop procedure proc_12093; +drop function if exists func_21294; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create function func_21294() returns int return 10; +prepare stmt from "select func_21294()"; +execute stmt; +func_21294() +10 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop function func_21294; +create function func_21294() returns int return 10; +execute stmt; +func_21294() +10 
+SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop function func_21294; +create function func_21294() returns int return 20; +execute stmt; +func_21294() +20 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +deallocate prepare stmt; +drop function func_21294; +drop table if exists t_27420_100; +drop table if exists t_27420_101; +drop view if exists v_27420; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t_27420_100(a int); +insert into t_27420_100 values (1), (2); +create table t_27420_101(a int); +insert into t_27420_101 values (1), (2); +create view v_27420 as select t_27420_100.a X, t_27420_101.a Y +from t_27420_100, t_27420_101 +where t_27420_100.a=t_27420_101.a; +prepare stmt from 'select * from v_27420'; +execute stmt; +X Y +1 1 +2 2 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop view v_27420; +create table v_27420(X int, Y int); +execute stmt; +X Y +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +drop table v_27420; +create table v_27420 (a int, b int, filler char(200)); +execute stmt; +a b filler +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +2 +deallocate prepare stmt; +drop table t_27420_100; +drop table t_27420_101; +drop table v_27420; +drop table if exists t_27430_1; +drop table if exists t_27430_2; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t_27430_1 (a int not null, oref int not null, key(a)); +insert into t_27430_1 values +(1, 1), +(1, 1234), +(2, 3), +(2, 1234), +(3, 1234); +create table t_27430_2 (a int not null, oref int not null); +insert into t_27430_2 values +(1, 1), +(2, 2), +(1234, 3), +(1234, 4); +prepare stmt from +'select oref, a, a in (select a from t_27430_1 where oref=t_27430_2.oref) Z from t_27430_2'; +execute stmt; +oref a Z +1 1 1 +2 2 0 +3 1234 0 +4 1234 0 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +oref a Z +1 1 1 +2 2 0 +3 1234 0 +4 1234 0 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table t_27430_1, t_27430_2; +create table t_27430_1 (a int, oref int, key(a)); +insert into t_27430_1 values +(1, 1), +(1, NULL), +(2, 3), +(2, NULL), +(3, NULL); +create table t_27430_2 (a int, oref int); +insert into t_27430_2 values +(1, 1), +(2,2), +(NULL, 3), +(NULL, 4); +execute stmt; +oref a Z +1 1 1 +2 2 0 +3 NULL NULL +4 NULL 0 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +oref a Z +1 1 1 +2 2 0 +3 NULL NULL +4 NULL 0 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +deallocate prepare stmt; +drop table t_27430_1; +drop table t_27430_2; 
+drop table if exists t_27690_1; +drop view if exists v_27690_1; +drop table if exists v_27690_2; +SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; +create table t_27690_1 (a int, b int); +insert into t_27690_1 values (1,1),(2,2); +create table v_27690_1 as select * from t_27690_1; +create table v_27690_2 as select * from t_27690_1; +prepare stmt from 'select * from v_27690_1, v_27690_2'; +execute stmt; +a b a b +1 1 1 1 +2 2 1 1 +1 1 2 2 +2 2 2 2 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +a b a b +1 1 1 1 +2 2 1 1 +1 1 2 2 +2 2 2 2 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +drop table v_27690_1; +execute stmt; +ERROR 42S02: Table 'test.v_27690_1' doesn't exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +execute stmt; +ERROR 42S02: Table 'test.v_27690_1' doesn't exist +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +0 +create view v_27690_1 as select A.a, A.b from t_27690_1 A, t_27690_1 B; +execute stmt; +a b a b +1 1 1 1 +2 2 1 1 +1 1 1 1 +2 2 1 1 +1 1 2 2 +2 2 2 2 +1 1 2 2 +2 2 2 2 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +execute stmt; +a b a b +1 1 1 1 +2 2 1 1 +1 1 1 1 +2 2 1 1 +1 1 2 2 +2 2 2 2 +1 1 2 2 +2 2 2 2 +SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; +REPREPARED +1 +deallocate prepare stmt; +drop table t_27690_1; +drop view v_27690_1; +drop table v_27690_2; diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 2c16017241c..3f9ec52ca36 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -20,6 +20,6 @@ lowercase_table3 : Bug#32667 lowercase_table3.test reports to error log kill : Bug#29149: Test "kill" fails on Windows innodb_mysql : Bug#32724: innodb_mysql.test fails randomly wait_timeout : Bug#32801 wait_timeout.test fails randomly -kill : Bug#29149 Test "kill" fails on Windows ctype_create : Bug#32965 main.ctype_create fails status : Bug#32966 main.status fails +ps_ddl : Bug#12093 2007-12-14 pending WL#4165 / WL#4166 diff --git a/mysql-test/t/ps_ddl.test b/mysql-test/t/ps_ddl.test new file mode 100644 index 00000000000..abb6563f052 --- /dev/null +++ b/mysql-test/t/ps_ddl.test @@ -0,0 +1,1851 @@ +# +# Testing the behavior of 'PREPARE', 'DDL', 'EXECUTE' scenarios +# +# Background: +# In a statement like "select * from t1", t1 can be: +# - nothing (the table does not exist) +# - a real table +# - a temporary table +# - a view +# +# Changing the nature of "t1" between a PREPARE and an EXECUTE +# can invalidate the internal state of a prepared statement, so that, +# during the execute, the server should: +# - detect state changes and fail to execute a statement, +# instead of crashing the server or returning wrong results +# - "RE-PREPARE" the statement to restore a valid internal state. 
+# +# Also, changing the physical structure of "t1", by: +# - changing the definition of t1 itself (DDL on tables, views) +# - changing TRIGGERs associated with a table +# - changing PROCEDURE, FUNCTION referenced by a TRIGGER body, +# - changing PROCEDURE, FUNCTION referenced by a VIEW body, +# impacts the internal structure of a prepared statement, and should +# cause the same verifications at execute time to be performed. +# +# The tests provided in this file cover the different state transitions +# between a PREPARE and an EXECUTE, and are organized as follows: +# - Part 1: NOTHING -> TABLE +# - Part 2: NOTHING -> TEMPORARY TABLE +# - Part 3: NOTHING -> VIEW +# - Part 4: TABLE -> NOTHING +# - Part 5: TABLE -> TABLE (DDL) +# - Part 6: TABLE -> TABLE (TRIGGER) +# - Part 7: TABLE -> TABLE (TRIGGER dependencies) +# - Part 8: TABLE -> TEMPORARY TABLE +# - Part 9: TABLE -> VIEW +# - Part 10: TEMPORARY TABLE -> NOTHING +# - Part 11: TEMPORARY TABLE -> TABLE +# - Part 12: TEMPORARY TABLE -> TEMPORARY TABLE (DDL) +# - Part 13: TEMPORARY TABLE -> VIEW +# - Part 14: VIEW -> NOTHING +# - Part 15: VIEW -> TABLE +# - Part 16: VIEW -> TEMPORARY TABLE +# - Part 17: VIEW -> VIEW (DDL) +# - Part 18: VIEW -> VIEW (VIEW dependencies) +# - Part 19: Special tables (INFORMATION_SCHEMA) +# - Part 20: Special tables (log tables) +# - Part 21: Special tables (system tables) +# - Part 22: Special tables (views temp tables) +# - Part 23: Special tables (internal join tables) +# - Part 24: Special statements +# - Part 25: Testing the strength of TABLE_SHARE version + +let $base_count = SELECT VARIABLE_VALUE from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' +into @base_count ; + +let $reprepared = SELECT VARIABLE_VALUE - @base_count AS REPREPARED from +INFORMATION_SCHEMA.SESSION_STATUS where variable_name='COM_STMT_REPREPARE' ; + +--echo ===================================================================== +--echo Testing 1: NOTHING -> TABLE transitions +--echo ===================================================================== + +--disable_warnings +drop table if exists t1; +--enable_warnings + +eval $base_count; + +# can not be tested since prepare failed +--error ER_NO_SUCH_TABLE +prepare stmt from 'select * from t1'; + +--echo ===================================================================== +--echo Testing 2: NOTHING -> TEMPORARY TABLE transitions +--echo ===================================================================== + +# can not be tested + +--echo ===================================================================== +--echo Testing 3: NOTHING -> VIEW transitions +--echo ===================================================================== + +# can not be tested + +--echo ===================================================================== +--echo Testing 4: TABLE -> NOTHING transitions +--echo ===================================================================== + +--disable_warnings +drop table if exists t4; +--enable_warnings + +eval $base_count; + +create table t4(a int); + +prepare stmt from 'select * from t4'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t4; +--error ER_NO_SUCH_TABLE +execute stmt; +eval $reprepared; +--error ER_NO_SUCH_TABLE +execute stmt; +eval $reprepared; + +--echo ===================================================================== +--echo Testing 5: TABLE -> TABLE (DDL) transitions +--echo ===================================================================== + +--disable_warnings +drop table if
exists t5; +--enable_warnings + +eval $base_count; + +create table t5(a int); + +prepare stmt from 'select * from t5'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +alter table t5 add column (b int); + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t5; + +--echo ===================================================================== +--echo Testing 6: TABLE -> TABLE (TRIGGER) transitions +--echo ===================================================================== + +# +# Test 6-a: adding a relevant trigger +# Test 6-b: adding an irrelevant trigger +# Test 6-c: changing a relevant trigger +# Test 6-d: changing an irrelevant trigger +# Test 6-e: removing a relevant trigger +# Test 6-f: removing an irrelevant trigger +# + +--disable_warnings +drop table if exists t6; +--enable_warnings + +eval $base_count; + +create table t6(a int); + +prepare stmt from 'insert into t6(a) value (?)'; +set @val=1; +execute stmt using @val; +eval $reprepared; +set @val=2; +execute stmt using @val; +eval $reprepared; + +# Relevant trigger: execute should reprepare +delimiter $$; +create trigger t6_bi before insert on t6 for each row + begin + set @message= "t6_bi"; + end +$$ +delimiter ;$$ + +set @message="none"; +set @val=3; +# REPREPARED +1 +execute stmt using @val; +eval $reprepared; +select @message; +set @val=4; +execute stmt using @val; +eval $reprepared; +select @message; + +prepare stmt from 'insert into t6(a) value (?)'; +set @message="none"; +set @val=5; +execute stmt using @val; +eval $reprepared; +select @message; +set @message="none"; +set @val=6; +execute stmt using @val; +eval $reprepared; +select @message; + +# Unrelated trigger: execute can pass or fail, implementation dependent +delimiter $$; +create trigger t6_bd before delete on t6 for each row + begin + set @message= "t6_bd"; + end +$$ +delimiter ;$$ + +set @message="none"; +set @val=7; +execute stmt using @val; +eval $reprepared; +select @message; +set @message="none"; +set @val=8; +execute stmt using @val; +eval $reprepared; +select @message; + +prepare stmt from 'insert into t6(a) value (?)'; +set @message="none"; +set @val=9; +execute stmt using @val; +eval $reprepared; +select @message; +set @message="none"; +set @val=10; +execute stmt using @val; +eval $reprepared; +select @message; + +# Relevant trigger: execute should reprepare +drop trigger t6_bi; +delimiter $$; +create trigger t6_bi before insert on t6 for each row + begin + set @message= "t6_bi (2)"; + end +$$ +delimiter ;$$ + +set @message="none"; +set @val=11; +# REPREPARED +1 +execute stmt using @val; +eval $reprepared; +select @message; +set @val=12; +execute stmt using @val; +eval $reprepared; +select @message; + +prepare stmt from 'insert into t6(a) value (?)'; +set @message="none"; +set @val=13; +execute stmt using @val; +eval $reprepared; +select @message; +set @message="none"; +set @val=14; +execute stmt using @val; +eval $reprepared; +select @message; + +# Unrelated trigger: execute can pass or fail, implementation dependent +drop trigger t6_bd; +delimiter $$; +create trigger t6_bd before delete on t6 for each row + begin + set @message= "t6_bd (2)"; + end +$$ +delimiter ;$$ + +set @message="none"; +set @val=15; +execute stmt using @val; +eval $reprepared; +select @message; +set @message="none"; +set @val=16; +execute stmt using @val; +eval $reprepared; +select @message; + +prepare stmt from 'insert into t6(a) value (?)'; +set @message="none"; +set @val=17; +execute stmt using @val; +eval
$reprepared; +select @message; +set @message="none"; +set @val=18; +execute stmt using @val; +eval $reprepared; +select @message; + +drop trigger t6_bi; + +set @message="none"; +set @val=19; +# safe to re-execute +execute stmt using @val; +eval $reprepared; +select @message; +set @val=20; +# safe to re-execute +execute stmt using @val; +eval $reprepared; +select @message; + +prepare stmt from 'insert into t6(a) value (?)'; +set @message="none"; +set @val=21; +execute stmt using @val; +eval $reprepared; +select @message; +set @val=22; +execute stmt using @val; +eval $reprepared; +select @message; + +drop trigger t6_bd; + +set @val=23; +# safe to re-execute +execute stmt using @val; +eval $reprepared; +select @message; +set @val=24; +# safe to re-execute +execute stmt using @val; +eval $reprepared; +select @message; + +select * from t6 order by a; +drop table t6; + +--echo ===================================================================== +--echo Testing 7: TABLE -> TABLE (TRIGGER dependencies) transitions +--echo ===================================================================== + +# +# Test 7-a: dependent PROCEDURE has changed +# Test 7-b: dependent FUNCTION has changed +# Test 7-c: dependent VIEW has changed +# Test 7-d: dependent TABLE has changed +# Test 7-e: dependent TABLE TRIGGER has changed +# + +--disable_warnings +drop table if exists t7_proc; +drop table if exists t7_func; +drop table if exists t7_view; +drop table if exists t7_table; +drop table if exists t7_dependent_table; +drop table if exists t7_table_trigger; +drop table if exists t7_audit; +drop procedure if exists audit_proc; +drop function if exists audit_func; +drop view if exists audit_view; +--enable_warnings + +eval $base_count; + +create table t7_proc(a int); +create table t7_func(a int); +create table t7_view(a int); +create table t7_table(a int); +create table t7_table_trigger(a int); + +create table t7_audit(old_a int, new_a int, reason varchar(50)); +create table t7_dependent_table(old_a int, new_a int, reason varchar(50)); + +create procedure audit_proc(a int) + insert into t7_audit values (NULL, a, "proc v1"); + +create function audit_func() returns varchar(50) + return "func v1"; + +create view audit_view as select "view v1" as reason from dual; + +create trigger t7_proc_bi before insert on t7_proc for each row + call audit_proc(NEW.a); + +create trigger t7_func_bi before insert on t7_func for each row + insert into t7_audit values (NULL, NEW.a, audit_func()); + +create trigger t7_view_bi before insert on t7_view for each row + insert into t7_audit values (NULL, NEW.a, (select reason from audit_view)); + +create trigger t7_table_bi before insert on t7_table for each row + insert into t7_dependent_table values (NULL, NEW.a, "dependent table"); + +create trigger t7_table_trigger_bi before insert on t7_dependent_table + for each row set NEW.reason="trigger v1"; + +prepare stmt_proc from 'insert into t7_proc(a) value (?)'; +set @val=101; +execute stmt_proc using @val; +eval $reprepared; +set @val=102; +execute stmt_proc using @val; +eval $reprepared; + +drop procedure audit_proc; + +create procedure audit_proc(a int) + insert into t7_audit values (NULL, a, "proc v2"); + +set @val=103; +execute stmt_proc using @val; +eval $reprepared; +set @val=104; +execute stmt_proc using @val; +eval $reprepared; + + +prepare stmt_func from 'insert into t7_func(a) value (?)'; +set @val=201; +execute stmt_func using @val; +eval $reprepared; +set @val=202; +execute stmt_func using @val; +eval $reprepared; + +drop function 
audit_func; + +create function audit_func() returns varchar(50) + return "func v2"; + +set @val=203; +execute stmt_func using @val; +eval $reprepared; +set @val=204; +execute stmt_func using @val; +eval $reprepared; + +prepare stmt_view from 'insert into t7_view(a) value (?)'; +set @val=301; +execute stmt_view using @val; +eval $reprepared; +set @val=302; +execute stmt_view using @val; +eval $reprepared; + +drop view audit_view; + +create view audit_view as select "view v2" as reason from dual; + +# Because of Bug#33255, the wrong result is still produced for cases +# 303 and 304, even after re-preparing the statement. +# This is because the table trigger is cached and is not invalidated. + +set @val=303; +# REPREPARED +1 +execute stmt_view using @val; +eval $reprepared; +set @val=304; +execute stmt_view using @val; +eval $reprepared; + + +prepare stmt_table from 'insert into t7_table(a) value (?)'; +set @val=401; +execute stmt_table using @val; +eval $reprepared; +set @val=402; +execute stmt_table using @val; +eval $reprepared; + +alter table t7_dependent_table add column comments varchar(100) default NULL; + +set @val=403; +# REPREPARED +1 +--error ER_WRONG_VALUE_COUNT_ON_ROW +execute stmt_table using @val; +eval $reprepared; +set @val=404; +--error ER_WRONG_VALUE_COUNT_ON_ROW +execute stmt_table using @val; +eval $reprepared; + +alter table t7_dependent_table drop column comments; + +set @val=405; +# REPREPARED +1 +execute stmt_table using @val; +eval $reprepared; +set @val=406; +execute stmt_table using @val; +eval $reprepared; + + +prepare stmt_table_trigger from 'insert into t7_table(a) value (?)'; +set @val=501; +execute stmt_table_trigger using @val; +eval $reprepared; +set @val=502; +execute stmt_table_trigger using @val; +eval $reprepared; + +drop trigger t7_table_trigger_bi; + +create trigger t7_table_trigger_bi before insert on t7_dependent_table + for each row set NEW.reason="trigger v2"; + +set @val=503; +# REPREPARED +1 +execute stmt_table_trigger using @val; +eval $reprepared; +set @val=504; +execute stmt_table_trigger using @val; +eval $reprepared; + +select * from t7_audit order by new_a; + +select * from t7_dependent_table order by new_a; + +drop table t7_proc; +drop table t7_func; +drop table t7_view; +drop table t7_table; +drop table t7_dependent_table; +drop table t7_table_trigger; +drop table t7_audit; +drop procedure audit_proc; +drop function audit_func; +drop view audit_view; + +--echo ===================================================================== +--echo Testing 8: TABLE -> TEMPORARY TABLE transitions +--echo ===================================================================== + +--disable_warnings +drop table if exists t8; +--enable_warnings + +eval $base_count; + +create table t8(a int); + +prepare stmt from 'select * from t8'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t8; +create temporary table t8(a int); + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t8; + +--echo ===================================================================== +--echo Testing 9: TABLE -> VIEW transitions +--echo ===================================================================== + +--disable_warnings +drop table if exists t9; +drop table if exists t9_b; +--enable_warnings + +eval $base_count; + +create table t9(a int); +create table t9_b(a int); + +prepare stmt from 'select * from t9'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t9; +create view 
t9 as select * from t9_b; + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop view t9; +drop table t9_b; + +--echo ===================================================================== +--echo Testing 10: TEMPORARY TABLE -> NOTHING transitions +--echo ===================================================================== + +--disable_warnings +drop temporary table if exists t10; +--enable_warnings + +eval $base_count; + +create temporary table t10(a int); + +prepare stmt from 'select * from t10'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop temporary table t10; +--error ER_NO_SUCH_TABLE +execute stmt; +eval $reprepared; +--error ER_NO_SUCH_TABLE +execute stmt; +eval $reprepared; + +--echo ===================================================================== +--echo Testing 11: TEMPORARY TABLE -> TABLE transitions +--echo ===================================================================== + +--disable_warnings +drop table if exists t11; +drop temporary table if exists t11; +--enable_warnings + +eval $base_count; + +create table t11(a int); +insert into t11(a) value (1); +create temporary table t11(a int); + +prepare stmt from 'select * from t11'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop temporary table t11; + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +select * from t11; +drop table t11; + +--echo ===================================================================== +--echo Testing 12: TEMPORARY TABLE -> TEMPORARY TABLE (DDL) transitions +--echo ===================================================================== + +--disable_warnings +drop temporary table if exists t12; +--enable_warnings + +eval $base_count; + +create temporary table t12(a int); + +prepare stmt from 'select * from t12'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop temporary table t12; +create temporary table t12(a int, b int); + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +select * from t12; +drop table t12; + +--echo ===================================================================== +--echo Testing 13: TEMPORARY TABLE -> VIEW transitions +--echo ===================================================================== + +--disable_warnings +drop temporary table if exists t13; +drop table if exists t13_b; +--enable_warnings + +eval $base_count; + +create temporary table t13(a int); +create table t13_b(a int); + +prepare stmt from 'select * from t13'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop temporary table t13; +create view t13 as select * from t13_b; + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop view t13; +drop table t13_b; + +--echo ===================================================================== +--echo Testing 14: VIEW -> NOTHING transitions +--echo ===================================================================== + +--disable_warnings +drop view if exists t14; +drop table if exists t14_b; +--enable_warnings + +eval $base_count; + +create table t14_b(a int); +create view t14 as select * from t14_b; + +prepare stmt from 'select * from t14'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop view t14; + +# REPREPARED +1 +--error ER_NO_SUCH_TABLE +execute stmt; +eval $reprepared; +--error ER_NO_SUCH_TABLE +execute stmt; +eval $reprepared; + +drop table t14_b; + +--echo 
===================================================================== +--echo Testing 15: VIEW -> TABLE transitions +--echo ===================================================================== + +--disable_warnings +drop view if exists t15; +drop table if exists t15_b; +--enable_warnings + +eval $base_count; + +create table t15_b(a int); +create view t15 as select * from t15_b; + +prepare stmt from 'select * from t15'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop view t15; +create table t15(a int); + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t15_b; +drop table t15; + +--echo ===================================================================== +--echo Testing 16: VIEW -> TEMPORARY TABLE transitions +--echo ===================================================================== + +--disable_warnings +drop view if exists t16; +drop table if exists t16_b; +--enable_warnings + +eval $base_count; + +create table t16_b(a int); +create view t16 as select * from t16_b; + +prepare stmt from 'select * from t16'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop view t16; +create temporary table t16(a int); + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t16_b; +drop temporary table t16; + +--echo ===================================================================== +--echo Testing 17: VIEW -> VIEW (DDL) transitions +--echo ===================================================================== + +--disable_warnings +drop view if exists t17; +drop table if exists t17_b; +--enable_warnings + +eval $base_count; + +create table t17_b(a int); +insert into t17_b values (10), (20), (30); + +create view t17 as select a, 2*a as b, 3*a as c from t17_b; +select * from t17; + +prepare stmt from 'select * from t17'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop view t17; +create view t17 as select a, 2*a as b, 10*a as c from t17_b; +select * from t17; + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t17_b; +drop view t17; + +--echo ===================================================================== +--echo Testing 18: VIEW -> VIEW (VIEW dependencies) transitions +--echo ===================================================================== + +# +# Test 18-a: dependent PROCEDURE has changed (via a trigger) +# Test 18-b: dependent FUNCTION has changed +# Test 18-c: dependent VIEW has changed +# Test 18-d: dependent TABLE has changed +# Test 18-e: dependent TABLE TRIGGER has changed +# + +--disable_warnings +drop table if exists t18; +drop table if exists t18_dependent_table; +drop view if exists t18_func; +drop view if exists t18_view; +drop view if exists t18_table; +drop function if exists view_func; +drop view if exists view_view; +--enable_warnings + +eval $base_count; + +# TODO: insertable view -> trigger +# TODO: insertable view -> trigger -> proc ? 
+ +create table t18(a int); +insert into t18 values (1), (2), (3); + +create function view_func(x int) returns int + return x+1; + +create view view_view as select "view v1" as reason from dual; + +create table t18_dependent_table(a int); + +create view t18_func as select a, view_func(a) as b from t18; +create view t18_view as select a, reason as b from t18, view_view; +create view t18_table as select * from t18; + +prepare stmt_func from 'select * from t18_func'; +execute stmt_func; +eval $reprepared; +execute stmt_func; +eval $reprepared; + +drop function view_func; +create function view_func(x int) returns int + return x*x; + +execute stmt_func; +eval $reprepared; +execute stmt_func; +eval $reprepared; + +prepare stmt_view from 'select * from t18_view'; +execute stmt_view; +eval $reprepared; +execute stmt_view; +eval $reprepared; + +drop view view_view; +create view view_view as select "view v2" as reason from dual; + +# REPREPARED +1 +execute stmt_view; +eval $reprepared; +execute stmt_view; +eval $reprepared; + +prepare stmt_table from 'select * from t18_table'; +execute stmt_table; +eval $reprepared; +execute stmt_table; +eval $reprepared; + +alter table t18 add column comments varchar(50) default NULL; + +# REPREPARED +1 +execute stmt_table; +eval $reprepared; +execute stmt_table; +eval $reprepared; + +drop table t18; +drop table t18_dependent_table; +drop view t18_func; +drop view t18_view; +drop view t18_table; +drop function view_func; +drop view view_view; + +--echo ===================================================================== +--echo Testing 19: Special tables (INFORMATION_SCHEMA) +--echo ===================================================================== + +--disable_warnings +drop procedure if exists proc_19; +--enable_warnings + +eval $base_count; + +# Using a temporary table internally should not confuse the prepared +# statement code, and should not raise ER_PS_INVALIDATED errors +prepare stmt from + 'select ROUTINE_SCHEMA, ROUTINE_NAME, ROUTINE_TYPE + from INFORMATION_SCHEMA.ROUTINES where + routine_name=\'proc_19\''; + +create procedure proc_19() select "hi there"; + +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop procedure proc_19; +create procedure proc_19() select "hi there, again"; + +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop procedure proc_19; + +--echo ===================================================================== +--echo Testing 20: Special tables (log tables) +--echo ===================================================================== + +eval $base_count; + +prepare stmt from + 'select * from mysql.general_log where argument=\'IMPOSSIBLE QUERY STRING\''; + +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +--echo ===================================================================== +--echo Testing 21: Special tables (system tables) +--echo ===================================================================== + +--disable_warnings +drop procedure if exists proc_21; +--enable_warnings + +eval $base_count; + +prepare stmt from + 'select type, db, name from mysql.proc where name=\'proc_21\''; + +create procedure proc_21() select "hi there"; + +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop procedure proc_21; +create procedure proc_21() select "hi there, again"; + +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop procedure proc_21; + +--echo 
===================================================================== +--echo Testing 22: Special tables (views temp tables) +--echo ===================================================================== + +--disable_warnings +drop table if exists t22_b; +drop view if exists t22; +--enable_warnings + +eval $base_count; + +create table t22_b(a int); + +create algorithm=temptable view t22 as select a*a as a2 from t22_b; + +# Using a temporary table internally should not confuse the prepared +# statement code, and should not raise ER_PS_INVALIDATED errors +show create view t22; + +prepare stmt from 'select * from t22'; + +insert into t22_b values (1), (2), (3); +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +insert into t22_b values (4), (5), (6); +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t22_b; +drop view t22; + +--echo ===================================================================== +--echo Testing 23: Special tables (internal join tables) +--echo ===================================================================== + +--disable_warnings +drop table if exists t23_a; +drop table if exists t23_b; +--enable_warnings + +eval $base_count; + +create table t23_a(a int); +create table t23_b(b int); + +# Using a temporary table internally should not confuse the prepared +# statement code, and should not raise ER_PS_INVALIDATED errors +prepare stmt from 'select * from t23_a join t23_b'; + +insert into t23_a values (1), (2), (3); +insert into t23_b values (10), (20), (30); +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +insert into t23_a values (4); +insert into t23_b values (40); +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t23_a; +drop table t23_b; + +--echo ===================================================================== +--echo Testing 24: Special statements +--echo ===================================================================== + +# SQLCOM_ALTER_TABLE: + +--disable_warnings +drop table if exists t24_alter; +--enable_warnings + +eval $base_count; + +create table t24_alter(a int); + +prepare stmt from 'alter table t24_alter add column b int'; +execute stmt; +eval $reprepared; + +drop table t24_alter; +create table t24_alter(a1 int, a2 int); + +# t24_alter has changed, and it's not a problem +execute stmt; +eval $reprepared; + +alter table t24_alter drop column b; +execute stmt; +eval $reprepared; + +alter table t24_alter drop column b; +execute stmt; +eval $reprepared; + +drop table t24_alter; + +# SQLCOM_REPAIR: + +--disable_warnings +drop table if exists t24_repair; +--enable_warnings + +create table t24_repair(a int); +insert into t24_repair values (1), (2), (3); + +prepare stmt from 'repair table t24_repair'; +execute stmt; +eval $reprepared; + +drop table t24_repair; +create table t24_repair(a1 int, a2 int); +insert into t24_repair values (1, 10), (2, 20), (3, 30); + +# t24_repair has changed, and it's not a problem +execute stmt; +eval $reprepared; + +alter table t24_repair add column b varchar(50) default NULL; +execute stmt; +eval $reprepared; + +alter table t24_repair drop column b; +execute stmt; +eval $reprepared; + +drop table t24_repair; + +# SQLCOM_ANALYZE: + +--disable_warnings +drop table if exists t24_analyze; +--enable_warnings + +create table t24_analyze(a int); +insert into t24_analyze values (1), (2), (3); + +prepare stmt from 'analyze table t24_analyze'; +execute stmt; +eval $reprepared; + +drop table t24_analyze; +create table 
t24_analyze(a1 int, a2 int); +insert into t24_analyze values (1, 10), (2, 20), (3, 30); + +# t24_analyze has changed, and it's not a problem +execute stmt; +eval $reprepared; + +alter table t24_analyze add column b varchar(50) default NULL; +execute stmt; +eval $reprepared; + +alter table t24_analyze drop column b; +execute stmt; +eval $reprepared; + +drop table t24_analyze; + +# SQLCOM_OPTIMIZE: + +--disable_warnings +drop table if exists t24_optimize; +--enable_warnings + +create table t24_optimize(a int); +insert into t24_optimize values (1), (2), (3); + +prepare stmt from 'optimize table t24_optimize'; +execute stmt; +eval $reprepared; + +drop table t24_optimize; +create table t24_optimize(a1 int, a2 int); +insert into t24_optimize values (1, 10), (2, 20), (3, 30); + +# t24_optimize has changed, and it's not a problem +execute stmt; +eval $reprepared; + +alter table t24_optimize add column b varchar(50) default NULL; +execute stmt; +eval $reprepared; + +alter table t24_optimize drop column b; +execute stmt; +eval $reprepared; + +drop table t24_optimize; + +# SQLCOM_SHOW_CREATE_PROC: + +--disable_warnings +drop procedure if exists changing_proc; +--enable_warnings + +prepare stmt from 'show create procedure changing_proc'; +--error ER_SP_DOES_NOT_EXIST +execute stmt; +eval $reprepared; +--error ER_SP_DOES_NOT_EXIST +execute stmt; +eval $reprepared; + +create procedure changing_proc() begin end; + +# changing_proc has changed, and it's not a problem +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop procedure changing_proc; +create procedure changing_proc(x int, y int) begin end; + +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop procedure changing_proc; + +--error ER_SP_DOES_NOT_EXIST +execute stmt; +eval $reprepared; +--error ER_SP_DOES_NOT_EXIST +execute stmt; +eval $reprepared; + +# SQLCOM_SHOW_CREATE_FUNC: + +--disable_warnings +drop function if exists changing_func; +--enable_warnings + +prepare stmt from 'show create function changing_func'; +--error ER_SP_DOES_NOT_EXIST +execute stmt; +eval $reprepared; +--error ER_SP_DOES_NOT_EXIST +execute stmt; +eval $reprepared; + +create function changing_func() returns int return 0; + +# changing_func has changed, and it's not a problem +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop function changing_func; +create function changing_func(x int, y int) returns int return x+y; + +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop function changing_func; + +--error ER_SP_DOES_NOT_EXIST +execute stmt; +eval $reprepared; +--error ER_SP_DOES_NOT_EXIST +execute stmt; +eval $reprepared; + +# SQLCOM_SHOW_CREATE_TRIGGER: + +--disable_warnings +drop table if exists t24_trigger; +--enable_warnings + +create table t24_trigger(a int); + +prepare stmt from 'show create trigger t24_bi;'; +--error ER_TRG_DOES_NOT_EXIST +execute stmt; +eval $reprepared; +--error ER_TRG_DOES_NOT_EXIST +execute stmt; +eval $reprepared; + +delimiter $$; +create trigger t24_bi before insert on t24_trigger for each row + begin + set @message= "t24_bi"; + end +$$ +delimiter ;$$ + +# t24_bi has changed, and it's not a problem +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop trigger t24_bi; +delimiter $$; +create trigger t24_bi before insert on t24_trigger for each row + begin + set @message= "t24_bi (2)"; + end +$$ +delimiter ;$$ + +# t24_bi has changed, and it's not a problem +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop
trigger t24_bi; + +--error ER_TRG_DOES_NOT_EXIST +execute stmt; +eval $reprepared; +--error ER_TRG_DOES_NOT_EXIST +execute stmt; +eval $reprepared; + +drop table t24_trigger; + +--echo ===================================================================== +--echo Testing 25: Testing the strength of TABLE_SHARE version +--echo ===================================================================== + +# Test 25-a: number of columns + +--disable_warnings +drop table if exists t25_num_col; +--enable_warnings + +eval $base_count; + +create table t25_num_col(a int); + +prepare stmt from 'select * from t25_num_col'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +alter table t25_num_col add column b varchar(50) default NULL; + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t25_num_col; + +# Test 25-b: column name + +--disable_warnings +drop table if exists t25_col_name; +--enable_warnings + +create table t25_col_name(a int); + +prepare stmt from 'select * from t25_col_name'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +alter table t25_col_name change a b int; + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t25_col_name; + +# Test 25-c: column type + +--disable_warnings +drop table if exists t25_col_type; +--enable_warnings + +create table t25_col_type(a int); + +prepare stmt from 'select * from t25_col_type'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +alter table t25_col_type change a a varchar(10); + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t25_col_type; + +# Test 25-d: column type length + +--disable_warnings +drop table if exists t25_col_type_length; +--enable_warnings + +create table t25_col_type_length(a varchar(10)); + +prepare stmt from 'select * from t25_col_type_length'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +alter table t25_col_type_length change a a varchar(20); + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t25_col_type_length; + +# Test 25-e: column NULL property + +--disable_warnings +drop table if exists t25_col_null; +--enable_warnings + +create table t25_col_null(a varchar(10)); + +prepare stmt from 'select * from t25_col_null'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +alter table t25_col_null change a a varchar(10) NOT NULL; + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t25_col_null; + +# Test 25-f: column DEFAULT + +--disable_warnings +drop table if exists t25_col_default; +--enable_warnings + +create table t25_col_default(a int, b int DEFAULT 10); + +prepare stmt from 'insert into t25_col_default(a) values (?)'; +set @val=1; +execute stmt using @val; +eval $reprepared; +set @val=2; +execute stmt using @val; +eval $reprepared; + +alter table t25_col_default change b b int DEFAULT 20; + +set @val=3; +# Must insert the correct default value for b +execute stmt using @val; +eval $reprepared; + +set @val=4; +# Must insert the correct default value for b +execute stmt using @val; +eval $reprepared; + +select * from t25_col_default; + +drop table t25_col_default; + +# Test 25-g: number of keys + +--disable_warnings +drop table if exists t25_index; +--enable_warnings + +create table t25_index(a varchar(10)); + +prepare stmt from 'select * from t25_index'; +execute stmt; +eval 
$reprepared; +execute stmt; +eval $reprepared; + +create index i1 on t25_index(a); + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t25_index; + +# Test 25-h: changing index uniqueness + +--disable_warnings +drop table if exists t25_index_unique; +--enable_warnings + +create table t25_index_unique(a varchar(10), b varchar(10)); +create index i1 on t25_index_unique(a, b); + +show create table t25_index_unique; + +prepare stmt from 'select * from t25_index_unique'; +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +alter table t25_index_unique drop index i1; +create unique index i1 on t25_index_unique(a, b); + +show create table t25_index_unique; + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t25_index_unique; + +--echo ===================================================================== +--echo Testing reported bugs +--echo ===================================================================== + +# +# Bug#12093 (SP not found on second PS execution if another thread drops +# other SP in between) +# + +--disable_warnings +drop table if exists table_12093; +drop function if exists func_12093; +drop function if exists func_12093_unrelated; +drop procedure if exists proc_12093; +--enable_warnings + +eval $base_count; + +connect (con1,localhost,root,,); + +connection default; + +create table table_12093(a int); + +delimiter //; + +create function func_12093() +returns int +begin + return (select count(*) from table_12093); +end// + +create procedure proc_12093(a int) +begin + select * from table_12093; +end// + +delimiter ;// + +create function func_12093_unrelated() returns int return 2; +create procedure proc_12093_unrelated() begin end; + +prepare stmt_sf from 'select func_12093();'; +prepare stmt_sp from 'call proc_12093(func_12093())'; + +execute stmt_sf; +eval $reprepared; +execute stmt_sp; +eval $reprepared; + +connection con1; + +drop function func_12093_unrelated; +drop procedure proc_12093_unrelated; + +connection default; + +# previously, failed with --error 1305 +execute stmt_sf; +eval $reprepared; +# previously, failed with --error 1305 +execute stmt_sp; +eval $reprepared; + +# previously, failed with --error 1305 +execute stmt_sf; +eval $reprepared; +# previously, failed with --error 1305 +execute stmt_sp; +eval $reprepared; + +deallocate prepare stmt_sf; +deallocate prepare stmt_sp; + +disconnect con1; + +drop table table_12093; +drop function func_12093; +drop procedure proc_12093; + +# +# Bug#21294 (executing a prepared statement that executes a stored function +# which was recreat) +# + +--disable_warnings +drop function if exists func_21294; +--enable_warnings + +eval $base_count; + +create function func_21294() returns int return 10; + +prepare stmt from "select func_21294()"; +execute stmt; +eval $reprepared; + +drop function func_21294; +create function func_21294() returns int return 10; + +# might pass or fail, implementation dependent +execute stmt; +eval $reprepared; + +drop function func_21294; +create function func_21294() returns int return 20; + +execute stmt; +eval $reprepared; + +deallocate prepare stmt; +drop function func_21294; + +# +# Bug#27420 (A combination of PS and view operations cause error + assertion +# on shutdown) +# + +--disable_warnings +drop table if exists t_27420_100; +drop table if exists t_27420_101; +drop view if exists v_27420; +--enable_warnings + +eval $base_count; + +connect (con1,localhost,root,,); + +connection 
default; + +create table t_27420_100(a int); +insert into t_27420_100 values (1), (2); + +create table t_27420_101(a int); +insert into t_27420_101 values (1), (2); + +create view v_27420 as select t_27420_100.a X, t_27420_101.a Y + from t_27420_100, t_27420_101 + where t_27420_100.a=t_27420_101.a; + +prepare stmt from 'select * from v_27420'; + +execute stmt; +eval $reprepared; + +connection con1; + +drop view v_27420; +create table v_27420(X int, Y int); + +connection default; + +# REPREPARED +1 +execute stmt; +eval $reprepared; + +connection con1; + +drop table v_27420; +# passes in 5.0, fails in 5.1, should pass +create table v_27420 (a int, b int, filler char(200)); + +connection default; + +# REPREPARED +1 +execute stmt; +eval $reprepared; + +disconnect con1; + +deallocate prepare stmt; +drop table t_27420_100; +drop table t_27420_101; +drop table v_27420; + +# +# Bug#27430 (Crash in subquery code when in PS and table DDL changed after +# PREPARE) +# + +--disable_warnings +drop table if exists t_27430_1; +drop table if exists t_27430_2; +--enable_warnings + +eval $base_count; + +create table t_27430_1 (a int not null, oref int not null, key(a)); +insert into t_27430_1 values + (1, 1), + (1, 1234), + (2, 3), + (2, 1234), + (3, 1234); + +create table t_27430_2 (a int not null, oref int not null); +insert into t_27430_2 values + (1, 1), + (2, 2), + (1234, 3), + (1234, 4); + +prepare stmt from + 'select oref, a, a in (select a from t_27430_1 where oref=t_27430_2.oref) Z from t_27430_2'; + +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table t_27430_1, t_27430_2; + +create table t_27430_1 (a int, oref int, key(a)); +insert into t_27430_1 values + (1, 1), + (1, NULL), + (2, 3), + (2, NULL), + (3, NULL); + +create table t_27430_2 (a int, oref int); +insert into t_27430_2 values + (1, 1), + (2,2), + (NULL, 3), + (NULL, 4); + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +deallocate prepare stmt; +drop table t_27430_1; +drop table t_27430_2; + +# +# Bug#27690 (Re-execution of prepared statement after table was replaced +# with a view crashes) +# + +--disable_warnings +drop table if exists t_27690_1; +drop view if exists v_27690_1; +drop table if exists v_27690_2; +--enable_warnings + +eval $base_count; + +create table t_27690_1 (a int, b int); +insert into t_27690_1 values (1,1),(2,2); + +create table v_27690_1 as select * from t_27690_1; +create table v_27690_2 as select * from t_27690_1; + +prepare stmt from 'select * from v_27690_1, v_27690_2'; + +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +drop table v_27690_1; + +--error ER_NO_SUCH_TABLE +execute stmt; +eval $reprepared; + +--error ER_NO_SUCH_TABLE +execute stmt; +eval $reprepared; + +create view v_27690_1 as select A.a, A.b from t_27690_1 A, t_27690_1 B; + +# REPREPARED +1 +execute stmt; +eval $reprepared; +execute stmt; +eval $reprepared; + +deallocate prepare stmt; +drop table t_27690_1; +drop view v_27690_1; +drop table v_27690_2; + -- cgit v1.2.1 From 7213ca204dc3d10f751ca2507321eede1238d6cb Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 15 Dec 2007 22:24:01 +0300 Subject: Try to fix assertion failures at slave shutdown when running rpl_ndb tests on sapsrv1. sql/ha_ndbcluster_binlog.cc: Try to fix assertion failures at slave shutdown when running rpl_ndb tests. If the binlog thread is killed, which happens during shutdown, open_tables returns error without setting an error in THD. 
Therefore one can't access thd->main_da.message() if thd->killed. --- sql/ha_ndbcluster_binlog.cc | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 0e7ca28fd2a..07b0d907229 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -2326,9 +2326,12 @@ static int open_ndb_binlog_index(THD *thd, TABLE_LIST *tables, thd->clear_error(); if (open_tables(thd, &tables, &counter, MYSQL_LOCK_IGNORE_FLUSH)) { - sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'", - thd->main_da.sql_errno(), - thd->main_da.message()); + if (thd->killed) + sql_print_error("NDB Binlog: Opening ndb_binlog_index: killed"); + else + sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'", + thd->main_da.sql_errno(), + thd->main_da.message()); thd->proc_info= save_proc_info; return -1; } -- cgit v1.2.1