author      unknown <pekka@mysql.com>  2006-05-03 23:17:16 +0200
committer   unknown <pekka@mysql.com>  2006-05-03 23:17:16 +0200
commit      b5732e7c8cb8d89ca25ed5948f457159a4adc3e0 (patch)
tree        e6435daebce15a6980f558389be842f1db477813
parent      b0ecf7046bc0e3b0fca03f768f8aa382e5b12a5e (diff)
parent      6b02ec1caa35731f1532c4f4dc158a0d18a7a57a (diff)
download    mariadb-git-b5732e7c8cb8d89ca25ed5948f457159a4adc3e0.tar.gz
Merge pnousiainen@bk-internal.mysql.com:/home/bk/mysql-4.1
into mysql.com:/space/pekka/ndb/version/my50

mysql-test/r/ndb_blob.result: Auto merged
mysql-test/t/ndb_blob.test: Auto merged
ndb/include/kernel/signaldata/TcKeyReq.hpp: Auto merged
ndb/include/ndbapi/NdbBlob.hpp: Auto merged
ndb/src/ndbapi/NdbBlob.cpp: Auto merged
ndb/test/ndbapi/testBlobs.cpp: Auto merged
sql/sql_table.cc: Auto merged
ndb/tools/delete_all.cpp: nuts
-rw-r--r--  mysql-test/r/ndb_blob.result                 18
-rw-r--r--  mysql-test/t/ndb_blob.test                   25
-rw-r--r--  ndb/include/kernel/signaldata/TcKeyReq.hpp    1
-rw-r--r--  ndb/include/ndbapi/NdbBlob.hpp                1
-rw-r--r--  ndb/src/ndbapi/NdbBlob.cpp                   24
-rw-r--r--  ndb/test/ndbapi/testBlobs.cpp                23
-rw-r--r--  ndb/tools/delete_all.cpp                     31
-rw-r--r--  sql/sql_table.cc                              4
8 files changed, 86 insertions, 41 deletions
diff --git a/mysql-test/r/ndb_blob.result b/mysql-test/r/ndb_blob.result
index b1269564558..a6faafa9612 100644
--- a/mysql-test/r/ndb_blob.result
+++ b/mysql-test/r/ndb_blob.result
@@ -481,14 +481,22 @@ msg text NOT NULL
insert into t1 (msg) values(
'Tries to validate (8 byte length + inline bytes) as UTF8 :(
Fast fix: removed validation for Text. It is not yet indexable
-so bad data will not crash kernel.
-Proper fix: Set inline bytes to multiple of mbmaxlen and
-validate it (after the 8 byte length).');
+so bad data will not crash kernel.');
select * from t1;
id msg
1 Tries to validate (8 byte length + inline bytes) as UTF8 :(
Fast fix: removed validation for Text. It is not yet indexable
so bad data will not crash kernel.
-Proper fix: Set inline bytes to multiple of mbmaxlen and
-validate it (after the 8 byte length).
+drop table t1;
+create table t1 (
+a int primary key not null auto_increment,
+b text
+) engine=ndbcluster;
+select count(*) from t1;
+count(*)
+500
+truncate t1;
+select count(*) from t1;
+count(*)
+0
drop table t1;
diff --git a/mysql-test/t/ndb_blob.test b/mysql-test/t/ndb_blob.test
index f80b7f71281..bf82a793049 100644
--- a/mysql-test/t/ndb_blob.test
+++ b/mysql-test/t/ndb_blob.test
@@ -403,10 +403,29 @@ create table t1 (
insert into t1 (msg) values(
'Tries to validate (8 byte length + inline bytes) as UTF8 :(
Fast fix: removed validation for Text. It is not yet indexable
-so bad data will not crash kernel.
-Proper fix: Set inline bytes to multiple of mbmaxlen and
-validate it (after the 8 byte length).');
+so bad data will not crash kernel.');
select * from t1;
drop table t1;
+# -- bug #19201
+create table t1 (
+ a int primary key not null auto_increment,
+ b text
+) engine=ndbcluster;
+--disable_query_log
+set autocommit=1;
+# more rows than batch size (64)
+# for this bug no blob parts would be necessary
+let $1 = 500;
+while ($1)
+{
+ insert into t1 (b) values (repeat('x',4000));
+ dec $1;
+}
+--enable_query_log
+select count(*) from t1;
+truncate t1;
+select count(*) from t1;
+drop table t1;
+
# End of 4.1 tests
diff --git a/ndb/include/kernel/signaldata/TcKeyReq.hpp b/ndb/include/kernel/signaldata/TcKeyReq.hpp
index d7c11ca773c..f611d2c1567 100644
--- a/ndb/include/kernel/signaldata/TcKeyReq.hpp
+++ b/ndb/include/kernel/signaldata/TcKeyReq.hpp
@@ -39,6 +39,7 @@ class TcKeyReq {
friend class NdbOperation;
friend class NdbIndexOperation;
friend class NdbScanOperation;
+ friend class NdbBlob;
friend class DbUtil;
/**
diff --git a/ndb/include/ndbapi/NdbBlob.hpp b/ndb/include/ndbapi/NdbBlob.hpp
index cb0caafe34f..76aa8e710ac 100644
--- a/ndb/include/ndbapi/NdbBlob.hpp
+++ b/ndb/include/ndbapi/NdbBlob.hpp
@@ -290,6 +290,7 @@ private:
bool isWriteOp();
bool isDeleteOp();
bool isScanOp();
+ bool isTakeOverOp();
// computations
Uint32 getPartNumber(Uint64 pos);
Uint32 getPartCount();
diff --git a/ndb/src/ndbapi/NdbBlob.cpp b/ndb/src/ndbapi/NdbBlob.cpp
index 35cf71b5e5a..7d2793047c2 100644
--- a/ndb/src/ndbapi/NdbBlob.cpp
+++ b/ndb/src/ndbapi/NdbBlob.cpp
@@ -23,6 +23,7 @@
#include <NdbBlob.hpp>
#include "NdbBlobImpl.hpp"
#include <NdbScanOperation.hpp>
+#include <signaldata/TcKeyReq.hpp>
/*
* Reading index table directly (as a table) is faster but there are
@@ -264,6 +265,13 @@ NdbBlob::isScanOp()
theNdbOp->theOperationType == NdbOperation::OpenRangeScanRequest;
}
+inline bool
+NdbBlob::isTakeOverOp()
+{
+ return
+ TcKeyReq::getTakeOverScanFlag(theNdbOp->theScanInfo);
+}
+
// computations (inline)
inline Uint32
@@ -1203,8 +1211,22 @@ NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch)
if (isUpdateOp() || isWriteOp() || isDeleteOp()) {
// add operation before this one to read head+inline
NdbOperation* tOp = theNdbCon->getNdbOperation(theTable, theNdbOp);
+ /*
+ * If main op is from take over scan lock, the added read is done
+ * as committed read:
+ *
+ * In normal transactional case, the row is locked by us and
+ * committed read returns same as normal read.
+ *
+ * In current TRUNCATE TABLE, the deleting trans is committed in
+ * batches and then restarted with new trans id. A normal read
+ * would hang on the scan delete lock and then fail.
+ */
+ NdbOperation::LockMode lockMode =
+ ! isTakeOverOp() ?
+ NdbOperation::LM_Read : NdbOperation::LM_CommittedRead;
if (tOp == NULL ||
- tOp->readTuple() == -1 ||
+ tOp->readTuple(lockMode) == -1 ||
setTableKeyValue(tOp) == -1 ||
getHeadInlineValue(tOp) == -1) {
setErrorCode(tOp);
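
The comment added in this hunk gives the reasoning behind the new lock mode. As a minimal stand-alone sketch of the same decision (plain C++; LockMode and the take-over flag are placeholders standing in for NdbOperation::LockMode and NdbBlob::isTakeOverOp(), not the real NDB API):

// Illustration only: placeholder names, not the real NDB API types.
#include <cassert>

enum LockMode { LM_Read, LM_CommittedRead };

// Lock mode for the extra head+inline read added in preExecute(): when the
// main op was taken over from a scan (e.g. the batched TRUNCATE delete scan,
// whose transaction is committed and restarted with a new trans id), a normal
// locking read would hang on the scan delete lock and then fail, so a
// committed read is used instead.
static LockMode headInlineReadLockMode(bool mainOpIsTakeOver)
{
  return mainOpIsTakeOver ? LM_CommittedRead : LM_Read;
}

int main()
{
  assert(headInlineReadLockMode(false) == LM_Read);
  assert(headInlineReadLockMode(true)  == LM_CommittedRead);
  return 0;
}
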
diff --git a/ndb/test/ndbapi/testBlobs.cpp b/ndb/test/ndbapi/testBlobs.cpp
index fff5ac247df..c2a06c9ffa0 100644
--- a/ndb/test/ndbapi/testBlobs.cpp
+++ b/ndb/test/ndbapi/testBlobs.cpp
@@ -44,6 +44,7 @@ struct Opt {
bool m_dbg;
bool m_dbgall;
const char* m_dbug;
+ bool m_fac;
bool m_full;
unsigned m_loop;
unsigned m_parts;
@@ -72,6 +73,7 @@ struct Opt {
m_dbg(false),
m_dbgall(false),
m_dbug(0),
+ m_fac(false),
m_full(false),
m_loop(1),
m_parts(10),
@@ -110,6 +112,7 @@ printusage()
<< " -dbg print debug" << endl
<< " -dbgall print also NDB API debug (if compiled in)" << endl
<< " -dbug opt dbug options" << endl
+ << " -fac fetch across commit in scan delete [" << d.m_fac << "]" << endl
<< " -full read/write only full blob values" << endl
<< " -loop N loop N times 0=forever [" << d.m_loop << "]" << endl
<< " -parts N max parts in blob value [" << d.m_parts << "]" << endl
@@ -1255,23 +1258,11 @@ deleteScan(bool idx)
CHK((ret = g_ops->nextResult(false)) == 0 || ret == 1 || ret == 2);
if (++n == g_opt.m_batch || ret == 2) {
DBG("execute batch: n=" << n << " ret=" << ret);
- switch (0) {
- case 0: // works normally
+ if (! g_opt.m_fac) {
CHK(g_con->execute(NoCommit) == 0);
- CHK(true || g_con->restart() == 0);
- break;
- case 1: // nonsense - g_con is invalid for 2nd batch
- CHK(g_con->execute(Commit) == 0);
- CHK(true || g_con->restart() == 0);
- break;
- case 2: // DBTC sendSignalErrorRefuseLab
- CHK(g_con->execute(NoCommit) == 0);
- CHK(g_con->restart() == 0);
- break;
- case 3: // 266 time-out
+ } else {
CHK(g_con->execute(Commit) == 0);
CHK(g_con->restart() == 0);
- break;
}
n = 0;
}
@@ -1817,6 +1808,10 @@ NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)
continue;
}
}
+ if (strcmp(arg, "-fac") == 0) {
+ g_opt.m_fac = true;
+ continue;
+ }
if (strcmp(arg, "-full") == 0) {
g_opt.m_full = true;
continue;
diff --git a/ndb/tools/delete_all.cpp b/ndb/tools/delete_all.cpp
index d6972e33cc0..6aea9f87aaa 100644
--- a/ndb/tools/delete_all.cpp
+++ b/ndb/tools/delete_all.cpp
@@ -23,17 +23,21 @@
#include <NDBT.hpp>
static int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
- bool commit_across_open_cursor, int parallelism=240);
+ bool fetch_across_commit, int parallelism=240);
NDB_STD_OPTS_VARS;
static const char* _dbname = "TEST_DB";
+static my_bool _transactional = false;
static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_desc"),
{ "database", 'd', "Name of database table is in",
(gptr*) &_dbname, (gptr*) &_dbname, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
+ { "transactional", 't', "Single transaction (may run out of operations)",
+ (gptr*) &_transactional, (gptr*) &_transactional, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
static void usage()
@@ -84,18 +88,11 @@ int main(int argc, char** argv){
ndbout << " Table " << argv[i] << " does not exist!" << endl;
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- // Check if we have any blobs
- bool commit_across_open_cursor = true;
- for (int j = 0; j < pTab->getNoOfColumns(); j++) {
- NdbDictionary::Column::Type t = pTab->getColumn(j)->getType();
- if (t == NdbDictionary::Column::Blob ||
- t == NdbDictionary::Column::Text) {
- commit_across_open_cursor = false;
- break;
- }
- }
- ndbout << "Deleting all from " << argv[i] << "...";
- if(clear_table(&MyNdb, pTab, commit_across_open_cursor) == NDBT_FAILED){
+ ndbout << "Deleting all from " << argv[i];
+ if (! _transactional)
+ ndbout << " (non-transactional)";
+ ndbout << " ...";
+ if(clear_table(&MyNdb, pTab, ! _transactional) == NDBT_FAILED){
res = NDBT_FAILED;
ndbout << "FAILED" << endl;
}
@@ -105,7 +102,7 @@ int main(int argc, char** argv){
int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
- bool commit_across_open_cursor, int parallelism)
+ bool fetch_across_commit, int parallelism)
{
// Scan all records exclusive and delete
// them one by one
@@ -136,7 +133,7 @@ int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
}
goto failed;
}
-
+
pOp = pTrans->getNdbScanOperation(pTab->getName());
if (pOp == NULL) {
goto failed;
@@ -166,7 +163,7 @@ int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
} while((check = pOp->nextResult(false)) == 0);
if(check != -1){
- if (commit_across_open_cursor) {
+ if (fetch_across_commit) {
check = pTrans->execute(NdbTransaction::Commit);
pTrans->restart(); // new tx id
} else {
@@ -197,7 +194,7 @@ int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
}
goto failed;
}
- if (! commit_across_open_cursor &&
+ if (! fetch_across_commit &&
pTrans->execute(NdbTransaction::Commit) != 0) {
err = pTrans->getNdbError();
goto failed;
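
The clear_table() changes above replace the old blob-driven heuristic with an explicit switch: by default the tool now commits and restarts the transaction after each fetched batch while the delete scan stays open, and --transactional restores the old single-transaction behaviour (which may run out of operations on large tables). A rough stand-alone sketch of that control flow (plain C++; the NDB calls are reduced to printouts, so this is an illustration, not the real API):

// Illustration only: control flow of clear_table() with and without --transactional.
#include <iostream>

int main()
{
  const bool fetch_across_commit = true; // default; --transactional makes this false
  const int  batches = 3;                // pretend the delete scan returns 3 fetched batches

  for (int b = 0; b < batches; b++) {
    std::cout << "delete all rows of fetched batch " << b << "\n";
    if (fetch_across_commit) {
      // commit the deletes so far and restart with a new trans id,
      // keeping the scan cursor open: execute(Commit); pTrans->restart()
      std::cout << "execute(Commit); restart()\n";
    } else {
      // single transaction: just flush the batch, commit only at the end
      std::cout << "execute(NoCommit)\n";
    }
  }
  if (!fetch_across_commit)
    std::cout << "execute(Commit)\n"; // one final commit for the whole table
  return 0;
}

Dropping the per-table blob check is what the committed-read change in NdbBlob.cpp is meant to allow: fetch-across-commit can now be the default for tables with blob and text columns as well.
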
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 073a847ad90..7b739798b38 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1941,7 +1941,9 @@ mysql_rename_table(enum db_type base,
}
}
delete file;
- if (error)
+ if (error == HA_ERR_WRONG_COMMAND)
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0), "ALTER TABLE");
+ else if (error)
my_error(ER_ERROR_ON_RENAME, MYF(0), from, to, error);
DBUG_RETURN(error != 0);
}