author    unknown <mskold@mysql.com>    2006-06-12 15:35:46 +0200
committer unknown <mskold@mysql.com>    2006-06-12 15:35:46 +0200
commit    7b207a8301e9adc7191c2771f57f309005eea86f (patch)
tree      bcaec50cc03bd83a0281f38a9862d3d89ae53814
parent    1d522eb77de2680aaf9432b6e3627aeaa2081197 (diff)
parent    a3f3696697f1ef991ea856fc9dd7bb113309555a (diff)
download  mariadb-git-7b207a8301e9adc7191c2771f57f309005eea86f.tar.gz
Merge mskold@bk-internal.mysql.com:/home/bk/mysql-4.1
into mysql.com:/home/marty/MySQL/mysql-4.1
-rw-r--r--  mysql-test/r/ndb_lock.result                  |  86
-rw-r--r--  mysql-test/r/ndb_truncate.result              |  23
-rw-r--r--  mysql-test/t/ndb_lock.test                    | 111
-rw-r--r--  mysql-test/t/ndb_truncate.test                |  23
-rw-r--r--  ndb/include/ndbapi/NdbIndexScanOperation.hpp  |   7
-rw-r--r--  ndb/include/ndbapi/NdbResultSet.hpp           |  21
-rw-r--r--  ndb/include/ndbapi/NdbScanOperation.hpp       |   6
-rw-r--r--  ndb/src/ndbapi/NdbResultSet.cpp               |  11
-rw-r--r--  ndb/src/ndbapi/NdbScanOperation.cpp           |  30
-rw-r--r--  ndb/src/ndbapi/ndberror.c                     |   2
-rw-r--r--  sql/ha_ndbcluster.cc                          |  94
-rw-r--r--  sql/ha_ndbcluster.h                           |   2
-rw-r--r--  sql/handler.h                                 |   3

13 files changed, 375 insertions(+), 44 deletions(-)
diff --git a/mysql-test/r/ndb_lock.result b/mysql-test/r/ndb_lock.result
index b8c2c58aac4..281b02b8228 100644
--- a/mysql-test/r/ndb_lock.result
+++ b/mysql-test/r/ndb_lock.result
@@ -63,3 +63,89 @@ pk u o
5 5 5
insert into t1 values (1,1,1);
drop table t1;
+create table t1 (x integer not null primary key, y varchar(32), z integer, key(z)) engine = ndb;
+insert into t1 values (1,'one',1), (2,'two',2),(3,"three",3);
+begin;
+select * from t1 where x = 1 for update;
+x y z
+1 one 1
+begin;
+select * from t1 where x = 2 for update;
+x y z
+2 two 2
+select * from t1 where x = 1 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+commit;
+begin;
+select * from t1 where y = 'one' or y = 'three' for update;
+x y z
+3 three 3
+1 one 1
+begin;
+select * from t1 where x = 2 for update;
+x y z
+2 two 2
+select * from t1 where x = 1 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+commit;
+begin;
+select * from t1 where z > 1 and z < 3 for update;
+x y z
+2 two 2
+begin;
+select * from t1 where x = 1 for update;
+x y z
+1 one 1
+select * from t1 where x = 2 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+commit;
+begin;
+select * from t1 where x = 1 lock in share mode;
+x y z
+1 one 1
+begin;
+select * from t1 where x = 1 lock in share mode;
+x y z
+1 one 1
+select * from t1 where x = 2 for update;
+x y z
+2 two 2
+select * from t1 where x = 1 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+commit;
+begin;
+select * from t1 where y = 'one' or y = 'three' lock in share mode;
+x y z
+3 three 3
+1 one 1
+begin;
+select * from t1 where y = 'one' lock in share mode;
+x y z
+1 one 1
+select * from t1 where x = 2 for update;
+x y z
+2 two 2
+select * from t1 where x = 1 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+commit;
+begin;
+select * from t1 where z > 1 and z < 3 lock in share mode;
+x y z
+2 two 2
+begin;
+select * from t1 where z = 1 lock in share mode;
+x y z
+1 one 1
+select * from t1 where x = 1 for update;
+x y z
+1 one 1
+select * from t1 where x = 2 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+commit;
+drop table t1;
diff --git a/mysql-test/r/ndb_truncate.result b/mysql-test/r/ndb_truncate.result
index 38f3a78029c..811e5e3afeb 100644
--- a/mysql-test/r/ndb_truncate.result
+++ b/mysql-test/r/ndb_truncate.result
@@ -1,14 +1,23 @@
-DROP TABLE IF EXISTS t2;
-CREATE TABLE t2 (
-a bigint unsigned NOT NULL PRIMARY KEY,
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (
+a bigint unsigned NOT NULL AUTO_INCREMENT PRIMARY KEY,
b int unsigned not null,
c int unsigned
) engine=ndbcluster;
-select count(*) from t2;
+select count(*) from t1;
count(*)
5000
-truncate table t2;
-select count(*) from t2;
+select * from t1 order by a limit 2;
+a b c
+1 509 2500
+2 510 7
+truncate table t1;
+select count(*) from t1;
count(*)
0
-drop table t2;
+insert into t1 values(NULL,1,1),(NULL,2,2);
+select * from t1 order by a;
+a b c
+1 1 1
+2 2 2
+drop table t1;
diff --git a/mysql-test/t/ndb_lock.test b/mysql-test/t/ndb_lock.test
index 6945f91ee39..db42b7ec2dd 100644
--- a/mysql-test/t/ndb_lock.test
+++ b/mysql-test/t/ndb_lock.test
@@ -69,4 +69,115 @@ insert into t1 values (1,1,1);
drop table t1;
+# Lock for update
+
+create table t1 (x integer not null primary key, y varchar(32), z integer, key(z)) engine = ndb;
+
+insert into t1 values (1,'one',1), (2,'two',2),(3,"three",3);
+
+# PK access
+connection con1;
+begin;
+select * from t1 where x = 1 for update;
+
+connection con2;
+begin;
+select * from t1 where x = 2 for update;
+--error 1205
+select * from t1 where x = 1 for update;
+rollback;
+
+connection con1;
+commit;
+
+# table scan
+connection con1;
+begin;
+select * from t1 where y = 'one' or y = 'three' for update;
+
+connection con2;
+begin;
+# Have to check with pk access here since scans take locks on
+# all rows and then release them in chunks
+select * from t1 where x = 2 for update;
+--error 1205
+select * from t1 where x = 1 for update;
+rollback;
+
+connection con1;
+commit;
+
+# index scan
+connection con1;
+begin;
+select * from t1 where z > 1 and z < 3 for update;
+
+connection con2;
+begin;
+# Have to check with pk access here since scans take locks on
+# all rows and then release them in chunks
+select * from t1 where x = 1 for update;
+--error 1205
+select * from t1 where x = 2 for update;
+rollback;
+
+connection con1;
+commit;
+
+# share locking
+
+# PK access
+connection con1;
+begin;
+select * from t1 where x = 1 lock in share mode;
+
+connection con2;
+begin;
+select * from t1 where x = 1 lock in share mode;
+select * from t1 where x = 2 for update;
+--error 1205
+select * from t1 where x = 1 for update;
+rollback;
+
+connection con1;
+commit;
+
+# table scan
+connection con1;
+begin;
+select * from t1 where y = 'one' or y = 'three' lock in share mode;
+
+connection con2;
+begin;
+select * from t1 where y = 'one' lock in share mode;
+# Have to check with pk access here since scans take locks on
+# all rows and then release them in chunks
+select * from t1 where x = 2 for update;
+--error 1205
+select * from t1 where x = 1 for update;
+rollback;
+
+connection con1;
+commit;
+
+# index scan
+connection con1;
+begin;
+select * from t1 where z > 1 and z < 3 lock in share mode;
+
+connection con2;
+begin;
+select * from t1 where z = 1 lock in share mode;
+# Have to check with pk access here since scans take locks on
+# all rows and then release them in chunks
+select * from t1 where x = 1 for update;
+--error 1205
+select * from t1 where x = 2 for update;
+rollback;
+
+connection con1;
+commit;
+
+drop table t1;
+
# End of 4.1 tests
diff --git a/mysql-test/t/ndb_truncate.test b/mysql-test/t/ndb_truncate.test
index 73af70d0d0f..a1ef4be0d48 100644
--- a/mysql-test/t/ndb_truncate.test
+++ b/mysql-test/t/ndb_truncate.test
@@ -2,12 +2,11 @@
-- source include/not_embedded.inc
--disable_warnings
-DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t1, t2;
--enable_warnings
-
-CREATE TABLE t2 (
- a bigint unsigned NOT NULL PRIMARY KEY,
+CREATE TABLE t1 (
+ a bigint unsigned NOT NULL AUTO_INCREMENT PRIMARY KEY,
b int unsigned not null,
c int unsigned
) engine=ndbcluster;
@@ -20,17 +19,23 @@ let $1=500;
disable_query_log;
while ($1)
{
- eval insert into t2 values($1*10, $1+9, 5*$1), ($1*10+1, $1+10, 7),($1*10+2, $1+10, 7*$1), ($1*10+3, $1+10, 10+$1), ($1*10+4, $1+10, 70*$1), ($1*10+5, $1+10, 7), ($1*10+6, $1+10, 9), ($1*10+7, $1+299, 899), ($1*10+8, $1+10, 12), ($1*10+9, $1+10, 14*$1);
+ eval insert into t1 values(NULL, $1+9, 5*$1), (NULL, $1+10, 7),(NULL, $1+10, 7*$1), (NULL, $1+10, 10+$1), (NULL, $1+10, 70*$1), (NULL, $1+10, 7), (NULL, $1+10, 9), (NULL, $1+299, 899), (NULL, $1+10, 12), (NULL, $1+10, 14*$1);
dec $1;
}
enable_query_log;
-select count(*) from t2;
+select count(*) from t1;
+
+select * from t1 order by a limit 2;
+
+truncate table t1;
+
+select count(*) from t1;
-truncate table t2;
+insert into t1 values(NULL,1,1),(NULL,2,2);
-select count(*) from t2;
+select * from t1 order by a;
-drop table t2;
+drop table t1;
# End of 4.1 tests
diff --git a/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/ndb/include/ndbapi/NdbIndexScanOperation.hpp
index 7cd2daea6a6..e96f46e0f32 100644
--- a/ndb/include/ndbapi/NdbIndexScanOperation.hpp
+++ b/ndb/include/ndbapi/NdbIndexScanOperation.hpp
@@ -45,14 +45,15 @@ public:
NdbResultSet* readTuples(LockMode = LM_Read,
Uint32 batch = 0,
Uint32 parallel = 0,
- bool order_by = false);
+ bool order_by = false,
+ bool keyinfo = false);
inline NdbResultSet* readTuples(int parallell){
- return readTuples(LM_Read, 0, parallell, false);
+ return readTuples(LM_Read, 0, parallell);
}
inline NdbResultSet* readTuplesExclusive(int parallell = 0){
- return readTuples(LM_Exclusive, 0, parallell, false);
+ return readTuples(LM_Exclusive, 0, parallell);
}
/**
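
The keyinfo flag added to NdbIndexScanOperation::readTuples() rides along with the existing order_by flag; this is how ha_ndbcluster::ordered_index_scan() requests the primary key (need_pk) for LM_Read scans later in this patch. A hedged sketch of the call under assumed names (the index/table/column names and the connected Ndb object are illustrative, not taken from the patch; error handling is omitted):

  #include <NdbApi.hpp>

  // Sketch only: ordered index scan with both order_by and keyinfo set,
  // so the scanned rows can later be locked with NdbResultSet::lockTuple().
  void ordered_scan_with_keyinfo(Ndb* ndb)
  {
    NdbConnection* trans = ndb->startTransaction();
    NdbIndexScanOperation* op =
      trans->getNdbIndexScanOperation("z", "t1");      // assumed index/table

    NdbResultSet* rs = op->readTuples(NdbOperation::LM_Read,
                                      0 /* batch */, 0 /* parallel */,
                                      true /* order_by */,
                                      true /* keyinfo */);
    int zval = 2;
    op->setBound("z", NdbIndexScanOperation::BoundEQ, &zval, sizeof(zval));
    NdbRecAttr* x = op->getValue("x");                  // assumed column

    trans->execute(NoCommit);
    while (rs->nextResult(true) == 0)
    {
      // rows arrive in index order; keyinfo makes lockTuple() possible
      (void)x;
    }
    ndb->closeTransaction(trans);
  }
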
diff --git a/ndb/include/ndbapi/NdbResultSet.hpp b/ndb/include/ndbapi/NdbResultSet.hpp
index dc0288a380c..2a196b1c6ae 100644
--- a/ndb/include/ndbapi/NdbResultSet.hpp
+++ b/ndb/include/ndbapi/NdbResultSet.hpp
@@ -102,6 +102,27 @@ public:
int restart(bool forceSend = false);
/**
+ * Lock the current row by transferring the scan operation to a locking transaction.
+ * Use this function
+ * when a scan has found a record that you want to lock.
+ * 1. Start a new transaction.
+ * 2. Call lockTuple using your new transaction as parameter; all the
+ * properties of the found record will be copied
+ * to the new transaction.
+ * 3. When you execute the new transaction, the lock held by the scan will
+ * be transferred to the new transaction (it is taken over).
+ *
+ * @note You must have started the scan with openScanExclusive
+ * or have explicitly requested keyinfo to be able to lock
+ * the found tuple.
+ *
+ * @param lockingTrans the locking transaction connection.
+ * @return an NdbOperation or NULL.
+ */
+ NdbOperation* lockTuple();
+ NdbOperation* lockTuple(NdbConnection* lockingTrans);
+
+ /**
* Transfer scan operation to an updating transaction. Use this function
* when a scan has found a record that you want to update.
* 1. Start a new transaction.
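
The new header comment above describes a three-step take-over. Put together in NDB API terms, the flow might look like the following hedged sketch (the table name "t1", the column "x" and the surrounding setup are assumptions; error handling is reduced to early returns):

  #include <NdbApi.hpp>

  // Sketch only: scan with a shared lock and transfer the lock on each
  // interesting row to a separate locking transaction via lockTuple().
  int lock_matching_rows(Ndb* ndb)
  {
    NdbConnection* scanTrans = ndb->startTransaction();          // scanning transaction
    if (scanTrans == NULL) return -1;
    NdbScanOperation* scan = scanTrans->getNdbScanOperation("t1");
    if (scan == NULL) return -1;

    // keyinfo must be requested for LM_Read, otherwise lockTuple()
    // fails later with error 4604 (see takeOverScanOp below).
    NdbResultSet* rs = scan->readTuples(NdbOperation::LM_Read,
                                        0, 0, true /* keyinfo */);
    NdbRecAttr* x = scan->getValue("x");                          // assumed column
    if (rs == NULL || x == NULL || scanTrans->execute(NoCommit) == -1)
      return -1;

    NdbConnection* lockTrans = ndb->startTransaction();           // step 1
    while (rs->nextResult(true) == 0)
    {
      if (x->u_32_value() == 1)                                   // row to keep locked
      {
        if (rs->lockTuple(lockTrans) == NULL)                     // step 2
          return -1;
      }
    }
    if (lockTrans->execute(NoCommit) == -1)                       // step 3: locks taken over
      return -1;

    ndb->closeTransaction(scanTrans);
    // ... work under the locks held by lockTrans, then ...
    lockTrans->execute(Commit);
    ndb->closeTransaction(lockTrans);
    return 0;
  }
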
diff --git a/ndb/include/ndbapi/NdbScanOperation.hpp b/ndb/include/ndbapi/NdbScanOperation.hpp
index f6e68dd4abe..af656720efb 100644
--- a/ndb/include/ndbapi/NdbScanOperation.hpp
+++ b/ndb/include/ndbapi/NdbScanOperation.hpp
@@ -64,14 +64,16 @@ public:
* Tuples are not stored in NdbResultSet until execute(NoCommit)
* has been executed and nextResult has been called.
*
+ * @param keyinfo Return primary key, needed to be able to call lockTuple
* @param parallel Scan parallelism
* @param batch No of rows to fetch from each fragment at a time
* @param LockMode Scan lock handling
* @returns NdbResultSet.
- * @note specifying 0 for batch and parallall means max performance
+ * @note specifying 0 for batch and parallell means max performance
*/
NdbResultSet* readTuples(LockMode = LM_Read,
- Uint32 batch = 0, Uint32 parallel = 0);
+ Uint32 batch = 0, Uint32 parallel = 0,
+ bool keyinfo = false);
inline NdbResultSet* readTuples(int parallell){
return readTuples(LM_Read, 0, parallell);
diff --git a/ndb/src/ndbapi/NdbResultSet.cpp b/ndb/src/ndbapi/NdbResultSet.cpp
index 87b304126ba..780660169b3 100644
--- a/ndb/src/ndbapi/NdbResultSet.cpp
+++ b/ndb/src/ndbapi/NdbResultSet.cpp
@@ -73,6 +73,17 @@ void NdbResultSet::close(bool forceSend)
}
NdbOperation*
+NdbResultSet::lockTuple(){
+ return lockTuple(m_operation->m_transConnection);
+}
+
+NdbOperation*
+NdbResultSet::lockTuple(NdbConnection* takeOverTrans){
+ return m_operation->takeOverScanOp(NdbOperation::ReadRequest,
+ takeOverTrans);
+}
+
+NdbOperation*
NdbResultSet::updateTuple(){
return updateTuple(m_operation->m_transConnection);
}
diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp
index fc5a22cce17..0a39651ce28 100644
--- a/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -126,7 +126,8 @@ NdbScanOperation::init(const NdbTableImpl* tab, NdbConnection* myConnection)
NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
Uint32 batch,
- Uint32 parallel)
+ Uint32 parallel,
+ bool keyinfo)
{
m_ordered = 0;
@@ -170,7 +171,7 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
return 0;
}
- m_keyInfo = lockExcl ? 1 : 0;
+ m_keyInfo = (keyinfo || lockExcl) ? 1 : 0;
bool range = false;
if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex ||
@@ -956,18 +957,28 @@ NdbScanOperation::takeOverScanOp(OperationType opType, NdbConnection* pTrans){
if (newOp == NULL){
return NULL;
}
+ if (!m_keyInfo)
+ {
+ // Cannot take over lock if no keyinfo was requested
+ setErrorCodeAbort(4604);
+ return NULL;
+ }
pTrans->theSimpleState = 0;
const Uint32 len = (tRecAttr->attrSize() * tRecAttr->arraySize() + 3)/4-1;
newOp->theTupKeyLen = len;
newOp->theOperationType = opType;
- if (opType == DeleteRequest) {
- newOp->theStatus = GetValue;
- } else {
- newOp->theStatus = SetValue;
+ switch (opType) {
+ case (ReadRequest):
+ newOp->theLockMode = theLockMode;
+ // Fall through
+ case (DeleteRequest):
+ newOp->theStatus = GetValue;
+ break;
+ default:
+ newOp->theStatus = SetValue;
}
-
const Uint32 * src = (Uint32*)tRecAttr->aRef();
const Uint32 tScanInfo = src[len] & 0x3FFFF;
const Uint32 tTakeOverNode = src[len] >> 20;
@@ -1241,8 +1252,9 @@ NdbResultSet*
NdbIndexScanOperation::readTuples(LockMode lm,
Uint32 batch,
Uint32 parallel,
- bool order_by){
- NdbResultSet * rs = NdbScanOperation::readTuples(lm, batch, 0);
+ bool order_by,
+ bool keyinfo){
+ NdbResultSet * rs = NdbScanOperation::readTuples(lm, batch, 0, keyinfo);
if(rs && order_by){
m_ordered = 1;
Uint32 cnt = m_accessTable->getNoOfColumns() - 1;
diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c
index 69fc47ff70c..dea1b8c40ae 100644
--- a/ndb/src/ndbapi/ndberror.c
+++ b/ndb/src/ndbapi/ndberror.c
@@ -281,7 +281,7 @@ ErrorBundle ErrorCodes[] = {
{ 4601, AE, "Transaction is not started"},
{ 4602, AE, "You must call getNdbOperation before executeScan" },
{ 4603, AE, "There can only be ONE operation in a scan transaction" },
- { 4604, AE, "takeOverScanOp, opType must be UpdateRequest or DeleteRequest" },
+ { 4604, AE, "takeOverScanOp, to take over a scanned row one must explicitly request keyinfo in readTuples call" },
{ 4605, AE, "You may only call openScanRead or openScanExclusive once for each operation"},
{ 4607, AE, "There may only be one operation in a scan transaction"},
{ 4608, AE, "You can not takeOverScan unless you have used openScanExclusive"},
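
With the keyinfo guard in takeOverScanOp() and the reworded 4604 message, a read scan opened without keyinfo now fails cleanly when a take-over is attempted. A small hedged sketch of that failure path (table name and setup are assumptions; ha_ndbcluster::next_result() checks trans->getNdbError() the same way):

  #include <NdbApi.hpp>
  #include <cstdio>

  // Sketch only: keyinfo defaults to false for LM_Read, so trying to
  // lock a scanned row is rejected with error 4604 instead of building
  // a take-over operation without a key.
  void show_keyinfo_requirement(Ndb* ndb)
  {
    NdbConnection* trans = ndb->startTransaction();
    NdbScanOperation* scan = trans->getNdbScanOperation("t1");   // assumed table
    NdbResultSet* rs = scan->readTuples(NdbOperation::LM_Read);  // no keyinfo
    trans->execute(NoCommit);

    if (rs->nextResult(true) == 0 && rs->lockTuple() == NULL)
    {
      // Expected: 4604, "takeOverScanOp, to take over a scanned row one
      // must explicitly request keyinfo in readTuples call"
      printf("lockTuple failed: %s\n", trans->getNdbError().message);
    }
    ndb->closeTransaction(trans);
  }
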
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 3f2c6cbc6bb..1031c4f635c 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1,4 +1,4 @@
- /* Copyright (C) 2000-2003 MySQL AB
+ /* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1043,12 +1043,23 @@ void ha_ndbcluster::release_metadata()
int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type)
{
+ DBUG_ENTER("ha_ndbcluster::get_ndb_lock_type");
if (type >= TL_WRITE_ALLOW_WRITE)
- return NdbOperation::LM_Exclusive;
- else if (uses_blob_value(m_retrieve_all_fields))
- return NdbOperation::LM_Read;
+ {
+ DBUG_PRINT("info", ("Using exclusive lock"));
+ DBUG_RETURN(NdbOperation::LM_Exclusive);
+ }
+ else if (type == TL_READ_WITH_SHARED_LOCKS ||
+ uses_blob_value(m_retrieve_all_fields))
+ {
+ DBUG_PRINT("info", ("Using read lock"));
+ DBUG_RETURN(NdbOperation::LM_Read);
+ }
else
- return NdbOperation::LM_CommittedRead;
+ {
+ DBUG_PRINT("info", ("Using committed read"));
+ DBUG_RETURN(NdbOperation::LM_CommittedRead);
+ }
}
static const ulong index_type_flags[]=
@@ -1384,12 +1395,35 @@ inline int ha_ndbcluster::next_result(byte *buf)
if (!cursor)
DBUG_RETURN(HA_ERR_END_OF_FILE);
+
+ if (m_lock_tuple)
+ {
+ /*
+ Lock level m_lock.type is either TL_WRITE_ALLOW_WRITE
+ (SELECT ... FOR UPDATE) or TL_READ_WITH_SHARED_LOCKS
+ (SELECT ... LOCK IN SHARE MODE) and the row was not explicitly
+ unlocked with an unlock_row() call
+ */
+ NdbConnection *trans= m_active_trans;
+ NdbOperation *op;
+ // Lock row
+ DBUG_PRINT("info", ("Keeping lock on scanned row"));
+
+ if (!(op= m_active_cursor->lockTuple()))
+ {
+ m_lock_tuple= false;
+ ERR_RETURN(trans->getNdbError());
+ }
+ m_ops_pending++;
+ }
+ m_lock_tuple= false;
/*
If this an update or delete, call nextResult with false
to process any records already cached in NdbApi
*/
- bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE;
+ bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE &&
+ m_lock.type != TL_READ_WITH_SHARED_LOCKS;
do {
DBUG_PRINT("info", ("Call nextResult, contact_ndb: %d", contact_ndb));
/*
@@ -1407,9 +1441,16 @@ inline int ha_ndbcluster::next_result(byte *buf)
{
// One more record found
DBUG_PRINT("info", ("One more record found"));
-
+
unpack_record(buf);
table->status= 0;
+ /*
+ Explicitly lock tuple if "select for update" or
+ "select lock in share mode"
+ */
+ m_lock_tuple= (m_lock.type == TL_WRITE_ALLOW_WRITE
+ ||
+ m_lock.type == TL_READ_WITH_SHARED_LOCKS);
DBUG_RETURN(0);
}
else if (check == 1 || check == 2)
@@ -1444,7 +1485,6 @@ inline int ha_ndbcluster::next_result(byte *buf)
contact_ndb= (check == 2);
}
} while (check == 2);
-
table->status= STATUS_NOT_FOUND;
if (check == -1)
DBUG_RETURN(ndb_err(trans));
@@ -1679,10 +1719,11 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
restart= false;
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
+ bool need_pk = (lm == NdbOperation::LM_Read);
if (!(op= trans->getNdbIndexScanOperation((NDBINDEX *)
m_index[active_index].index,
(const NDBTAB *) m_table)) ||
- !(cursor= op->readTuples(lm, 0, parallelism, sorted)))
+ !(cursor= op->readTuples(lm, 0, parallelism, sorted, need_pk)))
ERR_RETURN(trans->getNdbError());
m_active_cursor= cursor;
} else {
@@ -1817,8 +1858,9 @@ int ha_ndbcluster::full_table_scan(byte *buf)
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
+ bool need_pk = (lm == NdbOperation::LM_Read);
if (!(op=trans->getNdbScanOperation((const NDBTAB *) m_table)) ||
- !(cursor= op->readTuples(lm, 0, parallelism)))
+ !(cursor= op->readTuples(lm, 0, parallelism, need_pk)))
ERR_RETURN(trans->getNdbError());
m_active_cursor= cursor;
DBUG_RETURN(define_read_attrs(buf, op));
@@ -2082,6 +2124,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
DBUG_PRINT("info", ("Calling updateTuple on cursor"));
if (!(op= cursor->updateTuple()))
ERR_RETURN(trans->getNdbError());
+ m_lock_tuple= false;
m_ops_pending++;
if (uses_blob_value(FALSE))
m_blobs_pending= TRUE;
@@ -2157,6 +2200,7 @@ int ha_ndbcluster::delete_row(const byte *record)
DBUG_PRINT("info", ("Calling deleteTuple on cursor"));
if (cursor->deleteTuple() != 0)
ERR_RETURN(trans->getNdbError());
+ m_lock_tuple= false;
m_ops_pending++;
no_uncommitted_rows_update(-1);
@@ -2439,6 +2483,12 @@ int ha_ndbcluster::index_init(uint index)
{
DBUG_ENTER("index_init");
DBUG_PRINT("enter", ("index: %u", index));
+ /*
+ Locks are explicitly released in the scan
+ unless m_lock.type == TL_READ_HIGH_PRIORITY
+ and there is no subsequent call to unlock_row()
+ */
+ m_lock_tuple= false;
DBUG_RETURN(handler::index_init(index));
}
@@ -2683,7 +2733,7 @@ int ha_ndbcluster::close_scan()
if (!cursor)
DBUG_RETURN(1);
-
+ m_lock_tuple= false;
if (m_ops_pending)
{
/*
@@ -3376,6 +3426,22 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
}
/*
+ Unlock the last row read in an open scan.
+ Rows are unlocked by default in ndb, but
+ for SELECT FOR UPDATE and SELECT ... LOCK IN SHARE MODE
+ locks are kept if unlock_row() is not called.
+*/
+
+void ha_ndbcluster::unlock_row()
+{
+ DBUG_ENTER("unlock_row");
+
+ DBUG_PRINT("info", ("Unlocking row"));
+ m_lock_tuple= false;
+ DBUG_VOID_RETURN;
+}
+
+/*
Start a transaction for running a statement if one is not
already running in a transaction. This will be the case in
a BEGIN; COMMIT; block
@@ -3767,6 +3833,12 @@ int ha_ndbcluster::create(const char *name,
set_dbname(name2);
set_tabname(name2);
+ if (current_thd->lex->sql_command == SQLCOM_TRUNCATE)
+ {
+ DBUG_PRINT("info", ("Dropping and re-creating table for TRUNCATE"));
+ if ((my_errno= delete_table(name)))
+ DBUG_RETURN(my_errno);
+ }
if (create_from_engine)
{
/*
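
The handler keeps the lock on a scanned row by deferring the take-over: m_lock_tuple is set when a row is returned, and the actual lockTuple() call happens at the start of the next next_result() call, or is skipped entirely if the SQL layer called unlock_row() in between (e.g. the row did not match the rest of the WHERE clause). A condensed, hedged sketch of that flow, not the handler's actual code (the function and parameter names are invented for illustration):

  #include <NdbApi.hpp>

  // Sketch only: simplified version of the scan-lock handling added to
  // ha_ndbcluster::next_result().  want_lock corresponds to
  // SELECT ... FOR UPDATE / LOCK IN SHARE MODE; lock_tuple mirrors the
  // new m_lock_tuple member.
  int fetch_next_locked(NdbResultSet* cursor, NdbConnection* trans,
                        bool& lock_tuple, bool want_lock)
  {
    if (lock_tuple)                       // previous row is still wanted
    {
      if (cursor->lockTuple() == NULL)    // take over the scan lock
      {
        lock_tuple = false;
        return -1;                        // error via trans->getNdbError()
      }
      // the pending lock operation is sent with the next execute(NoCommit)
    }
    lock_tuple = false;

    if (cursor->nextResult(true) == 0)
    {
      // one more row: lock it later, unless unlock_row() clears the flag
      lock_tuple = want_lock;
      return 0;
    }
    return 1;                             // end of scan (or error)
  }
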
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 83d9d87777a..313e497f9b5 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -120,6 +120,7 @@ class ha_ndbcluster: public handler
int extra_opt(enum ha_extra_function operation, ulong cache_size);
int reset();
int external_lock(THD *thd, int lock_type);
+ void unlock_row();
int start_stmt(THD *thd);
const char * table_type() const;
const char ** bas_ext() const;
@@ -223,6 +224,7 @@ class ha_ndbcluster: public handler
char m_tabname[FN_HEADLEN];
ulong m_table_flags;
THR_LOCK_DATA m_lock;
+ bool m_lock_tuple;
NDB_SHARE *m_share;
NDB_INDEX_DATA m_index[MAX_KEY];
// NdbRecAttr has no reference to blob
diff --git a/sql/handler.h b/sql/handler.h
index d4bb19dd7b2..e361dfd4559 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -533,8 +533,7 @@ extern TYPELIB myisam_stats_method_typelib;
#define ha_supports_generate(T) (T != DB_TYPE_INNODB && \
T != DB_TYPE_BERKELEY_DB && \
T != DB_TYPE_ARCHIVE_DB && \
- T != DB_TYPE_FEDERATED_DB && \
- T != DB_TYPE_NDBCLUSTER)
+ T != DB_TYPE_FEDERATED_DB)
bool ha_caching_allowed(THD* thd, char* table_key,
uint key_length, uint8 cache_type);