-rw-r--r--   mysql-test/r/ndb_alter_table.result          2
-rw-r--r--   mysql-test/r/ndb_basic.result               60
-rw-r--r--   mysql-test/r/ndb_multi.result                1
-rw-r--r--   mysql-test/t/ndb_alter_table.test            2
-rw-r--r--   mysql-test/t/ndb_basic.test                 27
-rw-r--r--   mysql-test/t/ndb_multi.test                  2
-rw-r--r--   ndb/src/kernel/blocks/dbacc/DbaccMain.cpp    6
-rw-r--r--   ndb/src/ndbapi/Ndb.cpp                      36
-rw-r--r--   ndb/test/ndbapi/testOperations.cpp          93
-rw-r--r--   sql/ha_ndbcluster.cc                        40
-rw-r--r--   sql/share/swedish/errmsg.txt                 2
11 files changed, 247 insertions, 24 deletions
diff --git a/mysql-test/r/ndb_alter_table.result b/mysql-test/r/ndb_alter_table.result
index 88ac04db111..52ae0b58d56 100644
--- a/mysql-test/r/ndb_alter_table.result
+++ b/mysql-test/r/ndb_alter_table.result
@@ -179,7 +179,7 @@ a b c
2 two two
alter table t1 drop index c;
select * from t1 where b = 'two';
-ERROR 42S02: Table 'test.t1' doesn't exist
+ERROR HY000: Table definition has changed, please retry transaction
select * from t1 where b = 'two';
a b c
2 two two
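The expected error here changes because ha_ndbcluster.cc (below in this patch) stops mapping NDB error 284 to "table doesn't exist": when the table is still present in the NDB dictionary, the handler now reports that the table definition changed and asks the client to retry. A minimal sketch of a client honoring that hint, assuming the standard MySQL C API; the helper name, retry limit, and error-string match are illustrative only, and in this version the message arrives as errno 1105 (ER_UNKNOWN_ERROR):

    #include <mysql.h>
    #include <string.h>

    /* Hypothetical helper: run a statement, retrying when the server
       reports the schema change added by this patch. */
    static int query_with_retry(MYSQL *con, const char *stmt, int retries)
    {
      do
      {
        if (mysql_query(con, stmt) == 0)
          return 0;                               /* success */
        if (mysql_errno(con) != 1105 ||
            !strstr(mysql_error(con), "Table definition has changed"))
          return -1;                              /* unrelated failure */
        /* The failed statement invalidated the local dictionary cache,
           so an immediate retry sees the new table definition. */
      } while (retries-- > 0);
      return -1;
    }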
diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result
index a6396080ef0..3712fa2b5ca 100644
--- a/mysql-test/r/ndb_basic.result
+++ b/mysql-test/r/ndb_basic.result
@@ -607,3 +607,63 @@ primary key (a))
engine=ndb
max_rows=1;
drop table t1;
+create table t1
+(counter int(64) NOT NULL auto_increment,
+datavalue char(40) default 'XXXX',
+primary key (counter)
+) ENGINE=ndbcluster;
+insert into t1 (datavalue) values ('newval');
+insert into t1 (datavalue) values ('newval');
+select * from t1 order by counter;
+counter datavalue
+1 newval
+2 newval
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+select * from t1 order by counter;
+counter datavalue
+1 newval
+2 newval
+3 newval
+4 newval
+5 newval
+6 newval
+7 newval
+8 newval
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+select * from t1 order by counter;
+counter datavalue
+1 newval
+2 newval
+3 newval
+4 newval
+5 newval
+6 newval
+7 newval
+8 newval
+35 newval
+36 newval
+37 newval
+38 newval
+39 newval
+40 newval
+41 newval
+42 newval
+43 newval
+44 newval
+45 newval
+46 newval
+47 newval
+48 newval
+49 newval
+50 newval
+51 newval
+52 newval
+53 newval
+54 newval
+55 newval
+56 newval
+57 newval
+58 newval
+drop table t1;
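The jump from counter 8 to 35 in the expected output is deliberate: each mysqld holds its own Ndb object, and each Ndb object prefetches a block of auto_increment values (see the Ndb.cpp changes below), so the second connection continues from the next unclaimed block rather than from 9. A rough sketch of the API usage involved, assuming an established cluster connection; the cache size shown is illustrative, not the server's actual prefetch value:

    // Each Ndb object reserves ids in blocks; two Ndb objects therefore
    // produce disjoint, gap-separated auto_increment sequences.
    Ndb ndb("test");
    ndb.init();
    Uint32 cacheSize = 32;                      // illustrative block size
    Uint64 id = ndb.getAutoIncrementValue("t1", cacheSize);
    // ids id .. id+cacheSize-1 are now cached locally; another Ndb
    // object asking for values is handed the following block.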
diff --git a/mysql-test/r/ndb_multi.result b/mysql-test/r/ndb_multi.result
index 4a2389cd1ff..5696fda1c07 100644
--- a/mysql-test/r/ndb_multi.result
+++ b/mysql-test/r/ndb_multi.result
@@ -47,3 +47,4 @@ t2
t3
t4
drop table t1, t2, t3, t4;
+drop table t1, t3, t4;
diff --git a/mysql-test/t/ndb_alter_table.test b/mysql-test/t/ndb_alter_table.test
index f39edc0ee65..3ff2e735cb5 100644
--- a/mysql-test/t/ndb_alter_table.test
+++ b/mysql-test/t/ndb_alter_table.test
@@ -143,7 +143,7 @@ select * from t1 where b = 'two';
connection server1;
alter table t1 drop index c;
connection server2;
---error 1146
+--error 1105
select * from t1 where b = 'two';
select * from t1 where b = 'two';
connection server1;
diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test
index f460c573a9d..66300f61fc3 100644
--- a/mysql-test/t/ndb_basic.test
+++ b/mysql-test/t/ndb_basic.test
@@ -577,3 +577,30 @@ create table t1
engine=ndb
max_rows=1;
drop table t1;
+
+#
+# Test auto_increment
+#
+
+connect (con1,localhost,,,test);
+connect (con2,localhost,,,test);
+
+create table t1
+ (counter int(64) NOT NULL auto_increment,
+ datavalue char(40) default 'XXXX',
+ primary key (counter)
+ ) ENGINE=ndbcluster;
+
+connection con1;
+insert into t1 (datavalue) values ('newval');
+insert into t1 (datavalue) values ('newval');
+select * from t1 order by counter;
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+select * from t1 order by counter;
+connection con2;
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+select * from t1 order by counter;
+
+drop table t1;
diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test
index 9286721b677..27ddd6508e9 100644
--- a/mysql-test/t/ndb_multi.test
+++ b/mysql-test/t/ndb_multi.test
@@ -40,5 +40,7 @@ show status like 'handler_discover%';
show tables;
drop table t1, t2, t3, t4;
+connection server2;
+drop table t1, t3, t4;
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index c6d4e6af4d4..17c5a31cbed 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -11274,10 +11274,10 @@ void Dbacc::execACC_CHECK_SCAN(Signal* signal)
operationRecPtr.i = scanPtr.p->scanFirstQueuedOp;
ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
takeOutReadyScanQueue(signal);
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
if (operationRecPtr.p->elementIsDisappeared == ZTRUE) {
jam();
- fragrecptr.i = operationRecPtr.p->fragptr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
if (fragrecptr.p->createLcp == ZTRUE) {
if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
jam();
@@ -11922,8 +11922,6 @@ void Dbacc::sendNextScanConf(Signal* signal)
return;
}//if
- fragrecptr.i = operationRecPtr.p->fragptr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
if (fragrecptr.p->keyLength != 0) {
jam();
signal->theData[0] = scanPtr.p->scanUserptr;
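Together these two hunks hoist the fragment-record lookup: it used to happen inside the elementIsDisappeared branch and again in sendNextScanConf, and is now done once, unconditionally, as soon as the queued operation is taken over, so every later dereference of fragrecptr sees a checked pointer. For readers outside the kernel, a simplified sketch of the Ptr/ptrCheckGuard idiom in play; the real definitions live in the block framework and the bodies here are reduced to illustration:

    #include <cassert>
    typedef unsigned int Uint32;   // stands in for ndb_global.h

    // A pool reference: an index plus a pointer that is only valid
    // after a guard call has resolved and bounds-checked the index.
    template <class T> struct Ptr { Uint32 i; T* p; };

    // The guard resolves ptr.i against the pool; the real kernel shuts
    // the node down on an out-of-range index instead of asserting.
    template <class T>
    void ptrCheckGuard(Ptr<T>& ptr, Uint32 poolSize, T* pool)
    {
      assert(ptr.i < poolSize);
      ptr.p = &pool[ptr.i];
    }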
diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp
index b5493622b70..6390a1b50b5 100644
--- a/ndb/src/ndbapi/Ndb.cpp
+++ b/ndb/src/ndbapi/Ndb.cpp
@@ -722,26 +722,28 @@ Remark: Returns a new TupleId to the application.
Uint64
Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize)
{
- DEBUG_TRACE("getAutoIncrementValue");
+ DBUG_ENTER("getAutoIncrementValue");
const char * internalTableName = internalizeTableName(aTableName);
Ndb_local_table_info *info=
theDictionary->get_local_table_info(internalTableName, false);
if (info == 0)
- return ~0;
+ DBUG_RETURN(~0);
const NdbTableImpl *table= info->m_table_impl;
Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
- return tupleId;
+ DBUG_PRINT("info", ("value %u", tupleId));
+ DBUG_RETURN(tupleId);
}
Uint64
Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, Uint32 cacheSize)
{
- DEBUG_TRACE("getAutoIncrementValue");
+ DBUG_ENTER("getAutoIncrementValue");
if (aTable == 0)
- return ~0;
+ DBUG_RETURN(~0);
const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
- return tupleId;
+ DBUG_PRINT("info", ("value %u", tupleId));
+ DBUG_RETURN(tupleId);
}
Uint64
@@ -756,39 +758,45 @@ Ndb::getTupleIdFromNdb(const char* aTableName, Uint32 cacheSize)
Uint64
Ndb::getTupleIdFromNdb(Uint32 aTableId, Uint32 cacheSize)
{
+ DBUG_ENTER("getTupleIdFromNdb");
if ( theFirstTupleId[aTableId] != theLastTupleId[aTableId] )
{
theFirstTupleId[aTableId]++;
- return theFirstTupleId[aTableId];
+ DBUG_PRINT("info", ("next cached value %u", theFirstTupleId[aTableId]));
+ DBUG_RETURN(theFirstTupleId[aTableId]);
}
else // theFirstTupleId == theLastTupleId
{
- return opTupleIdOnNdb(aTableId, cacheSize, 0);
+ DBUG_PRINT("info",("reading %u values from database",
+ (cacheSize == 0) ? 1 : cacheSize));
+ DBUG_RETURN(opTupleIdOnNdb(aTableId, (cacheSize == 0) ? 1 : cacheSize, 0));
}
}
Uint64
Ndb::readAutoIncrementValue(const char* aTableName)
{
- DEBUG_TRACE("readtAutoIncrementValue");
+ DBUG_ENTER("readtAutoIncrementValue");
const NdbTableImpl* table = theDictionary->getTable(aTableName);
if (table == 0) {
theError= theDictionary->getNdbError();
- return ~0;
+ DBUG_RETURN(~0);
}
Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
- return tupleId;
+ DBUG_PRINT("info", ("value %u", tupleId));
+ DBUG_RETURN(tupleId);
}
Uint64
Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable)
{
- DEBUG_TRACE("readtAutoIncrementValue");
+ DBUG_ENTER("readtAutoIncrementValue");
if (aTable == 0)
- return ~0;
+ DBUG_RETURN(~0);
const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
- return tupleId;
+ DBUG_PRINT("info", ("value %u", tupleId));
+ DBUG_RETURN(tupleId);
}
Uint64
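getTupleIdFromNdb() is a cached block allocator: while theFirstTupleId != theLastTupleId the next id comes from memory, and only an empty cache goes to the data nodes, now for max(cacheSize, 1) values so that a cacheSize of 0 no longer requests an empty block. The same scheme as a self-contained sketch; the class and method names are invented for illustration, and fetch_block() stands in for opTupleIdOnNdb():

    #include <cstdint>

    class TupleIdCache {
      uint64_t first = 0, last = 0;      // first == last: cache empty
    public:
      uint64_t next(uint32_t cacheSize) {
        if (first != last)
          return ++first;                // serve from the cached range
        uint32_t count = cacheSize ? cacheSize : 1;   // the fix above
        uint64_t start = fetch_block(count);
        first = start;                   // start is handed out now;
        last = start + count - 1;        // start+1 .. last stay cached
        return first;
      }
    private:
      uint64_t fetch_block(uint32_t count);  // reserves a block of
                                             // `count` ids on the nodes
    };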
diff --git a/ndb/test/ndbapi/testOperations.cpp b/ndb/test/ndbapi/testOperations.cpp
index e254aff58dc..9f1d5ee1191 100644
--- a/ndb/test/ndbapi/testOperations.cpp
+++ b/ndb/test/ndbapi/testOperations.cpp
@@ -531,6 +531,74 @@ runOperations(NDBT_Context* ctx, NDBT_Step* step)
}
int
+runLockUpgrade1(NDBT_Context* ctx, NDBT_Step* step){
+ // Bug#9749: thread 1 takes a shared read lock on row 0,
+ // then upgrades it to an exclusive lock via an update
+ Ndb* pNdb = GETNDB(step);
+ HugoOperations hugoOps(*ctx->getTab());
+ HugoTransactions hugoTrans(*ctx->getTab());
+
+ if(hugoTrans.loadTable(pNdb, 1) != 0){
+ g_err << "Load table failed" << endl;
+ return NDBT_FAILED;
+ }
+
+ int result= NDBT_OK;
+ do
+ {
+ CHECK(hugoOps.startTransaction(pNdb) == 0);
+ CHECK(hugoOps.pkReadRecord(pNdb, 0, 1, NdbOperation::LM_Read) == 0);
+ CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
+
+ ctx->setProperty("READ_DONE", 1);
+ ctx->broadcast();
+ ndbout_c("wait 2");
+ ctx->getPropertyWait("READ_DONE", 2);
+ ndbout_c("wait 2 - done");
+ ctx->setProperty("READ_DONE", 3);
+ ctx->broadcast();
+ ndbout_c("before update");
+ CHECK(hugoOps.pkUpdateRecord(pNdb, 0, 1, 2) == 0);
+ ndbout_c("wait update");
+ CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
+ CHECK(hugoOps.closeTransaction(pNdb) == 0);
+ } while(0);
+
+ return result;
+}
+
+int
+runLockUpgrade2(NDBT_Context* ctx, NDBT_Step* step){
+ // Bug#9749: thread 2 takes a second shared read lock on the
+ // same row while thread 1 waits to upgrade, then commits
+ Ndb* pNdb = GETNDB(step);
+ HugoOperations hugoOps(*ctx->getTab());
+ HugoTransactions hugoTrans(*ctx->getTab());
+
+
+ int result= NDBT_OK;
+ do
+ {
+ CHECK(hugoOps.startTransaction(pNdb) == 0);
+ ndbout_c("wait 1");
+ ctx->getPropertyWait("READ_DONE", 1);
+ ndbout_c("wait 1 - done");
+ CHECK(hugoOps.pkReadRecord(pNdb, 0, 1, NdbOperation::LM_Read) == 0);
+ CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
+ ctx->setProperty("READ_DONE", 2);
+ ctx->broadcast();
+ ndbout_c("wait 3");
+ ctx->getPropertyWait("READ_DONE", 3);
+ ndbout_c("wait 3 - done");
+
+ NdbSleep_MilliSleep(200);
+ CHECK(hugoOps.execute_Commit(pNdb) == 0);
+ } while(0);
+
+ return result;
+}
+
+int
main(int argc, const char** argv){
ndb_init();
@@ -538,6 +606,31 @@ main(int argc, const char** argv){
generate(tmp, 5);
NDBT_TestSuite ts("testOperations");
+
+ {
+ BaseString name("bug_9749");
+ NDBT_TestCaseImpl1 *pt = new NDBT_TestCaseImpl1(&ts,
+ name.c_str(), "");
+
+ pt->addInitializer(new NDBT_Initializer(pt,
+ "runClearTable",
+ runClearTable));
+
+ pt->addStep(new NDBT_ParallelStep(pt,
+ "thread1",
+ runLockUpgrade1));
+
+
+ pt->addStep(new NDBT_ParallelStep(pt,
+ "thread2",
+ runLockUpgrade2));
+
+ pt->addFinalizer(new NDBT_Finalizer(pt,
+ "runClearTable",
+ runClearTable));
+ ts.addTest(pt);
+ }
+
for(size_t i = 0; i<tmp.size(); i++)
{
BaseString name;
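The two steps handshake through the READ_DONE context property: thread 1 read-locks row 0 and sets 1; thread 2 takes a second shared lock on the same row and sets 2; thread 1 sets 3 and issues the updating operation, which must wait for the lock upgrade; thread 2 commits 200 ms later, releasing its share of the lock. A portable sketch of what setProperty/getPropertyWait/broadcast amount to, assuming they behave like a condition variable over an integer:

    #include <condition_variable>
    #include <mutex>

    // Stand-in for the NDBT_Context property used above.
    struct StateProp {
      std::mutex m;
      std::condition_variable cv;
      int value = 0;

      void set(int v) {                    // setProperty + broadcast
        { std::lock_guard<std::mutex> g(m); value = v; }
        cv.notify_all();
      }
      void wait_for(int v) {               // getPropertyWait
        std::unique_lock<std::mutex> g(m);
        cv.wait(g, [&]{ return value == v; });
      }
    };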
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 7025ac2cd1a..cfb7a61c864 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -113,7 +113,6 @@ static const err_code_mapping err_map[]=
{ 4244, HA_ERR_TABLE_EXIST, 1 },
{ 709, HA_ERR_NO_SUCH_TABLE, 1 },
- { 284, HA_ERR_NO_SUCH_TABLE, 1 },
{ 266, HA_ERR_LOCK_WAIT_TIMEOUT, 1 },
{ 274, HA_ERR_LOCK_WAIT_TIMEOUT, 1 },
@@ -363,7 +362,7 @@ void ha_ndbcluster::invalidateDictionaryCache()
int ha_ndbcluster::ndb_err(NdbConnection *trans)
{
int res;
- const NdbError err= trans->getNdbError();
+ NdbError err= trans->getNdbError();
DBUG_ENTER("ndb_err");
ERR_PRINT(err);
@@ -371,6 +370,33 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
case NdbError::SchemaError:
{
invalidateDictionaryCache();
+
+ if (err.code==284)
+ {
+ /*
+ Check if the table is _really_ gone or if the table has
+ been altered and thus changed table id
+ */
+ NDBDICT *dict= get_ndb()->getDictionary();
+ DBUG_PRINT("info", ("Check if table %s is really gone", m_tabname));
+ if (!(dict->getTable(m_tabname)))
+ {
+ err= dict->getNdbError();
+ DBUG_PRINT("info", ("Table not found, error: %d", err.code));
+ if (err.code != 709)
+ DBUG_RETURN(1);
+ }
+ else
+ {
+ DBUG_PRINT("info", ("Table exist but must have changed"));
+ /* In 5.0, this should be replaced with a mapping to a mysql error */
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Table definition has changed, "\
+ "please retry transaction",
+ MYF(0));
+ DBUG_RETURN(1);
+ }
+ }
break;
}
default:
@@ -2920,7 +2946,11 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
DBUG_PRINT("enter", ("rows: %d", (int)rows));
m_rows_inserted= 0;
- m_rows_to_insert= rows;
+ if (rows == 0)
+ /* We don't know how many will be inserted, guess */
+ m_rows_to_insert= m_autoincrement_prefetch;
+ else
+ m_rows_to_insert= rows;
/*
Calculate how many rows that should be inserted
@@ -3929,6 +3959,10 @@ longlong ha_ndbcluster::get_auto_increment()
DBUG_ENTER("get_auto_increment");
DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
Ndb *ndb= get_ndb();
+
+ if (m_rows_inserted > m_rows_to_insert)
+ /* We guessed too low */
+ m_rows_to_insert+= m_autoincrement_prefetch;
int cache_size=
(m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
m_rows_to_insert - m_rows_inserted
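Taken together, the two ha_ndbcluster.cc hunks above make the prefetch arithmetic self-correcting: start_bulk_insert() seeds m_rows_to_insert with a guess when the row count is unknown, get_auto_increment() grows that guess by one prefetch block whenever the inserts overrun it, and the cache request is capped at min(remaining rows, prefetch). The arithmetic restated as a small pure function; the names are illustrative, and the overrun check is tightened slightly to keep the unsigned subtraction safe:

    #include <algorithm>

    static unsigned long next_cache_size(unsigned long &rows_to_insert,
                                         unsigned long rows_inserted,
                                         unsigned long prefetch)
    {
      if (rows_inserted >= rows_to_insert)         // guessed too low
        rows_to_insert = rows_inserted + prefetch; // grow by one block
      return std::min(rows_to_insert - rows_inserted, prefetch);
    }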
diff --git a/sql/share/swedish/errmsg.txt b/sql/share/swedish/errmsg.txt
index 4dc42389d89..ca863df7939 100644
--- a/sql/share/swedish/errmsg.txt
+++ b/sql/share/swedish/errmsg.txt
@@ -67,7 +67,7 @@ character-set=latin1
"Kolumn '%-.64s' får inte vara NULL",
"Okänd databas: '%-.64s'",
"Tabellen '%-.64s' finns redan",
-"Okänd tabell '%-.64s'",
+"Okänd tabell '%-.180s'",
"Kolumn '%-.64s' i %s är inte unik",
"Servern går nu ned",
"Okänd kolumn '%-.64s' i %s",