author    unknown <tomas@poseidon.ndb.mysql.com>  2006-06-14 20:16:32 +0200
committer unknown <tomas@poseidon.ndb.mysql.com>  2006-06-14 20:16:32 +0200
commit    ca1fd91f160a812f5da149cfb9a8065cf78bf8ad (patch)
tree      b39b0aa6d5ac4b74a72072f65acc8aa90b2bc49a /sql/ha_ndbcluster.cc
parent    234de474750c637f4b9d6f609af94e10ccaf3a42 (diff)
Bug #19493 NDB does not ignore duplicate keys when using LOAD DATA LOCAL
- make sure to disable bulk insert when a check for duplicate keys is needed

mysql-test/r/ndb_loaddatalocal.result:
  New BitKeeper file ``mysql-test/r/ndb_loaddatalocal.result''
mysql-test/t/ndb_loaddatalocal.test:
  New BitKeeper file ``mysql-test/t/ndb_loaddatalocal.test''
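
To make the shape of the fix easier to see outside the diff, here is a minimal, self-contained C++ sketch of the idea (not the real ha_ndbcluster code; MiniHandler, needs_peek and the other names are invented for illustration): whenever "ignore duplicate key" semantics require a per-row peek for an existing primary key before each insert, the bulk-insert path must drop its batch size to 1, otherwise rows are buffered and the duplicate check cannot run before they are executed.

// Hypothetical, simplified sketch of the pattern in this patch, not the
// actual ha_ndbcluster code: a per-row duplicate-key peek before each
// insert is only possible if batching is disabled, so start_bulk_insert()
// must detect the same condition as write_row(). All names are made up.
#include <cstddef>
#include <cstdio>

class MiniHandler {
  bool m_ignore_dup_key{true};    // e.g. INSERT IGNORE / LOAD DATA ... IGNORE
  bool m_use_write{false};        // true when REPLACE-style writes are used
  bool m_has_primary_key{true};
  std::size_t m_batch_rows{1000};

  // Both call sites below must agree on when the per-row peek happens;
  // sharing one predicate is one way to keep them in sync.
  bool needs_peek() const {
    return !m_use_write && m_ignore_dup_key && m_has_primary_key;
  }

public:
  void start_bulk_insert(std::size_t rows) {
    if (needs_peek()) {
      // A duplicate-key peek precedes every write, so batching is disabled.
      std::printf("batching off: duplicate keys checked per row\n");
      m_batch_rows = 1;
      return;
    }
    m_batch_rows = rows ? rows : 1000;  // unknown row count: guess
  }

  int write_row(const char *record) {
    if (needs_peek()) {
      // The real handler calls peek_row(record) here and skips or reports
      // the row on a duplicate; this sketch just logs the check.
      std::printf("peek for existing key before inserting %s\n", record);
    }
    // ... send the row, flushing after every m_batch_rows rows ...
    return 0;
  }
};

int main() {
  MiniHandler h;
  h.start_bulk_insert(0);   // row count unknown, as with LOAD DATA
  return h.write_row("row1");
}

In this sketch the two call sites share a single predicate; the actual patch instead duplicates the condition in both functions and relies on the "compare if expression with that in ..." comments below to keep the two expressions consistent.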
Diffstat (limited to 'sql/ha_ndbcluster.cc')
-rw-r--r--  sql/ha_ndbcluster.cc  18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index b2f60fdaf2e..5be09f697a0 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1836,6 +1836,11 @@ int ha_ndbcluster::write_row(byte *record)
if(m_ignore_dup_key && table->primary_key != MAX_KEY)
{
+ /*
+ compare if expression with that in start_bulk_insert()
+ start_bulk_insert will set parameters to ensure that each
+ write_row is committed individually
+ */
int peek_res= peek_row(record);
if (!peek_res)
@@ -2996,6 +3001,19 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
DBUG_PRINT("enter", ("rows: %d", (int)rows));
m_rows_inserted= (ha_rows) 0;
+ if (!m_use_write && m_ignore_dup_key)
+ {
+ /*
+ compare if expression with that in write_row
+ we have a situation where peek_row() will be called
+ so we cannot batch
+ */
+ DBUG_PRINT("info", ("Batching turned off as duplicate key is "
+ "ignored by using peek_row"));
+ m_rows_to_insert= 1;
+ m_bulk_insert_rows= 1;
+ DBUG_VOID_RETURN;
+ }
if (rows == (ha_rows) 0)
{
/* We don't know how many will be inserted, guess */