author     unknown <mskold/marty@mysql.com/linux.site>   2006-11-30 23:10:11 +0100
committer  unknown <mskold/marty@mysql.com/linux.site>   2006-11-30 23:10:11 +0100
commit     bae9fc077ca178a63f7d97e4b161c35a3e63b85d (patch)
tree       b7f1174cbd4bed3e76e64a71a4d6e86274b90687
parent     910b5c5930bb2de8df75ea812b51fb78a7af2aeb (diff)
download   mariadb-git-bae9fc077ca178a63f7d97e4b161c35a3e63b85d.tar.gz
bug#18487 UPDATE IGNORE not supported for unique constraint violation of non-primary key: only check pk if it is updated
-rw-r--r--  sql/ha_ndbcluster.cc  17
-rw-r--r--  sql/ha_ndbcluster.h    2
2 files changed, 10 insertions, 9 deletions
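
For orientation before reading the hunks: the sketch below shows, in plain C++ with simplified stand-in types, the update_row() behaviour this patch is after. The primary key is included in the duplicate-key peek (peek_indexed_rows) only when key_cmp() reports that its columns actually changed, and the IGNORE path is taken only for plain UPDATE statements, not for INSERT ... ON DUPLICATE KEY UPDATE. None of the helper names below come from the real handler; they only mirror the logic visible in the diff.

// Rough sketch of the update_row() logic after this patch. Everything here
// except the names peek_indexed_rows, the key_cmp semantics, MAX_KEY and the
// two HA_ERR_* codes is a simplified stand-in, not the real NDB handler code.
#include <cstddef>
#include <cstring>

enum {
  HA_ERR_KEY_NOT_FOUND  = 120,   // codes as defined in include/my_base.h
  HA_ERR_FOUND_DUPP_KEY = 121
};
static const unsigned MAX_KEY = ~0u;

struct HandlerSketch {
  unsigned primary_key;          // MAX_KEY when the table has no primary key
  bool     ignore_dup_key;       // true for UPDATE IGNORE
  bool     is_plain_update;      // stand-in for sql_command == SQLCOM_UPDATE

  // Stand-in for key_cmp(): did any primary-key column change?
  bool pk_changed(const unsigned char *old_row, const unsigned char *new_row,
                  std::size_t pk_len) const {
    return std::memcmp(old_row, new_row, pk_len) != 0;
  }

  // Stand-in for peek_indexed_rows(record, check_pk): reads rows that would
  // collide on unique constraints; after the patch the primary-key read is
  // attempted only when check_pk is true.  Stubbed to "no collision found".
  int peek_indexed_rows(const unsigned char * /*record*/, bool /*check_pk*/) {
    return HA_ERR_KEY_NOT_FOUND;
  }

  int update_row(const unsigned char *old_row, const unsigned char *new_row,
                 std::size_t pk_len) {
    // Core of the fix: the primary key is only peeked if it is updated.
    const bool pk_update =
        primary_key != MAX_KEY && pk_changed(old_row, new_row, pk_len);

    if (ignore_dup_key && is_plain_update) {       // UPDATE IGNORE path only
      const int peek_res = peek_indexed_rows(new_row, pk_update);
      if (peek_res == 0)
        return HA_ERR_FOUND_DUPP_KEY;              // a colliding row exists
      if (peek_res != HA_ERR_KEY_NOT_FOUND)
        return peek_res;                           // genuine error
      // HA_ERR_KEY_NOT_FOUND: no collision, fall through to the update
    }
    // ... apply the update; a primary-key or partition change still takes
    // the delete+insert path guarded by (pk_update || part_id changed) ...
    return 0;
  }
};
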
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 59f213394ad..0fc884f8e68 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1885,7 +1885,8 @@ bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans,
* primary key or unique index values
*/
-int ha_ndbcluster::peek_indexed_rows(const byte *record)
+int ha_ndbcluster::peek_indexed_rows(const byte *record,
+ bool check_pk)
{
NdbTransaction *trans= m_active_trans;
NdbOperation *op;
@@ -1897,7 +1898,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record)
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
first= NULL;
- if (table->s->primary_key != MAX_KEY)
+ if (check_pk && table->s->primary_key != MAX_KEY)
{
/*
* Fetch any row with colliding primary key
@@ -2622,7 +2623,7 @@ int ha_ndbcluster::write_row(byte *record)
start_bulk_insert will set parameters to ensure that each
write_row is committed individually
*/
- int peek_res= peek_indexed_rows(record);
+ int peek_res= peek_indexed_rows(record, true);
if (!peek_res)
{
@@ -2831,6 +2832,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
uint32 old_part_id= 0, new_part_id= 0;
int error;
longlong func_value;
+ bool pk_update= (table_share->primary_key != MAX_KEY &&
+ key_cmp(table_share->primary_key, old_data, new_data));
DBUG_ENTER("update_row");
m_write_op= TRUE;
@@ -2838,9 +2841,9 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
* If IGNORE then ignore constraint violations on primary and unique keys,
* but check that it is not part of INSERT ... ON DUPLICATE KEY UPDATE
*/
- if (m_ignore_dup_key && thd->lex->sql_command != SQLCOM_INSERT)
+ if (m_ignore_dup_key && thd->lex->sql_command == SQLCOM_UPDATE)
{
- int peek_res= peek_indexed_rows(new_data);
+ int peek_res= peek_indexed_rows(new_data, pk_update);
if (!peek_res)
{
@@ -2870,9 +2873,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
* Check for update of primary key or partition change
* for special handling
*/
- if (((table_share->primary_key != MAX_KEY) &&
- key_cmp(table_share->primary_key, old_data, new_data)) ||
- (old_part_id != new_part_id))
+ if (pk_update || old_part_id != new_part_id)
{
int read_res, insert_res, delete_res, undo_res;
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 8e153e0bbc4..ed9e5ea41cc 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -846,7 +846,7 @@ private:
const NdbOperation *first,
const NdbOperation *last,
uint errcode);
- int peek_indexed_rows(const byte *record);
+ int peek_indexed_rows(const byte *record, bool check_pk);
int fetch_next(NdbScanOperation* op);
int next_result(byte *buf);
int define_read_attrs(byte* buf, NdbOperation* op);
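
On the callee side, the only change to peek_indexed_rows() visible in the hunks is that the primary-key read is now guarded by the new check_pk argument; the peeks on unique secondary indexes that follow are outside the shown context and are presumably unchanged. A minimal sketch of that guard, again with hypothetical stand-ins rather than the real NdbTransaction/NdbOperation API:

// Sketch of the guard added inside peek_indexed_rows(); read_by_primary_key()
// and execute_peeks() are hypothetical stand-ins for the NDB API calls.
struct PeekSketch {
  static const unsigned MAX_KEY = ~0u;
  unsigned primary_key;                     // MAX_KEY when there is no PK

  int peek_indexed_rows(const unsigned char *record, bool check_pk) {
    if (check_pk && primary_key != MAX_KEY) {
      // Fetch any row with a colliding primary key -- skipped entirely
      // when the caller knows the PK is not being updated.
      read_by_primary_key(record);
    }
    // ... peek each unique secondary index as before (outside the hunk) ...
    return execute_peeks();
  }

  // Stubs so the sketch stands alone; the real code issues NDB reads here.
  void read_by_primary_key(const unsigned char * /*record*/) {}
  int  execute_peeks() { return 0; }
};

The caller side of the same contract is visible in the hunks above: write_row() still passes true, since an INSERT can always collide on the primary key, while update_row() passes the new pk_update flag.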