author    unknown <mskold@mysql.com>  2004-11-18 15:55:18 +0100
committer unknown <mskold@mysql.com>  2004-11-18 15:55:18 +0100
commit    29d84f73de15bf97f3787cb4d598a579a1479567 (patch)
tree      abc5a07d466a1899235c15cc9ccc636cd510c63e /sql/ha_ndbcluster.cc
parent    1faf1d2349c2ccc29cc17e66bd24906cfcd3dd57 (diff)
parent    be5b6f4d4b29e2393b73f7c9f6a8d17ad7a95422 (diff)
download  mariadb-git-29d84f73de15bf97f3787cb4d598a579a1479567.tar.gz
Merge mskold@bk-internal.mysql.com:/home/bk/mysql-4.1
into mysql.com:/usr/local/home/marty/MySQL/test/mysql-4.1

sql/ha_ndbcluster.cc:
  Auto merged
Diffstat (limited to 'sql/ha_ndbcluster.cc')
-rw-r--r--  sql/ha_ndbcluster.cc | 66
1 file changed, 53 insertions(+), 13 deletions(-)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 468564bddd4..1962065d617 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -109,7 +109,7 @@ static const err_code_mapping err_map[]=
{
{ 626, HA_ERR_KEY_NOT_FOUND },
{ 630, HA_ERR_FOUND_DUPP_KEY },
- { 893, HA_ERR_FOUND_DUPP_UNIQUE },
+ { 893, HA_ERR_FOUND_DUPP_KEY }, // Unique constraint
{ 721, HA_ERR_TABLE_EXIST },
{ 4244, HA_ERR_TABLE_EXIST },
@@ -144,7 +144,7 @@ static int ndb_to_mysql_error(const NdbError *err)
// Push the NDB error message as warning
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
- err->code, err->message, "NDB");
+ err->code, err->message, "NDB");
return err->code;
}
}
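
The two hunks above only change how NDB error codes surface to the server: error 893 (violation of a unique index) is now reported as an ordinary duplicate-key error instead of HA_ERR_FOUND_DUPP_UNIQUE, which matters for the INSERT-IGNORE handling introduced in write_row() further down. A minimal, self-contained sketch of the table lookup (not the verbatim 4.1 function; the sentinel entry is an assumption, and 120/121 are the my_base.h values of HA_ERR_KEY_NOT_FOUND / HA_ERR_FOUND_DUPP_KEY):

    #include <cstdio>

    struct err_code_mapping { int ndb_err; int my_err; };

    // Condensed version of the err_map table patched above.
    static const err_code_mapping err_map_sketch[]=
    {
      { 626, 120 },   // tuple not found       -> HA_ERR_KEY_NOT_FOUND
      { 630, 121 },   // duplicate primary key -> HA_ERR_FOUND_DUPP_KEY
      { 893, 121 },   // unique constraint     -> HA_ERR_FOUND_DUPP_KEY (this patch)
      { -1,  -1  }    // sentinel (assumed)
    };

    static int map_ndb_error(int ndb_code)
    {
      for (const err_code_mapping *m= err_map_sketch; m->ndb_err != -1; m++)
        if (m->ndb_err == ndb_code)
          return m->my_err;
      return ndb_code;   // unmapped: the raw NDB code passes through
    }

    int main()
    {
      printf("893 -> %d\n", map_ndb_error(893));   // prints "893 -> 121"
      return 0;
    }
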
@@ -1018,7 +1018,8 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
{
Field *field= table->field[i];
if ((thd->query_id == field->query_id) ||
- m_retrieve_all_fields)
+ m_retrieve_all_fields ||
+ (field->flags & PRI_KEY_FLAG) && m_retrieve_primary_key)
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(trans->getNdbError());
@@ -1029,7 +1030,6 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
m_value[i].ptr= NULL;
}
}
-
if (execute_no_commit_ie(this,trans) != 0)
{
table->status= STATUS_NOT_FOUND;
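
This pk_read() hunk is the consumer side of the new m_retrieve_primary_key flag set in extra() further down: primary-key columns are now fetched even when the query itself does not reference them. A hypothetical standalone rendering of the patched predicate (the helper does not exist in the source; it only restates the condition):

    // Hypothetical helper equivalent to the patched condition in pk_read().
    static bool fetch_column(bool referenced_by_query,  // thd->query_id == field->query_id
                             bool retrieve_all_fields,  // m_retrieve_all_fields
                             bool is_pk_column,         // field->flags & PRI_KEY_FLAG
                             bool retrieve_primary_key) // m_retrieve_primary_key
    {
      // && binds tighter than ||, so the new flag only ever adds
      // primary-key columns; the committed code relies on exactly this
      // precedence, though compilers may suggest explicit parentheses.
      return referenced_by_query ||
             retrieve_all_fields ||
             (is_pk_column && retrieve_primary_key);
    }
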
@@ -1093,6 +1093,34 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
DBUG_RETURN(0);
}
+/*
+ Peek to check if a particular row already exists
+*/
+
+int ha_ndbcluster::peek_row()
+{
+ NdbConnection *trans= m_active_trans;
+ NdbOperation *op;
+ THD *thd= current_thd;
+ DBUG_ENTER("peek_row");
+
+ NdbOperation::LockMode lm=
+ (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
+ if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) ||
+ op->readTuple(lm) != 0)
+ ERR_RETURN(trans->getNdbError());
+
+ int res;
+ if ((res= set_primary_key(op)))
+ ERR_RETURN(trans->getNdbError());
+
+ if (execute_no_commit_ie(this,trans) != 0)
+ {
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(ndb_err(trans));
+ }
+ DBUG_RETURN(0);
+}
/*
Read one record from NDB using unique secondary index
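
The new peek_row() issues a primary-key read that calls no getValue() at all; the only information wanted is whether the read succeeds. It goes through execute_no_commit_ie() (the _ie suffix presumably meaning "ignore error") so a missing row comes back as an error code instead of aborting the transaction, and the 626 entry in err_map turns that code into HA_ERR_KEY_NOT_FOUND. A sketch of the three-way result handling this gives callers (mirrors the write_row() hunk below; constants inlined from my_base.h):

    // Sketch only: how a caller of peek_row() separates "row exists",
    // "row absent" and "real failure".
    static const int KEY_NOT_FOUND=  120;  // HA_ERR_KEY_NOT_FOUND
    static const int FOUND_DUPP_KEY= 121;  // HA_ERR_FOUND_DUPP_KEY

    static int classify_peek(int peek_res)
    {
      if (peek_res == 0)
        return FOUND_DUPP_KEY;     // row exists: report duplicate key
      if (peek_res != KEY_NOT_FOUND)
        return peek_res;           // genuine error: propagate it
      return 0;                    // row absent: caller may insert
    }
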
@@ -1138,7 +1166,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
{
Field *field= table->field[i];
if ((thd->query_id == field->query_id) ||
- (field->flags & PRI_KEY_FLAG))
+ (field->flags & PRI_KEY_FLAG)) // && m_retrieve_primary_key ??
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
@@ -1566,7 +1594,7 @@ int ha_ndbcluster::filtered_scan(const byte *key, uint key_len,
Field* field= key_part->field;
uint ndb_fieldnr= key_part->fieldnr-1;
DBUG_PRINT("key_part", ("fieldnr: %d", ndb_fieldnr));
- // const NDBCOL *col= tab->getColumn(ndb_fieldnr);
+ //const NDBCOL *col= ((const NDBTAB *) m_table)->getColumn(ndb_fieldnr);
uint32 field_len= field->pack_length();
DBUG_DUMP("key", (char*)key, field_len);
@@ -1635,9 +1663,17 @@ int ha_ndbcluster::write_row(byte *record)
int res;
DBUG_ENTER("write_row");
- if(m_ignore_dup_key_not_supported)
+ if(m_ignore_dup_key && table->primary_key != MAX_KEY)
{
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ int peek_res= peek_row();
+
+ if (!peek_res)
+ {
+ m_dupkey= table->primary_key;
+ DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
+ }
+ if (peek_res != HA_ERR_KEY_NOT_FOUND)
+ DBUG_RETURN(peek_res);
}
statistic_increment(ha_write_count,&LOCK_status);
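
With the write_row() change above, the handler no longer rejects statements that ask for duplicate keys to be ignored (the old HA_ERR_WRONG_COMMAND path): when the table has a primary key it probes with peek_row() first and reports HA_ERR_FOUND_DUPP_KEY itself, so the server's IGNORE logic can skip the row. Duplicates on secondary unique indexes presumably still surface as NDB error 893 from the insert proper, which is what the err_map change at the top takes care of. Illustrative effect at the SQL level (assuming an NDB table t with a primary key):

    //   CREATE TABLE t (a INT PRIMARY KEY, b CHAR(1)) ENGINE=NDBCLUSTER;
    //   INSERT IGNORE INTO t VALUES (1,'a');  -- inserts the row
    //   INSERT IGNORE INTO t VALUES (1,'b');  -- before this patch: rejected
    //                                         -- (ignore-dup-key unsupported)
    //                                         -- after: silently skipped
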
@@ -1791,7 +1827,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
NdbOperation *op;
uint i;
DBUG_ENTER("update_row");
-
+
statistic_increment(ha_update_count,&LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
@@ -2650,15 +2686,15 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
m_use_write= TRUE;
} else
{
- if (table->keys)
- m_ignore_dup_key_not_supported= TRUE;
+ DBUG_PRINT("info", ("Ignoring duplicate key"));
+ m_ignore_dup_key= TRUE;
}
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= FALSE;
- m_ignore_dup_key_not_supported= FALSE;
+ m_ignore_dup_key= FALSE;
break;
case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
where field->query_id is the same as
@@ -2677,6 +2713,7 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
break;
case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_PRIMARY_KEY"));
+ m_retrieve_primary_key= TRUE;
break;
case HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_UNIQUE"));
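
HA_EXTRA_RETRIEVE_PRIMARY_KEY previously fell through as a no-op; it now latches m_retrieve_primary_key. As the remaining hunks show, the flag is deliberately short-lived (a condensed view of its lifecycle across this patch):

    //   ha_ndbcluster()  : m_retrieve_primary_key= FALSE  (constructor default)
    //   external_lock()  : m_retrieve_primary_key= FALSE  (start of transaction)
    //   start_stmt()     : m_retrieve_primary_key= FALSE  (start of statement)
    //   extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY)
    //                    : m_retrieve_primary_key= TRUE
    //   pk_read()        : fetch PK columns while the flag is set
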
@@ -2939,6 +2976,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
DBUG_ASSERT(m_active_trans);
// Start of transaction
m_retrieve_all_fields= FALSE;
+ m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
{
NDBDICT *dict= m_ndb->getDictionary();
@@ -3031,6 +3069,7 @@ int ha_ndbcluster::start_stmt(THD *thd)
// Start of statement
m_retrieve_all_fields= FALSE;
+ m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
DBUG_RETURN(error);
@@ -3637,9 +3676,10 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
HA_NO_PREFIX_CHAR_KEYS),
m_share(0),
m_use_write(FALSE),
- m_ignore_dup_key_not_supported(FALSE),
+ m_ignore_dup_key(FALSE),
m_primary_key_update(FALSE),
m_retrieve_all_fields(FALSE),
+ m_retrieve_primary_key(FALSE),
m_rows_to_insert(1),
m_rows_inserted(0),
m_bulk_insert_rows(1024),