Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_ndbcluster.cc  48
-rw-r--r--  sql/ha_ndbcluster.h    3
2 files changed, 34 insertions, 17 deletions
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 2460c2fb81a..d7bd24fe681 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -123,6 +123,8 @@ static const err_code_mapping err_map[]=
{ 827, HA_ERR_RECORD_FILE_FULL },
{ 832, HA_ERR_RECORD_FILE_FULL },
+ { 0, 1 },
+
{ -1, -1 }
};
@@ -258,8 +260,6 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
{
int res;
const NdbError err= trans->getNdbError();
- if (!err.code)
- return 0; // Don't log things to DBUG log if no error
DBUG_ENTER("ndb_err");
ERR_PRINT(err);
@@ -296,10 +296,11 @@ bool ha_ndbcluster::get_error_message(int error,
DBUG_ENTER("ha_ndbcluster::get_error_message");
DBUG_PRINT("enter", ("error: %d", error));
- if (!m_ndb)
+ Ndb *ndb= ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb;
+ if (!ndb)
DBUG_RETURN(false);
- const NdbError err= m_ndb->getNdbError(error);
+ const NdbError err= ndb->getNdbError(error);
bool temporary= err.status==NdbError::TemporaryError;
buf->set(err.message, strlen(err.message), &my_charset_bin);
DBUG_PRINT("exit", ("message: %s, temporary: %d", buf->ptr(), temporary));
@@ -530,7 +531,7 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob)
*/
int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
- uint fieldnr)
+ uint fieldnr, byte* buf)
{
DBUG_ENTER("get_ndb_value");
DBUG_PRINT("enter", ("fieldnr: %d flags: %o", fieldnr,
@@ -538,12 +539,15 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
if (field != NULL)
{
+ DBUG_ASSERT(buf);
if (ndb_supported_type(field->type()))
{
DBUG_ASSERT(field->ptr != NULL);
if (! (field->flags & BLOB_FLAG))
- {
- m_value[fieldnr].rec= ndb_op->getValue(fieldnr, field->ptr);
+ {
+ byte *field_buf= buf + (field->ptr - table->record[0]);
+ m_value[fieldnr].rec= ndb_op->getValue(fieldnr,
+ field_buf);
DBUG_RETURN(m_value[fieldnr].rec == NULL);
}
@@ -961,7 +965,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
ERR_RETURN(trans->getNdbError());
// Read key at the same time, for future reference
- if (get_ndb_value(op, NULL, no_fields))
+ if (get_ndb_value(op, NULL, no_fields, NULL))
ERR_RETURN(trans->getNdbError());
}
else
@@ -978,7 +982,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
if ((thd->query_id == field->query_id) ||
retrieve_all_fields)
{
- if (get_ndb_value(op, field, i))
+ if (get_ndb_value(op, field, i, buf))
ERR_RETURN(trans->getNdbError());
}
else
@@ -1032,7 +1036,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
if (!(field->flags & PRI_KEY_FLAG) &&
(thd->query_id != field->query_id))
{
- if (get_ndb_value(op, field, i))
+ if (get_ndb_value(op, field, i, new_data))
ERR_RETURN(trans->getNdbError());
}
}
@@ -1095,7 +1099,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
if ((thd->query_id == field->query_id) ||
(field->flags & PRI_KEY_FLAG))
{
- if (get_ndb_value(op, field, i))
+ if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
}
else
@@ -1494,7 +1498,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
(field->flags & PRI_KEY_FLAG) ||
retrieve_all_fields)
{
- if (get_ndb_value(op, field, i))
+ if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
}
else
@@ -1513,7 +1517,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
if (!tab->getColumn(hidden_no))
DBUG_RETURN(1);
#endif
- if (get_ndb_value(op, NULL, hidden_no))
+ if (get_ndb_value(op, NULL, hidden_no, NULL))
ERR_RETURN(op->getNdbError());
}
@@ -1535,6 +1539,11 @@ int ha_ndbcluster::write_row(byte *record)
NdbOperation *op;
int res;
DBUG_ENTER("write_row");
+
+ if(m_ignore_dup_key_not_supported)
+ {
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ }
statistic_increment(ha_write_count,&LOCK_status);
if (table->timestamp_default_now)
@@ -2493,14 +2502,20 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
break;
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
-
- DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
- m_use_write= TRUE;
+ if (current_thd->lex->sql_command == SQLCOM_REPLACE)
+ {
+ DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
+ m_use_write= TRUE;
+ } else
+ {
+ m_ignore_dup_key_not_supported= TRUE;
+ }
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= false;
+ m_ignore_dup_key_not_supported= false;
break;
case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
where field->query_id is the same as
@@ -3415,6 +3430,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
HA_NO_PREFIX_CHAR_KEYS),
m_share(0),
m_use_write(false),
+ m_ignore_dup_key_not_supported(false),
retrieve_all_fields(FALSE),
rows_to_insert(1),
rows_inserted(0),
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 97382243a01..217ba84b00a 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -211,7 +211,7 @@ class ha_ndbcluster: public handler
int set_ndb_key(NdbOperation*, Field *field,
uint fieldnr, const byte* field_ptr);
int set_ndb_value(NdbOperation*, Field *field, uint fieldnr);
- int get_ndb_value(NdbOperation*, Field *field, uint fieldnr);
+ int get_ndb_value(NdbOperation*, Field *field, uint fieldnr, byte*);
friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg);
int get_ndb_blobs_value(NdbBlob *last_ndb_blob);
int set_primary_key(NdbOperation *op, const byte *key);
@@ -247,6 +247,7 @@ class ha_ndbcluster: public handler
typedef union { NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
bool m_use_write;
+ bool m_ignore_dup_key_not_supported;
bool retrieve_all_fields;
ha_rows rows_to_insert;
ha_rows rows_inserted;