author    mskold/marty@linux.site <>    2007-10-02 14:23:59 +0200
committer mskold/marty@linux.site <>    2007-10-02 14:23:59 +0200
commit    a26772ca21ddf9a64f2b352d23f274fb48653b49 (patch)
tree      4db71a531808446bf0e9e4c1e095a5926459b26c /sql/ha_ndbcluster.cc
parent    63c7a66d2870cb5a81c773bd5548f3c5938913c1 (diff)
parent    403f0afc2926c6d2b3a438f6ba747bb9de5b13ec (diff)
download  mariadb-git-a26772ca21ddf9a64f2b352d23f274fb48653b49.tar.gz
Merge mysql.com:/windows/Linux_space/MySQL/mysql-5.0-ndb
into mysql.com:/windows/Linux_space/MySQL/mysql-5.1-new-ndb
Diffstat (limited to 'sql/ha_ndbcluster.cc')
-rw-r--r--  sql/ha_ndbcluster.cc | 33
1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 239b0fa13ec..69512e5e7c7 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1643,6 +1643,30 @@ int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const uchar *re
DBUG_RETURN(0);
}
+bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno)
+{
+ KEY* key_info= table->key_info + keyno;
+ KEY_PART_INFO* key_part= key_info->key_part;
+ KEY_PART_INFO* end= key_part+key_info->key_parts;
+ uint i;
+ DBUG_ENTER("check_index_fields_in_write_set");
+
+ if (m_retrieve_all_fields)
+ {
+ DBUG_RETURN(true);
+ }
+ for (i= 0; key_part != end; key_part++, i++)
+ {
+ Field* field= key_part->field;
+ if (field->query_id != current_thd->query_id)
+ {
+ DBUG_RETURN(false);
+ }
+ }
+
+ DBUG_RETURN(true);
+}
+
int ha_ndbcluster::set_index_key_from_record(NdbOperation *op,
const uchar *record, uint keyno)
{
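
The new check_index_fields_in_write_set() helper above answers a simple question: did the
current statement supply a complete value for index keyno? It returns true when all fields
are being retrieved anyway, or when every key part's field carries the current query_id;
otherwise the index cannot be used for a duplicate-key peek. A minimal, self-contained
sketch of that idea, using toy stand-ins rather than the real MySQL Field / KEY_PART_INFO /
KEY classes:

#include <cstdint>
#include <vector>

// Toy stand-ins for MySQL's Field / KEY_PART_INFO / KEY structures.
struct ToyField   { uint64_t query_id; };   // id of the last query that assigned this column
struct ToyKeyPart { ToyField *field; };
struct ToyKey     { std::vector<ToyKeyPart> parts; };

// Mirrors the loop in check_index_fields_in_write_set(): the key is usable
// only if every key column was written by the currently running query.
static bool key_fully_in_write_set(const ToyKey &key, uint64_t current_query_id)
{
  for (const ToyKeyPart &part : key.parts)
  {
    if (part.field->query_id != current_query_id)
      return false;          // at least one key column was not supplied
  }
  return true;               // a complete key value is available
}
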
@@ -1961,8 +1985,8 @@ check_null_in_record(const KEY* key_info, const uchar *record)
* primary key or unique index values
*/
-int ha_ndbcluster::peek_indexed_rows(const uchar *record,
- bool check_pk)
+int ha_ndbcluster::peek_indexed_rows(const uchar *record,
+ NDB_WRITE_OP write_op)
{
NdbTransaction *trans= m_active_trans;
NdbOperation *op;
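
The type of the new second parameter, NDB_WRITE_OP, is declared in ha_ndbcluster.h and is
not part of this diff. Judging only from the call sites in the hunks below (NDB_INSERT,
NDB_UPDATE, NDB_PK_UPDATE), it is presumably a small enum along these lines; the exact
enumerator values are an assumption:

// Assumed shape of the NDB_WRITE_OP enum; only the three enumerator names
// used by this patch are known from the diff itself.
enum NDB_WRITE_OP
{
  NDB_INSERT= 0,     // plain INSERT, i.e. write_row()
  NDB_UPDATE= 1,     // UPDATE that leaves the primary key unchanged
  NDB_PK_UPDATE= 2   // UPDATE that modifies primary key columns
};
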
@@ -2721,7 +2745,7 @@ int ha_ndbcluster::write_row(uchar *record)
start_bulk_insert will set parameters to ensure that each
write_row is committed individually
*/
- int peek_res= peek_indexed_rows(record, TRUE);
+ int peek_res= peek_indexed_rows(record, NDB_INSERT);
if (!peek_res)
{
@@ -2965,7 +2989,8 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE ||
thd->lex->sql_command == SQLCOM_UPDATE_MULTI))
{
- int peek_res= peek_indexed_rows(new_data, pk_update);
+ NDB_WRITE_OP write_op= (pk_update) ? NDB_PK_UPDATE : NDB_UPDATE;
+ int peek_res= peek_indexed_rows(new_data, write_op);
if (!peek_res)
{
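
Replacing the check_pk flag with an operation code lets peek_indexed_rows() decide for
itself which indexes to probe. The body of that change is outside the hunks shown here,
but presumably only an INSERT or a primary-key-changing UPDATE can collide on the primary
key, while a plain UPDATE needs to peek unique secondary indexes only. A sketch of that
decision, reusing the NDB_WRITE_OP enum assumed above and a hypothetical helper name:

// Hypothetical helper; not part of the patch.
static bool primary_key_needs_peek(NDB_WRITE_OP write_op, bool table_has_primary_key)
{
  // Only an INSERT or a primary-key-changing UPDATE can introduce a duplicate PK.
  return table_has_primary_key && write_op != NDB_UPDATE;
}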