Diffstat (limited to 'sql/ha_ndbcluster.cc')
-rw-r--r--  sql/ha_ndbcluster.cc  127
1 files changed, 94 insertions, 33 deletions
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index b0ddc95a38b..441da21bf1c 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -580,7 +580,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
bzero((char*) &table_list,sizeof(table_list));
table_list.db= m_dbname;
table_list.alias= table_list.table_name= m_tabname;
- close_cached_tables(thd, 0, &table_list);
+ close_cached_tables(thd, &table_list, FALSE, FALSE, FALSE);
break;
}
default:
@@ -591,6 +591,24 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
err.code, res));
if (res == HA_ERR_FOUND_DUPP_KEY)
{
+ char *error_data= err.details;
+ uint dupkey= MAX_KEY;
+
+ for (uint i= 0; i < MAX_KEY; i++)
+ {
+ if (m_index[i].type == UNIQUE_INDEX ||
+ m_index[i].type == UNIQUE_ORDERED_INDEX)
+ {
+ const NDBINDEX *unique_index=
+ (const NDBINDEX *) m_index[i].unique_index;
+ if (unique_index &&
+ (char *) unique_index->getObjectId() == error_data)
+ {
+ dupkey= i;
+ break;
+ }
+ }
+ }
if (m_rows_to_insert == 1)
{
/*
@@ -598,7 +616,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
violations here, so we need to return MAX_KEY for non-primary
to signal that key is unknown
*/
- m_dupkey= err.code == 630 ? table_share->primary_key : MAX_KEY;
+ m_dupkey= err.code == 630 ? table_share->primary_key : dupkey;
}
else
{
@@ -621,7 +639,7 @@ bool ha_ndbcluster::get_error_message(int error,
DBUG_ENTER("ha_ndbcluster::get_error_message");
DBUG_PRINT("enter", ("error: %d", error));
- Ndb *ndb= get_ndb();
+ Ndb *ndb= check_ndb_in_thd(current_thd);
if (!ndb)
DBUG_RETURN(FALSE);
@@ -829,12 +847,12 @@ NdbBlob::ActiveHook g_get_ndb_blobs_value;
/**
Callback to read all blob values.
- - not done in unpack_record because unpack_record is valid
- after execute(Commit) but reading blobs is not
- - may only generate read operations; they have to be executed
- somewhere before the data is available
- - due to single buffer for all blobs, we let the last blob
- process all blobs (last so that all are active)
+ - not done in unpack_record because unpack_record is valid
+ after execute(Commit) but reading blobs is not
+ - may only generate read operations; they have to be executed
+ somewhere before the data is available
+ - due to single buffer for all blobs, we let the last blob
+ process all blobs (last so that all are active)
- null bit is still set in unpack_record.
@todo
@@ -2708,6 +2726,29 @@ int ha_ndbcluster::full_table_scan(uchar *buf)
DBUG_RETURN(next_result(buf));
}
+int
+ha_ndbcluster::set_auto_inc(Field *field)
+{
+ DBUG_ENTER("ha_ndbcluster::set_auto_inc");
+ Ndb *ndb= get_ndb();
+ bool read_bit= bitmap_is_set(table->read_set, field->field_index);
+ bitmap_set_bit(table->read_set, field->field_index);
+ Uint64 next_val= (Uint64) field->val_int() + 1;
+ if (!read_bit)
+ bitmap_clear_bit(table->read_set, field->field_index);
+#ifndef DBUG_OFF
+ char buff[22];
+ DBUG_PRINT("info",
+ ("Trying to set next auto increment value to %s",
+ llstr(next_val, buff)));
+#endif
+ Ndb_tuple_id_range_guard g(m_share);
+ if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
+ == -1)
+ ERR_RETURN(ndb->getNdbError());
+ DBUG_RETURN(0);
+}
+
/**
Insert one record into NDB.
*/
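
Note on the new set_auto_inc() helper above: field->val_int() expects (at least in debug builds) the column to be marked in table->read_set, so the helper sets the bit for the duration of the read and restores the previous state afterwards. A standalone sketch of that save/set/restore pattern, with std::bitset standing in for MY_BITMAP (names and values are illustrative only):

  // Remember whether the column was already marked for read, mark it, read
  // the value, then clear the mark only if it was set here.
  #include <bitset>
  #include <cstdio>

  int main()
  {
    std::bitset<16> read_set;          // stand-in for table->read_set
    const unsigned field_index= 3;     // stand-in for field->field_index

    bool read_bit= read_set.test(field_index);  // bitmap_is_set()
    read_set.set(field_index);                  // bitmap_set_bit()

    unsigned long long value= 41;               // stand-in for field->val_int()
    unsigned long long next_val= value + 1;     // value to hand to NDB

    if (!read_bit)
      read_set.reset(field_index);              // bitmap_clear_bit()

    printf("next auto-increment value: %llu\n", next_val);
    return 0;
  }
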
@@ -2914,18 +2955,11 @@ int ha_ndbcluster::write_row(uchar *record)
}
if ((has_auto_increment) && (m_skip_auto_increment))
{
- Ndb *ndb= get_ndb();
- Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1;
-#ifndef DBUG_OFF
- char buff[22];
- DBUG_PRINT("info",
- ("Trying to set next auto increment value to %s",
- llstr(next_val, buff)));
-#endif
- Ndb_tuple_id_range_guard g(m_share);
- if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
- == -1)
- ERR_RETURN(ndb->getNdbError());
+ int ret_val;
+ if ((ret_val= set_auto_inc(table->next_number_field)))
+ {
+ DBUG_RETURN(ret_val);
+ }
}
m_skip_auto_increment= TRUE;
@@ -3052,6 +3086,17 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
// Insert new row
DBUG_PRINT("info", ("delete succeded"));
m_primary_key_update= TRUE;
+ /*
+ If we are updating a primary key with auto_increment
+ then we need to update the auto_increment counter
+ */
+ if (table->found_next_number_field &&
+ bitmap_is_set(table->write_set,
+ table->found_next_number_field->field_index) &&
+ (error= set_auto_inc(table->found_next_number_field)))
+ {
+ DBUG_RETURN(error);
+ }
insert_res= write_row(new_data);
m_primary_key_update= FALSE;
if (insert_res)
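
Note on the two update_row() hunks (here and below): when an UPDATE moves an auto_increment key to an explicit value, e.g. UPDATE t SET id= 1000 WHERE id= 1, the NDB auto-increment counter has to be pushed past the new value or a later INSERT could collide; the final TRUE passed to setAutoIncrementValue() in set_auto_inc() presumably asks NDB to move the counter forward only. A standalone sketch of that forward-only behaviour (FakeCounter is a hypothetical model, not the NDB API):

  #include <algorithm>
  #include <cstdio>

  struct FakeCounter
  {
    unsigned long long next_value;     // next value a plain INSERT would get
    void set_if_larger(unsigned long long requested)
    { next_value= std::max(next_value, requested); }
  };

  int main()
  {
    FakeCounter counter= { 5 };

    // e.g. UPDATE t SET id= 1000 WHERE id= 1: push the counter past 1000
    counter.set_if_larger(1000 + 1);
    printf("next value: %llu\n", counter.next_value);   // 1001

    // updating a key to a smaller value must not pull the counter back
    counter.set_if_larger(3 + 1);
    printf("next value: %llu\n", counter.next_value);   // still 1001
    return 0;
  }
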
@@ -3074,7 +3119,17 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
DBUG_PRINT("info", ("delete+insert succeeded"));
DBUG_RETURN(0);
}
-
+ /*
+ If we are updating a unique key with auto_increment
+ then we need to update the auto_increment counter
+ */
+ if (table->found_next_number_field &&
+ bitmap_is_set(table->write_set,
+ table->found_next_number_field->field_index) &&
+ (error= set_auto_inc(table->found_next_number_field)))
+ {
+ DBUG_RETURN(error);
+ }
if (cursor)
{
/*
@@ -4481,9 +4536,11 @@ int ha_ndbcluster::init_handler_for_statement(THD *thd, Thd_ndb *thd_ndb)
// store thread specific data first to set the right context
m_force_send= thd->variables.ndb_force_send;
m_ha_not_exact_count= !thd->variables.ndb_use_exact_count;
- m_autoincrement_prefetch=
- (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz;
-
+ m_autoincrement_prefetch=
+ (thd->variables.ndb_autoincrement_prefetch_sz >
+ NDB_DEFAULT_AUTO_PREFETCH) ?
+ (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz
+ : (ha_rows) NDB_DEFAULT_AUTO_PREFETCH;
m_active_trans= thd_ndb->trans;
DBUG_ASSERT(m_active_trans);
// Start of transaction
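
Note on the hunk above: the per-session ndb_autoincrement_prefetch_sz is now used only when it exceeds the built-in default, so a very small session setting no longer shrinks the prefetch window. A standalone sketch of the clamp; NDB_DEFAULT_AUTO_PREFETCH is assumed to be 32 here, matching the literal it replaces in the constructor further down:

  #include <cstdio>

  static const unsigned NDB_DEFAULT_AUTO_PREFETCH= 32;   // assumed value

  static unsigned effective_prefetch(unsigned session_prefetch_sz)
  {
    return (session_prefetch_sz > NDB_DEFAULT_AUTO_PREFETCH) ?
            session_prefetch_sz : NDB_DEFAULT_AUTO_PREFETCH;
  }

  int main()
  {
    printf("%u\n", effective_prefetch(1));     // 32: small settings are raised
    printf("%u\n", effective_prefetch(256));   // 256: larger settings kept
    return 0;
  }
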
@@ -6171,8 +6228,9 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong *first_value,
ulonglong *nb_reserved_values)
{
- int cache_size;
+ uint cache_size;
Uint64 auto_value;
+ THD *thd= current_thd;
DBUG_ENTER("get_auto_increment");
DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
Ndb *ndb= get_ndb();
@@ -6182,11 +6240,14 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
/* We guessed too low */
m_rows_to_insert+= m_autoincrement_prefetch;
}
- cache_size=
- (int) ((m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
- m_rows_to_insert - m_rows_inserted :
- ((m_rows_to_insert > m_autoincrement_prefetch) ?
- m_rows_to_insert : m_autoincrement_prefetch));
+ uint remaining= m_rows_to_insert - m_rows_inserted;
+ uint min_prefetch=
+ (remaining < thd->variables.ndb_autoincrement_prefetch_sz) ?
+ thd->variables.ndb_autoincrement_prefetch_sz
+ : remaining;
+ cache_size= ((remaining < m_autoincrement_prefetch) ?
+ min_prefetch
+ : remaining);
uint retries= NDB_AUTO_INCREMENT_RETRIES;
int retry_sleep= 30; /* 30 milliseconds, transaction */
for (;;)
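
Note on the hunk above: the rewritten cache_size computation reserves at least the session prefetch size when only a few rows remain, and reserves the full remaining row count during large multi-row inserts. A standalone recomputation of the new logic with illustrative values:

  #include <cstdio>

  static unsigned cache_size(unsigned rows_to_insert, unsigned rows_inserted,
                             unsigned session_prefetch_sz,
                             unsigned handler_prefetch)
  {
    unsigned remaining= rows_to_insert - rows_inserted;
    unsigned min_prefetch= (remaining < session_prefetch_sz) ?
                            session_prefetch_sz : remaining;
    return (remaining < handler_prefetch) ? min_prefetch : remaining;
  }

  int main()
  {
    // single-row INSERT, prefetch 32: reserve 32 values
    printf("%u\n", cache_size(1, 0, 32, 32));
    // bulk insert with 1000 rows still to go: reserve 1000 values
    printf("%u\n", cache_size(1000, 0, 32, 32));
    return 0;
  }
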
@@ -6273,7 +6334,7 @@ ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg):
m_dupkey((uint) -1),
m_ha_not_exact_count(FALSE),
m_force_send(TRUE),
- m_autoincrement_prefetch((ha_rows) 32),
+ m_autoincrement_prefetch((ha_rows) NDB_DEFAULT_AUTO_PREFETCH),
m_transaction_on(TRUE),
m_cond(NULL),
m_multi_cursor(NULL)
@@ -8078,7 +8139,7 @@ int handle_trailing_share(NDB_SHARE *share)
table_list.db= share->db;
table_list.alias= table_list.table_name= share->table_name;
safe_mutex_assert_owner(&LOCK_open);
- close_cached_tables(thd, 0, &table_list, TRUE);
+ close_cached_tables(thd, &table_list, TRUE, FALSE, FALSE);
pthread_mutex_lock(&ndbcluster_mutex);
/* ndb_share reference temporary free */