From 4cf742a8a809aee1b1c454ac7fc7969555c2aed8 Mon Sep 17 00:00:00 2001
From: "mskold/marty@mysql.com/quadfish.(none)" <>
Date: Mon, 5 Nov 2007 20:18:22 +0100
Subject: ha_ndbcluster.h:
  Bug #31956 auto increment bugs in MySQL Cluster:
  Added utility method and constant for internal prefetch default
ndb_auto_increment.result:
  BitKeeper file /home/marty/MySQL/mysql-5.0-ndb/mysql-test/r/ndb_auto_increment.result
mysqld.cc:
  Bug #25176 Trying to set ndb_autoincrement_prefetch_sz always fails:
  Changed pointer to max value
  Bug #31956 auto increment bugs in MySQL Cluster:
  Changed meaning of ndb_autoincrement_prefetch_sz to specify prefetch between
  statements, changed default to 1 (with internal prefetch to at least 32
  inside a statement)
ndb_insert.test, ndb_insert.result:
  Moved auto_increment tests to ndb_auto_increment.test
ndb_auto_increment.test:
  BitKeeper file /home/marty/MySQL/mysql-5.0-ndb/mysql-test/t/ndb_auto_increment.test
ha_ndbcluster.cc:
  Bug #31956 auto increment bugs in MySQL Cluster:
  Changed meaning of ndb_autoincrement_prefetch_sz to specify prefetch between
  statements, changed default to 1 (with internal prefetch to at least 32
  inside a statement), added handling of updates of pk/unique key with
  auto_increment
  Bug #32055 Cluster does not handle auto inc correctly with insert ignore
  statement
---
 sql/ha_ndbcluster.cc | 84 ++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 62 insertions(+), 22 deletions(-)
(limited to 'sql/ha_ndbcluster.cc')

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 00e4621ec1a..de6187da70f 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2259,6 +2259,25 @@ int ha_ndbcluster::full_table_scan(byte *buf)
   DBUG_RETURN(next_result(buf));
 }
 
+int
+ha_ndbcluster::set_auto_inc(Field *field)
+{
+  Ndb *ndb= get_ndb();
+  Uint64 next_val= (Uint64) field->val_int() + 1;
+  DBUG_ENTER("ha_ndbcluster::set_auto_inc");
+#ifndef DBUG_OFF
+  char buff[22];
+  DBUG_PRINT("info",
+             ("Trying to set next auto increment value to %s",
+              llstr(next_val, buff)));
+#endif
+  if (ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE)
+      == -1)
+    ERR_RETURN(ndb->getNdbError());
+  DBUG_RETURN(0);
+}
+
+
 /*
   Insert one record into NDB
 */
@@ -2413,17 +2432,11 @@ int ha_ndbcluster::write_row(byte *record)
   }
   if ((has_auto_increment) && (m_skip_auto_increment))
   {
-    Ndb *ndb= get_ndb();
-    Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1;
-#ifndef DBUG_OFF
-    char buff[22];
-    DBUG_PRINT("info",
-               ("Trying to set next auto increment value to %s",
-                llstr(next_val, buff)));
-#endif
-    if (ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE)
-        == -1)
-      ERR_RETURN(ndb->getNdbError());
+    int ret_val;
+    if ((ret_val= set_auto_inc(table->next_number_field)))
+    {
+      DBUG_RETURN(ret_val);
+    }
   }
   m_skip_auto_increment= TRUE;
 
@@ -2476,6 +2489,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
   NdbScanOperation* cursor= m_active_cursor;
   NdbOperation *op;
   uint i;
+  int auto_res;
   bool pk_update= (table->s->primary_key != MAX_KEY &&
                    key_cmp(table->s->primary_key, old_data, new_data));
   DBUG_ENTER("update_row");
@@ -2531,6 +2545,16 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
     // Insert new row
     DBUG_PRINT("info", ("delete succeded"));
     m_primary_key_update= TRUE;
+    /*
+      If we are updating a primary key with auto_increment
+      then we need to update the auto_increment counter
+    */
+    if (table->found_next_number_field &&
+        table->found_next_number_field->query_id == thd->query_id &&
+        (auto_res= set_auto_inc(table->found_next_number_field)))
+    {
+      DBUG_RETURN(auto_res);
+    }
     insert_res= write_row(new_data);
     m_primary_key_update= FALSE;
     if (insert_res)
@@ -2553,7 +2577,16 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
     DBUG_PRINT("info", ("delete+insert succeeded"));
     DBUG_RETURN(0);
   }
-
+  /*
+    If we are updating a unique key with auto_increment
+    then we need to update the auto_increment counter
+  */
+  if (table->found_next_number_field &&
+      table->found_next_number_field->query_id == thd->query_id &&
+      (auto_res= set_auto_inc(table->found_next_number_field)))
+  {
+    DBUG_RETURN(auto_res);
+  }
   if (cursor)
   {
     /*
@@ -3841,9 +3874,11 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
     // store thread specific data first to set the right context
     m_force_send= thd->variables.ndb_force_send;
     m_ha_not_exact_count= !thd->variables.ndb_use_exact_count;
-    m_autoincrement_prefetch=
-      (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz;
-
+    m_autoincrement_prefetch=
+      (thd->variables.ndb_autoincrement_prefetch_sz >
+       NDB_DEFAULT_AUTO_PREFETCH) ?
+      (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz
+      : (ha_rows) NDB_DEFAULT_AUTO_PREFETCH;
     m_active_trans= thd_ndb->all ? thd_ndb->all : thd_ndb->stmt;
     DBUG_ASSERT(m_active_trans);
     // Start of transaction
@@ -4868,8 +4903,9 @@ ulonglong ha_ndbcluster::get_auto_increment()
 {
   int cache_size;
   Uint64 auto_value;
-  Uint64 step= current_thd->variables.auto_increment_increment;
-  Uint64 start= current_thd->variables.auto_increment_offset;
+  THD *thd= current_thd;
+  Uint64 step= thd->variables.auto_increment_increment;
+  Uint64 start= thd->variables.auto_increment_offset;
   DBUG_ENTER("get_auto_increment");
   DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
   Ndb *ndb= get_ndb();
@@ -4879,11 +4915,15 @@ ulonglong ha_ndbcluster::get_auto_increment()
     /* We guessed too low */
     m_rows_to_insert+= m_autoincrement_prefetch;
   }
+  int remaining= m_rows_to_insert - m_rows_inserted;
+  int min_prefetch=
+    (remaining < thd->variables.ndb_autoincrement_prefetch_sz) ?
+    thd->variables.ndb_autoincrement_prefetch_sz
+    : remaining;
   cache_size=
-    (int) ((m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
-           m_rows_to_insert - m_rows_inserted :
-           ((m_rows_to_insert > m_autoincrement_prefetch) ?
-            m_rows_to_insert : m_autoincrement_prefetch));
+    (int) ((remaining < m_autoincrement_prefetch) ?
+           min_prefetch
+           : remaining);
   uint retries= NDB_AUTO_INCREMENT_RETRIES;
   int retry_sleep= 30; /* 30 milliseconds, transaction */
   for (;;)
@@ -4953,7 +4993,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
   m_dupkey((uint) -1),
   m_ha_not_exact_count(FALSE),
   m_force_send(TRUE),
-  m_autoincrement_prefetch((ha_rows) 32),
+  m_autoincrement_prefetch((ha_rows) NDB_DEFAULT_AUTO_PREFETCH),
   m_transaction_on(TRUE),
   m_cond(NULL),
   m_multi_cursor(NULL)
--
cgit v1.2.1

From 022b835c67269bcae7e44ba873704d89fd9bd23c Mon Sep 17 00:00:00 2001
From: "mskold/marty@mysql.com/quadfish.(none)" <>
Date: Tue, 6 Nov 2007 10:28:11 +0100
Subject: Removed compiler warnings
---
 sql/ha_ndbcluster.cc | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
(limited to 'sql/ha_ndbcluster.cc')

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index de6187da70f..d29e9345c11 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -4901,7 +4901,7 @@ int ha_ndbcluster::drop_table()
 
 ulonglong ha_ndbcluster::get_auto_increment()
 {
-  int cache_size;
+  uint cache_size;
   Uint64 auto_value;
   THD *thd= current_thd;
   Uint64 step= thd->variables.auto_increment_increment;
@@ -4915,15 +4915,14 @@ ulonglong ha_ndbcluster::get_auto_increment()
     /* We guessed too low */
     m_rows_to_insert+= m_autoincrement_prefetch;
   }
-  int remaining= m_rows_to_insert - m_rows_inserted;
-  int min_prefetch=
+  uint remaining= m_rows_to_insert - m_rows_inserted;
+  uint min_prefetch=
     (remaining < thd->variables.ndb_autoincrement_prefetch_sz) ?
     thd->variables.ndb_autoincrement_prefetch_sz
     : remaining;
-  cache_size=
-    (int) ((remaining < m_autoincrement_prefetch) ?
-           min_prefetch
-           : remaining);
+  cache_size= ((remaining < m_autoincrement_prefetch) ?
+               min_prefetch
+               : remaining);
   uint retries= NDB_AUTO_INCREMENT_RETRIES;
   int retry_sleep= 30; /* 30 milliseconds, transaction */
   for (;;)
--
cgit v1.2.1

From 56ab11c8cb3e00ce5eb88ca960422723de1b7fa8 Mon Sep 17 00:00:00 2001
From: "mskold/marty@mysql.com/quadfish.(none)" <>
Date: Tue, 6 Nov 2007 10:57:49 +0100
Subject: ndb_auto_increment.result:
  Rename: mysql-test/r/ndb_auto_increment.result -> mysql-test/suite/ndb/r/ndb_auto_increment.result
ndb_auto_increment.test, ndb_auto_increment.result:
  Bug #31956 auto increment bugs in MySQL Cluster:
  Adapted test cases
ha_ndbcluster.cc:
  Bug #31956 auto increment bugs in MySQL Cluster:
  Merging from 5.0
ndb_auto_increment.test:
  Rename: mysql-test/t/ndb_auto_increment.test -> mysql-test/suite/ndb/t/ndb_auto_increment.test
---
 sql/ha_ndbcluster.cc | 89 ++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 66 insertions(+), 23 deletions(-)
(limited to 'sql/ha_ndbcluster.cc')

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 2294d836854..93722da574c 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2704,6 +2704,29 @@ int ha_ndbcluster::full_table_scan(uchar *buf)
   DBUG_RETURN(next_result(buf));
 }
 
+int
+ha_ndbcluster::set_auto_inc(Field *field)
+{
+  DBUG_ENTER("ha_ndbcluster::set_auto_inc");
+  Ndb *ndb= get_ndb();
+  bool read_bit= bitmap_is_set(table->read_set, field->field_index);
+  bitmap_set_bit(table->read_set, field->field_index);
+  Uint64 next_val= (Uint64) field->val_int() + 1;
+  if (!read_bit)
+    bitmap_clear_bit(table->read_set, field->field_index);
+#ifndef DBUG_OFF
+  char buff[22];
+  DBUG_PRINT("info",
+             ("Trying to set next auto increment value to %s",
+              llstr(next_val, buff)));
+#endif
+  Ndb_tuple_id_range_guard g(m_share);
+  if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
+      == -1)
+    ERR_RETURN(ndb->getNdbError());
+  DBUG_RETURN(0);
+}
+
 /*
   Insert one record into NDB
 */
@@ -2910,18 +2933,11 @@ int ha_ndbcluster::write_row(uchar *record)
   }
   if ((has_auto_increment) && (m_skip_auto_increment))
   {
-    Ndb *ndb= get_ndb();
-    Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1;
-#ifndef DBUG_OFF
-    char buff[22];
-    DBUG_PRINT("info",
-               ("Trying to set next auto increment value to %s",
-                llstr(next_val, buff)));
-#endif
-    Ndb_tuple_id_range_guard g(m_share);
-    if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
-        == -1)
-      ERR_RETURN(ndb->getNdbError());
+    int ret_val;
+    if ((ret_val= set_auto_inc(table->next_number_field)))
+    {
+      DBUG_RETURN(ret_val);
+    }
   }
   m_skip_auto_increment= TRUE;
 
@@ -3046,6 +3062,17 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
     // Insert new row
     DBUG_PRINT("info", ("delete succeded"));
     m_primary_key_update= TRUE;
+    /*
+      If we are updating a primary key with auto_increment
+      then we need to update the auto_increment counter
+    */
+    if (table->found_next_number_field &&
+        bitmap_is_set(table->write_set,
+                      table->found_next_number_field->field_index) &&
+        (error= set_auto_inc(table->found_next_number_field)))
+    {
+      DBUG_RETURN(error);
+    }
     insert_res= write_row(new_data);
     m_primary_key_update= FALSE;
     if (insert_res)
@@ -3068,7 +3095,17 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
     DBUG_PRINT("info", ("delete+insert succeeded"));
     DBUG_RETURN(0);
   }
-
+  /*
+    If we are updating a unique key with auto_increment
+    then we need to update the auto_increment counter
+  */
+  if (table->found_next_number_field &&
+      bitmap_is_set(table->write_set,
+                    table->found_next_number_field->field_index) &&
+      (error= set_auto_inc(table->found_next_number_field)))
+  {
+    DBUG_RETURN(error);
+  }
   if (cursor)
   {
     /*
@@ -4478,9 +4515,11 @@ int ha_ndbcluster::init_handler_for_statement(THD *thd, Thd_ndb *thd_ndb)
   // store thread specific data first to set the right context
   m_force_send= thd->variables.ndb_force_send;
   m_ha_not_exact_count= !thd->variables.ndb_use_exact_count;
-  m_autoincrement_prefetch=
-    (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz;
-
+  m_autoincrement_prefetch=
+    (thd->variables.ndb_autoincrement_prefetch_sz >
+     NDB_DEFAULT_AUTO_PREFETCH) ?
+    (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz
+    : (ha_rows) NDB_DEFAULT_AUTO_PREFETCH;
   m_active_trans= thd_ndb->trans;
   DBUG_ASSERT(m_active_trans);
   // Start of transaction
@@ -6163,8 +6202,9 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
                                        ulonglong *first_value,
                                        ulonglong *nb_reserved_values)
 {
-  int cache_size;
+  uint cache_size;
   Uint64 auto_value;
+  THD *thd= current_thd;
   DBUG_ENTER("get_auto_increment");
   DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
   Ndb *ndb= get_ndb();
@@ -6174,11 +6214,14 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
     /* We guessed too low */
     m_rows_to_insert+= m_autoincrement_prefetch;
   }
-  cache_size=
-    (int) ((m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
-           m_rows_to_insert - m_rows_inserted :
-           ((m_rows_to_insert > m_autoincrement_prefetch) ?
-            m_rows_to_insert : m_autoincrement_prefetch));
+  uint remaining= m_rows_to_insert - m_rows_inserted;
+  uint min_prefetch=
+    (remaining < thd->variables.ndb_autoincrement_prefetch_sz) ?
+    thd->variables.ndb_autoincrement_prefetch_sz
+    : remaining;
+  cache_size= ((remaining < m_autoincrement_prefetch) ?
+               min_prefetch
+               : remaining);
   uint retries= NDB_AUTO_INCREMENT_RETRIES;
   int retry_sleep= 30; /* 30 milliseconds, transaction */
   for (;;)
@@ -6265,7 +6308,7 @@ ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg):
   m_dupkey((uint) -1),
   m_ha_not_exact_count(FALSE),
   m_force_send(TRUE),
-  m_autoincrement_prefetch((ha_rows) 32),
+  m_autoincrement_prefetch((ha_rows) NDB_DEFAULT_AUTO_PREFETCH),
   m_transaction_on(TRUE),
   m_cond(NULL),
   m_multi_cursor(NULL)
--
cgit v1.2.1

From 27c025061d8bd3478ea4bd2512dbf2e4ca0c56db Mon Sep 17 00:00:00 2001
From: "mskold/marty@mysql.com/quadfish.(none)" <>
Date: Thu, 6 Dec 2007 17:15:21 +0100
Subject: bug#21072 Duplicate key error in NDB references wrong key:
  Return correct key for non-batching inserts
---
 sql/ha_ndbcluster.cc | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)
(limited to 'sql/ha_ndbcluster.cc')

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index d29e9345c11..478347e4175 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -540,6 +540,27 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
                       err.code, res));
   if (res == HA_ERR_FOUND_DUPP_KEY)
   {
+    uint error_data= (uint) err.details;
+    uint dupkey= MAX_KEY;
+
+    DBUG_PRINT("info", ("HA_ERR_FOUND_DUPP_KEY, index table %u", error_data));
+    for (uint i= 0; i < MAX_KEY; i++)
+    {
+      if (m_index[i].type == UNIQUE_INDEX ||
+          m_index[i].type == UNIQUE_ORDERED_INDEX)
+      {
+        const NDBINDEX *unique_index=
+          (const NDBINDEX *) m_index[i].unique_index;
+        if (unique_index &&
+            unique_index->getIndexTable() &&
+            (uint) unique_index->getIndexTable()->getTableId() == error_data)
+        {
+          DBUG_PRINT("info", ("Found violated key %u", i));
+          dupkey= i;
+          break;
+        }
+      }
+    }
     if (m_rows_to_insert == 1)
     {
       /*
@@ -547,7 +568,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
         violations here, so we need to return MAX_KEY for non-primary
        to signal that key is unknown
      */
-      m_dupkey= err.code == 630 ? table->s->primary_key : MAX_KEY;
+      m_dupkey= err.code == 630 ? table->s->primary_key : dupkey;
     }
     else
     {
--
cgit v1.2.1

From fc6f839b03932fc527b16f7c78d5d3317cb903ed Mon Sep 17 00:00:00 2001
From: "mskold/marty@mysql.com/quadfish.(none)" <>
Date: Fri, 7 Dec 2007 10:33:50 +0100
Subject: bug#21072 Duplicate key error in NDB references wrong key:
  Post-review fixes
---
 sql/ha_ndbcluster.cc | 2 --
 1 file changed, 2 deletions(-)
(limited to 'sql/ha_ndbcluster.cc')

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 478347e4175..147aeeb24a8 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -543,7 +543,6 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
     uint error_data= (uint) err.details;
     uint dupkey= MAX_KEY;
 
-    DBUG_PRINT("info", ("HA_ERR_FOUND_DUPP_KEY, index table %u", error_data));
     for (uint i= 0; i < MAX_KEY; i++)
     {
       if (m_index[i].type == UNIQUE_INDEX ||
@@ -555,7 +554,6 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
             unique_index->getIndexTable() &&
             (uint) unique_index->getIndexTable()->getTableId() == error_data)
         {
-          DBUG_PRINT("info", ("Found violated key %u", i));
           dupkey= i;
           break;
         }
--
cgit v1.2.1

From af908e61b738f636400ff04fc0169e7ce5b0ae80 Mon Sep 17 00:00:00 2001
From: "mskold/marty@mysql.com/quadfish.(none)" <>
Date: Fri, 7 Dec 2007 11:05:19 +0100
Subject: bug#21072 Duplicate key error in NDB references wrong key:
  Post-merge fixes
---
 sql/ha_ndbcluster.cc | 1 -
 1 file changed, 1 deletion(-)
(limited to 'sql/ha_ndbcluster.cc')

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 55c45f6c687..a90f854f889 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -599,7 +599,6 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
         const NDBINDEX *unique_index=
           (const NDBINDEX *) m_index[i].unique_index;
         if (unique_index &&
-            unique_index->getIndexTable() &&
             (uint) unique_index->getObjectId() == error_data)
         {
           dupkey= i;
--
cgit v1.2.1

From 6047fb2e44ad06f3ad69160bde869f196afd07a7 Mon Sep 17 00:00:00 2001
From: "mskold/marty@mysql.com/quadfish.(none)" <>
Date: Fri, 7 Dec 2007 13:42:44 +0100
Subject: Removed illegal cast
---
 sql/ha_ndbcluster.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'sql/ha_ndbcluster.cc')

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 147aeeb24a8..b6d496fe915 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -540,7 +540,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
                       err.code, res));
   if (res == HA_ERR_FOUND_DUPP_KEY)
   {
-    uint error_data= (uint) err.details;
+    char *error_data= err.details;
     uint dupkey= MAX_KEY;
 
     for (uint i= 0; i < MAX_KEY; i++)
@@ -552,7 +552,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
           (const NDBINDEX *) m_index[i].unique_index;
         if (unique_index &&
             unique_index->getIndexTable() &&
-            unique_index->getIndexTable()->getTableId() == (int) error_data)
+            (char *) unique_index->getIndexTable()->getTableId() == error_data)
         {
           dupkey= i;
           break;
--
cgit v1.2.1

From 0c6288277611ff02c786424c3667ae8b6e00ef3f Mon Sep 17 00:00:00 2001
From: "mskold/marty@mysql.com/quadfish.(none)" <>
Date: Fri, 7 Dec 2007 14:06:44 +0100
Subject: Removed illegal cast
---
 sql/ha_ndbcluster.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'sql/ha_ndbcluster.cc')

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index fea50aeecdb..2d8df0f6a47 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -599,7 +599,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
         const NDBINDEX *unique_index=
           (const NDBINDEX *) m_index[i].unique_index;
         if (unique_index &&
-            (char *) unique_index->getObjectId() == (int) error_data)
+            (char *) unique_index->getObjectId() == error_data)
         {
           dupkey= i;
           break;
--
cgit v1.2.1
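
For reference, the auto-increment prefetch sizing that the Bug #31956 patches
above converge on can be read in isolation. The sketch below is not part of the
patch series; it restates the cache-size arithmetic from the final 5.1 versions
of init_handler_for_statement() and get_auto_increment(), with the handler
members and the ndb_autoincrement_prefetch_sz session variable passed in as
plain integers (the function and constant names here are illustrative only).

// Standalone sketch (not from the patches): how many auto-increment values
// the handler asks NDB to reserve in one round trip after Bug #31956.
#include <algorithm>
#include <cstdint>

static const uint64_t kNdbDefaultAutoPrefetch = 32;  // NDB_DEFAULT_AUTO_PREFETCH

// prefetch_sz is the ndb_autoincrement_prefetch_sz session variable
// (default 1 after the first commit above); the caller is assumed to have
// already bumped rows_to_insert when it guessed too low, as the handler does.
static uint64_t autoinc_cache_size(uint64_t rows_to_insert,
                                   uint64_t rows_inserted,
                                   uint64_t prefetch_sz)
{
  // init_handler_for_statement() clamps m_autoincrement_prefetch to at least
  // the internal default of 32 for the duration of the statement.
  uint64_t effective_prefetch = std::max(prefetch_sz, kNdbDefaultAutoPrefetch);
  uint64_t remaining = rows_to_insert - rows_inserted;
  if (remaining < effective_prefetch)
    return std::max(remaining, prefetch_sz);  // "min_prefetch" in the patch
  return remaining;                           // large statement: take the rest
}

With the new default of 1, a single-row INSERT reserves exactly one value, a
multi-row statement reserves its remaining rows in one request, and a statement
that underestimated its row count grows the reservation by
m_autoincrement_prefetch, which is now at least 32 inside a statement.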