author     unknown <aelkin/elkin@dsl-hkibras1-ff5dc300-70.dhcp.inet.fi>  2007-05-29 16:36:05 +0300
committer  unknown <aelkin/elkin@dsl-hkibras1-ff5dc300-70.dhcp.inet.fi>  2007-05-29 16:36:05 +0300
commit     814e4dd238b7a12764fc307dfa5945eb3ea670f9 (patch)
tree       811c41104b60e8b1f27cbb46772baf34f2bb4aba /sql/log_event.cc
parent     4268313e50e8ebe69e1baaef4d84f54c2764dfb1 (diff)
download   mariadb-git-814e4dd238b7a12764fc307dfa5945eb3ea670f9.tar.gz
Bug#27044  Replicated NDB table with a unique field allows duplicate-key inserts
The bug: the slave's copy of a table with a unique field could still execute an INSERT query as a REPLACE, whereas that is impossible on the master. The cause of this artifact is wrong usage of the NDB extra() hints. Fixed by resetting the flags in do_after_row_operations. There is an open issue with the symmetrical resetting of table->file->extra(HA_EXTRA_NO_IGNORE_NO_KEY), which has been handed over to bug#27077. The test for the current bug was committed in a changeset for bug#27320.

sql/log_event.cc:
  Fix do_after_row_operations to reset the effect of the engine extra() flags
  set in do_before_row_operations; add comments on the meaning of the extra()
  flags; execution of table->file->ha_end_bulk_insert() in do_after no longer
  depends on the incoming error.
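For orientation before the diff below: the fix pairs every handler extra() hint set in do_before_row_operations with a matching reset in do_after_row_operations, and ends the bulk insert unconditionally so its error is reported even when a row error already occurred. The fragment below is a minimal sketch of that pattern, not the patch itself; the helper name apply_write_rows_sketch and its simplified signature are invented for illustration, while the HA_EXTRA_* hints, ha_end_bulk_insert() and print_error() calls are the ones used in sql/log_event.cc.

    /* Sketch only: symmetric set/reset of handler hints around applying rows. */
    static int apply_write_rows_sketch(TABLE *table, int row_error)
    {
      /* Before the rows: behave like REPLACE and tolerate missing rows (NDB). */
      table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
      table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
      table->file->extra(HA_EXTRA_IGNORE_NO_KEY);

      /* ... row operations happen here, producing row_error ... */

      /*
        After the rows: undo the hints so later statements on this table are
        not silently executed as REPLACE.  HA_EXTRA_NO_IGNORE_NO_KEY is
        deliberately not reset here; see bug#27077.
      */
      table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
      table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);

      /* End the bulk insert regardless of row_error and report its own error. */
      int local_error= table->file->ha_end_bulk_insert();
      if (local_error)
        table->file->print_error(local_error, MYF(0));

      return row_error ? row_error : local_error;
    }

Doing the reset unconditionally means a failed row operation no longer leaves the REPLACE hints armed for the next statement that touches the same table on the slave.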
Diffstat (limited to 'sql/log_event.cc')
-rw-r--r--  sql/log_event.cc | 38
1 files changed, 31 insertions(+), 7 deletions(-)
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 9289cf9b12c..edf6851e424 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -6601,10 +6601,23 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table)
     lex->duplicates flag.
   */
   thd->lex->sql_command= SQLCOM_REPLACE;
-
-  table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); // Needed for ndbcluster
-  table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); // Needed for ndbcluster
-  table->file->extra(HA_EXTRA_IGNORE_NO_KEY); // Needed for ndbcluster
+  /*
+    Do not raise the error flag in case of hitting a duplicate key on a
+    unique attribute.
+  */
+  table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+  /*
+    NDB specific: an update coming from an NDB master is wrapped as a
+    Write_rows event, so the event should be applied as a replace of the
+    slave's row.
+  */
+  table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
+  /*
+    NDB specific: if an update from an NDB master wrapped as Write_rows
+    does not find the row, idempotent binlog applying is assumed to be
+    taking place; don't raise the error.
+  */
+  table->file->extra(HA_EXTRA_IGNORE_NO_KEY);
   /*
     TODO: the cluster team (Tomas?) says that it's better if the engine knows
     how many rows are going to be inserted, then it can allocate needed memory
@@ -6632,9 +6645,20 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table)
 
 int Write_rows_log_event::do_after_row_operations(TABLE *table, int error)
 {
-  if (error == 0)
-    error= table->file->ha_end_bulk_insert();
-  return error;
+  int local_error= 0;
+  table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+  table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
+  /*
+    Resetting the extra hint with
+    table->file->extra(HA_EXTRA_NO_IGNORE_NO_KEY);
+    triggers bug#27077.
+    TODO: explain or fix.
+  */
+  if ((local_error= table->file->ha_end_bulk_insert()))
+  {
+    table->file->print_error(local_error, MYF(0));
+  }
+  return error ? error : local_error;
 }
 
 int Write_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli,