author     unknown <dlenev@mysql.com>    2006-07-02 01:51:10 +0400
committer  unknown <dlenev@mysql.com>    2006-07-02 01:51:10 +0400
commit     ae9724cce160c8d8351df3e2a232cc848b5c6fb8 (patch)
tree       48bd251f4cb4e7758b735c1992085a65a6b54dd1 /sql
parent     9d7799e7a89235d9799eb0e8acd8a52c79746107 (diff)
Fix for bug#18437 "Wrong values inserted with a before update trigger on
NDB table".

The SQL layer was not marking fields used in triggers as such. As a result
these fields were not always properly retrieved/stored by the handler layer,
so one might get wrong values or lose changes made by triggers for NDB,
Federated and possibly InnoDB tables. This fix solves the problem by marking
the fields used in triggers appropriately.

This patch also contains the following cleanup of the ha_ndbcluster code:
we no longer rely on reading the LEX::sql_command value in the handler to
determine whether we can enable the optimization that handles REPLACE
statements more efficiently by doing replaces directly in the write_row()
method without reporting an error to the SQL layer. Instead, the SQL layer
informs the handler whether this optimization is applicable by calling the
handler::extra() method with the HA_EXTRA_WRITE_CAN_REPLACE flag. As a
result we no longer apply this optimization in cases where it should not be
used (e.g. when the table has ON DELETE triggers) and use it in some
additional cases where it is applicable (e.g. for LOAD DATA REPLACE).

Finally, this patch includes a fix for bug#20728 "REPLACE does not work
correctly for NDB table with PK and unique index". This was yet another
problem caused by improper field mark-up: during row replacement, fields
which were not explicitly used in the REPLACE statement were not marked as
fields to be saved (updated), so they retained values from the old row
version. The fix is to mark all table fields as set for REPLACE statements.
Note that in 5.1 this problem is already solved by notifying the handler
that it should save values from all fields only when a real replacement
happens.

include/my_base.h:
  Added HA_EXTRA_WRITE_CAN_REPLACE and HA_EXTRA_WRITE_CANNOT_REPLACE - new
  parameters for the ha_extra() method. We use them to inform the handler
  that a write_row() call which tries to insert a new row into the table
  and encounters an already existing row with the same primary/unique key
  may replace the old row with the new one instead of reporting an error.
mysql-test/r/federated.result:
  Additional test for bug#18437 "Wrong values inserted with a before update
  trigger on NDB table".
mysql-test/r/ndb_replace.result:
  Added test for bug#20728 "REPLACE does not work correctly for NDB table
  with PK and unique index". Updated wrong results from an older test.
mysql-test/t/federated.test:
  Additional test for bug#18437 "Wrong values inserted with a before update
  trigger on NDB table".
mysql-test/t/ndb_replace.test:
  Added test for bug#20728 "REPLACE does not work correctly for NDB table
  with PK and unique index".
sql/ha_ndbcluster.cc:
  We no longer rely on reading the LEX::sql_command value in the handler to
  determine whether we can enable the optimization that handles REPLACE
  statements more efficiently by doing replaces directly in the write_row()
  method without reporting an error to the SQL layer. Instead, the SQL
  layer informs the handler whether this optimization is applicable by
  calling the handler::extra() method with the HA_EXTRA_WRITE_CAN_REPLACE
  flag. As a result we no longer apply this optimization in cases where it
  should not be used (e.g. when the table has ON DELETE triggers) and use
  it in some additional cases where it is applicable (e.g. for LOAD DATA
  REPLACE).
sql/item.cc:
  Item_trigger_field::setup_field(): Added a comment explaining why we
  don't set Field::query_id in this method.
sql/mysql_priv.h:
  The mysql_alter_table() function no longer takes a handle_duplicates
  argument. Added a declaration of the
  mark_fields_used_by_triggers_for_insert_stmt() function.
sql/sql_delete.cc:
  Mark fields which are used by ON DELETE triggers so the handler will
  retrieve values for these fields.
sql/sql_insert.cc:
  Explicitly inform the handler that we are doing REPLACE (using the
  ha_extra() method) in cases when it can promote the insert operation done
  by write_row() to a replace. Also, when we do REPLACE we want to store
  values for all columns, so we should inform the handler about it.
  Finally, we should mark fields used by ON UPDATE/ON DELETE triggers as
  such so the handler can properly retrieve/restore values in these fields
  during execution of REPLACE and INSERT ... ON DUPLICATE KEY UPDATE
  statements.
sql/sql_load.cc:
  Explicitly inform the handler that we are doing LOAD DATA REPLACE (using
  the ha_extra() method) in cases when it can promote the insert operation
  done by write_row() to a replace. Also, when we do a replace we want to
  save (replace) values for all columns, so we should inform the handler
  about it. Finally, to properly execute LOAD DATA for a table with
  triggers we should mark fields used by ON INSERT triggers as such so the
  handler can properly store values for these fields.
sql/sql_parse.cc:
  The mysql_alter_table() function no longer takes a handle_duplicates
  argument.
sql/sql_table.cc:
  Got rid of the handle_duplicates argument in the mysql_alter_table() and
  copy_data_between_tables() functions. These functions were always called
  with handle_duplicates == DUP_ERROR and thus contained dead (and probably
  incorrect) code.
sql/sql_trigger.cc:
  Added the Table_triggers_list::mark_fields_used() method, which marks
  fields read/set by triggers as such so handlers will be able to properly
  retrieve/store values in these fields.
sql/sql_trigger.h:
  Table_triggers_list: Added the mark_fields_used() method, which marks
  fields read/set by triggers as such so handlers will be able to properly
  retrieve/store values in these fields. To implement this method, added
  the 'trigger_fields' member, which is an array of lists linking the items
  for all fields used in triggers, grouped by event and action time.
sql/sql_update.cc:
  Mark fields which are used by ON UPDATE triggers so the handler will
  retrieve and save values for these fields.
mysql-test/r/ndb_trigger.result:
  Added test for bug#18437 "Wrong values inserted with a before update
  trigger on NDB table".
mysql-test/t/ndb_trigger.test:
  Added test for bug#18437 "Wrong values inserted with a before update
  trigger on NDB table".
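To make the new handler protocol concrete, here is a minimal, simplified
sketch of the SQL-layer side of the handshake; the helper name
write_rows_with_replace_hint is invented for illustration, while the flags,
types and trigger checks come from the diff below (the real logic lives in
mysql_insert(), select_insert/select_create and mysql_load()):

/*
  Simplified illustration (not the actual server code) of how an
  INSERT-like statement drives the new flags around write_row().
  Types such as THD, TABLE, enum_duplicates come from mysql_priv.h.
*/
static void write_rows_with_replace_hint(TABLE *table,
                                         enum_duplicates duplic)
{
  /*
    The hint is only given when no ON DELETE triggers exist, since a
    direct replace inside the engine would bypass them.
  */
  bool can_replace= (duplic == DUP_REPLACE &&
                     (!table->triggers ||
                      !table->triggers->has_delete_triggers()));
  if (can_replace)
    table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
  if (duplic == DUP_REPLACE)
  {
    /*
      REPLACE must set values for all columns; as a side effect the
      handler also retrieves the values needed by ON DELETE triggers.
    */
    table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
  }

  /* ... table->file->write_row() calls for each row go here ... */

  if (can_replace)
    table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); /* restore default */
}

On the NDB side (see ha_ndbcluster.cc below), HA_EXTRA_WRITE_CAN_REPLACE
turns on m_use_write only when the table has no unique index, and
HA_EXTRA_WRITE_CANNOT_REPLACE turns it off again.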
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_ndbcluster.cc   27
-rw-r--r--  sql/item.cc             9
-rw-r--r--  sql/mysql_priv.h        6
-rw-r--r--  sql/sql_delete.cc       6
-rw-r--r--  sql/sql_insert.cc      77
-rw-r--r--  sql/sql_load.cc        10
-rw-r--r--  sql/sql_parse.cc        7
-rw-r--r--  sql/sql_table.cc       19
-rw-r--r--  sql/sql_trigger.cc     44
-rw-r--r--  sql/sql_trigger.h       8
-rw-r--r--  sql/sql_update.cc       6
11 files changed, 182 insertions, 37 deletions
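The trigger-field mark-up works through Field::query_id:
Table_triggers_list::mark_fields_used() (added in sql/sql_trigger.cc below)
sets the query_id of every field referenced by the relevant triggers to the
current THD::query_id, which is the mark handlers consult when deciding
which columns to read or write. An illustrative check of the assumed
handler-side pattern (the helper name is hypothetical):

/*
  Assumed pattern, for illustration only: engines such as ha_ndbcluster
  decide per column whether to fetch/store it by comparing the mark set
  by the SQL layer against the current statement's query id.
*/
static bool field_needed_by_statement(const Field *field, const THD *thd,
                                      bool retrieve_all_fields)
{
  return retrieve_all_fields ||            /* HA_EXTRA_RETRIEVE_ALL_COLS */
         field->query_id == thd->query_id; /* marked by the SQL layer    */
}

Before this fix, fields used only inside trigger bodies never received this
mark, so engines relying on it skipped them - the root cause of bug#18437.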
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 46ab5b88624..074e8fb9371 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -3212,20 +3212,11 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
break;
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
- if (current_thd->lex->sql_command == SQLCOM_REPLACE && !m_has_unique_index)
- {
- DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
- m_use_write= TRUE;
- } else
- {
- DBUG_PRINT("info", ("Ignoring duplicate key"));
- m_ignore_dup_key= TRUE;
- }
+ DBUG_PRINT("info", ("Ignoring duplicate key"));
+ m_ignore_dup_key= TRUE;
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
- DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
- m_use_write= FALSE;
m_ignore_dup_key= FALSE;
break;
case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
@@ -3255,7 +3246,19 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_PRESERVE_FIELDS"));
break;
-
+ case HA_EXTRA_WRITE_CAN_REPLACE:
+ DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE"));
+ if (!m_has_unique_index)
+ {
+ DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
+ m_use_write= TRUE;
+ }
+ break;
+ case HA_EXTRA_WRITE_CANNOT_REPLACE:
+ DBUG_PRINT("info", ("HA_EXTRA_WRITE_CANNOT_REPLACE"));
+ DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
+ m_use_write= FALSE;
+ break;
}
DBUG_RETURN(0);
diff --git a/sql/item.cc b/sql/item.cc
index 24efc1f106f..6e26c204c0b 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -5350,9 +5350,14 @@ void Item_insert_value::print(String *str)
void Item_trigger_field::setup_field(THD *thd, TABLE *table,
GRANT_INFO *table_grant_info)
{
+ /*
+ There is no sense in marking fields used by trigger with current value
+ of THD::query_id since it is completely unrelated to the THD::query_id
+ value for statements which will invoke trigger. So instead we use
+ Table_triggers_list::mark_fields_used() method which is called during
+ execution of these statements.
+ */
bool save_set_query_id= thd->set_query_id;
-
- /* TODO: Think more about consequences of this step. */
thd->set_query_id= 0;
/*
Try to find field by its name and if it will be found
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 3bb371b6004..798ca3b8967 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -726,9 +726,7 @@ bool mysql_alter_table(THD *thd, char *new_db, char *new_name,
TABLE_LIST *table_list,
List<create_field> &fields,
List<Key> &keys,
- uint order_num, ORDER *order,
- enum enum_duplicates handle_duplicates,
- bool ignore,
+ uint order_num, ORDER *order, bool ignore,
ALTER_INFO *alter_info, bool do_send_ok);
bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok);
bool mysql_create_like_table(THD *thd, TABLE_LIST *table,
@@ -764,6 +762,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
bool ignore);
int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
TABLE_LIST *table_list);
+void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
+ enum_duplicates duplic);
bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds);
bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
SQL_LIST *order, ha_rows rows, ulonglong options,
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index af20b770c56..98bd3ee5a36 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -194,6 +194,10 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
deleted=0L;
init_ftfuncs(thd, select_lex, 1);
thd->proc_info="updating";
+
+ if (table->triggers)
+ table->triggers->mark_fields_used(thd, TRG_EVENT_DELETE);
+
while (!(error=info.read_record(&info)) && !thd->killed &&
!thd->net.report_error)
{
@@ -507,6 +511,8 @@ multi_delete::initialize_tables(JOIN *join)
transactional_tables= 1;
else
normal_tables= 1;
+ if (tbl->triggers)
+ tbl->triggers->mark_fields_used(thd, TRG_EVENT_DELETE);
}
else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
walk == delete_tables)
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 8ffc6f53a43..c5ff8061c94 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -241,6 +241,33 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
}
+/*
+ Mark fields used by triggers for INSERT-like statement.
+
+ SYNOPSIS
+ mark_fields_used_by_triggers_for_insert_stmt()
+ thd The current thread
+ table Table to which insert will happen
+ duplic Type of duplicate handling for insert which will happen
+
+ NOTE
+ For REPLACE there is no sense in marking particular fields
+ used by ON DELETE trigger as to execute it properly we have
+ to retrieve and store values for all table columns anyway.
+*/
+
+void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
+ enum_duplicates duplic)
+{
+ if (table->triggers)
+ {
+ table->triggers->mark_fields_used(thd, TRG_EVENT_INSERT);
+ if (duplic == DUP_UPDATE)
+ table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
+ }
+}
+
+
bool mysql_insert(THD *thd,TABLE_LIST *table_list,
List<Item> &fields,
List<List_item> &values_list,
@@ -401,6 +428,17 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->proc_info="update";
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ if (duplic == DUP_REPLACE)
+ {
+ if (!table->triggers || !table->triggers->has_delete_triggers())
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
+ /*
+ REPLACE should change values of all columns so we should mark
+ all columns as columns to be set. As nice side effect we will
+ retrieve columns which values are needed for ON DELETE triggers.
+ */
+ table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ }
/*
let's *try* to start bulk inserts. It won't necessary
start them as values_list.elements should be greater than
@@ -429,6 +467,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
error= 1;
}
+ mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic);
+
if (table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd))
error= 1;
@@ -599,6 +639,9 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->next_insert_id=0; // Reset this if wrongly used
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+ if (duplic == DUP_REPLACE &&
+ (!table->triggers || !table->triggers->has_delete_triggers()))
+ table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
/* Reset value of LAST_INSERT_ID if no rows where inserted */
if (!info.copied && thd->insert_id_used)
@@ -1902,7 +1945,8 @@ bool delayed_insert::handle_inserts(void)
{
int error;
ulong max_rows;
- bool using_ignore=0, using_bin_log=mysql_bin_log.is_open();
+ bool using_ignore= 0, using_opt_replace= 0;
+ bool using_bin_log= mysql_bin_log.is_open();
delayed_row *row;
DBUG_ENTER("handle_inserts");
@@ -1964,6 +2008,13 @@ bool delayed_insert::handle_inserts(void)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
using_ignore=1;
}
+ if (info.handle_duplicates == DUP_REPLACE &&
+ (!table->triggers ||
+ !table->triggers->has_delete_triggers()))
+ {
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
+ using_opt_replace= 1;
+ }
thd.clear_error(); // reset error for binlog
if (write_record(&thd, table, &info))
{
@@ -1976,6 +2027,11 @@ bool delayed_insert::handle_inserts(void)
using_ignore=0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
}
+ if (using_opt_replace)
+ {
+ using_opt_replace= 0;
+ table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
+ }
if (row->query && row->log_query && using_bin_log)
{
Query_log_event qinfo(&thd, row->query, row->query_length, 0, FALSE);
@@ -2221,6 +2277,12 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ if (info.handle_duplicates == DUP_REPLACE)
+ {
+ if (!table->triggers || !table->triggers->has_delete_triggers())
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
+ table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ }
thd->no_trans_update= 0;
thd->abort_on_warning= (!info.ignore &&
(thd->variables.sql_mode &
@@ -2230,6 +2292,10 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
check_that_all_fields_are_given_values(thd, table, table_list)) ||
table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd));
+
+ if (!res)
+ mark_fields_used_by_triggers_for_insert_stmt(thd, table,
+ info.handle_duplicates);
DBUG_RETURN(res);
}
@@ -2395,6 +2461,7 @@ bool select_insert::send_eof()
error= (!thd->prelocked_mode) ? table->file->end_bulk_insert():0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+ table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
/*
We must invalidate the table in the query cache before binlog writing
@@ -2624,6 +2691,12 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ if (info.handle_duplicates == DUP_REPLACE)
+ {
+ if (!table->triggers || !table->triggers->has_delete_triggers())
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
+ table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ }
if (!thd->prelocked_mode)
table->file->start_bulk_insert((ha_rows) 0);
thd->no_trans_update= 0;
@@ -2663,6 +2736,7 @@ bool select_create::send_eof()
else
{
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+ table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
VOID(pthread_mutex_lock(&LOCK_open));
mysql_unlock_tables(thd, lock);
/*
@@ -2696,6 +2770,7 @@ void select_create::abort()
if (table)
{
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+ table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
enum db_type table_type=table->s->db_type;
if (!table->s->tmp_table)
{
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index eaee5edf9f1..40e1e6b07aa 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -225,6 +225,8 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
}
+ mark_fields_used_by_triggers_for_insert_stmt(thd, table, handle_duplicates);
+
uint tot_length=0;
bool use_blobs= 0, use_vars= 0;
List_iterator_fast<Item> it(fields_vars);
@@ -357,6 +359,13 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
if (ignore ||
handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ if (handle_duplicates == DUP_REPLACE)
+ {
+ if (!table->triggers ||
+ !table->triggers->has_delete_triggers())
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
+ table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ }
if (!thd->prelocked_mode)
table->file->start_bulk_insert((ha_rows) 0);
table->copy_blobs=1;
@@ -381,6 +390,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
error= 1;
}
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+ table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
table->next_number_field=0;
}
ha_enable_transaction(thd, TRUE);
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index ebbe6e6f558..359cd8f9500 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -3046,8 +3046,7 @@ end_with_restore_list:
lex->key_list,
select_lex->order_list.elements,
(ORDER *) select_lex->order_list.first,
- lex->duplicates, lex->ignore, &lex->alter_info,
- 1);
+ lex->ignore, &lex->alter_info, 1);
}
break;
}
@@ -6979,7 +6978,7 @@ bool mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys)
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name,
&create_info, table_list,
fields, keys, 0, (ORDER*)0,
- DUP_ERROR, 0, &alter_info, 1));
+ 0, &alter_info, 1));
}
@@ -6997,7 +6996,7 @@ bool mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info)
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name,
&create_info, table_list,
fields, keys, 0, (ORDER*)0,
- DUP_ERROR, 0, alter_info, 1));
+ 0, alter_info, 1));
}
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 275cfbaa088..9f23345f69c 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -35,9 +35,7 @@ const char *primary_key_name="PRIMARY";
static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end);
static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end);
static int copy_data_between_tables(TABLE *from,TABLE *to,
- List<create_field> &create,
- enum enum_duplicates handle_duplicates,
- bool ignore,
+ List<create_field> &create, bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,ha_rows *deleted);
static bool prepare_blob_field(THD *thd, create_field *sql_field);
@@ -3128,8 +3126,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
HA_CREATE_INFO *create_info,
TABLE_LIST *table_list,
List<create_field> &fields, List<Key> &keys,
- uint order_num, ORDER *order,
- enum enum_duplicates handle_duplicates, bool ignore,
+ uint order_num, ORDER *order, bool ignore,
ALTER_INFO *alter_info, bool do_send_ok)
{
TABLE *table,*new_table=0;
@@ -3724,8 +3721,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
new_table->next_number_field=new_table->found_next_number_field;
- error=copy_data_between_tables(table,new_table,create_list,
- handle_duplicates, ignore,
+ error=copy_data_between_tables(table, new_table, create_list, ignore,
order_num, order, &copied, &deleted);
}
thd->last_insert_id=next_insert_id; // Needed for correct log
@@ -3948,7 +3944,6 @@ end_temporary:
static int
copy_data_between_tables(TABLE *from,TABLE *to,
List<create_field> &create,
- enum enum_duplicates handle_duplicates,
bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,
@@ -4051,8 +4046,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
*/
from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
- if (ignore ||
- handle_duplicates == DUP_REPLACE)
+ if (ignore)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
thd->row_count= 0;
restore_record(to, s->default_values); // Create empty record
@@ -4079,8 +4073,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
}
if ((error=to->file->write_row((byte*) to->record[0])))
{
- if ((!ignore &&
- handle_duplicates != DUP_REPLACE) ||
+ if (!ignore ||
(error != HA_ERR_FOUND_DUPP_KEY &&
error != HA_ERR_FOUND_DUPP_UNIQUE))
{
@@ -4158,7 +4151,7 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info,
table_list, lex->create_list,
lex->key_list, 0, (ORDER *) 0,
- DUP_ERROR, 0, &lex->alter_info, do_send_ok));
+ 0, &lex->alter_info, do_send_ok));
}
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index f943b014118..667fdf733e9 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -1014,8 +1014,15 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
}
/*
- Let us bind Item_trigger_field objects representing access to fields
- in old/new versions of row in trigger to Field objects in table being
+ Gather all Item_trigger_field objects representing access to fields
+ in old/new versions of row in trigger into lists containing all such
+ objects for the triggers with same action and timing.
+ */
+ triggers->trigger_fields[lex.trg_chistics.event]
+ [lex.trg_chistics.action_time]=
+ (Item_trigger_field *)(lex.trg_table_fields.first);
+ /*
+ Also let us bind these objects to Field objects in table being
opened.
We ignore errors here, because if even something is wrong we still
@@ -1528,6 +1535,39 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
/*
+ Mark fields of subject table which we read/set in its triggers as such.
+
+ SYNOPSIS
+ mark_fields_used()
+ thd Current thread context
+ event Type of event triggers for which we are going to inspect
+
+ DESCRIPTION
+ This method marks fields of subject table which are read/set in its
+ triggers as such (by setting Field::query_id equal to THD::query_id)
+ and thus informs handler that values for these fields should be
+ retrieved/stored during execution of statement.
+*/
+
+void Table_triggers_list::mark_fields_used(THD *thd, trg_event_type event)
+{
+ int action_time;
+ Item_trigger_field *trg_field;
+
+ for (action_time= 0; action_time < (int)TRG_ACTION_MAX; action_time++)
+ {
+ for (trg_field= trigger_fields[event][action_time]; trg_field;
+ trg_field= trg_field->next_trg_field)
+ {
+ /* We cannot mark fields which does not present in table. */
+ if (trg_field->field_idx != (uint)-1)
+ table->field[trg_field->field_idx]->query_id = thd->query_id;
+ }
+ }
+}
+
+
+/*
Trigger BUG#14090 compatibility hook
SYNOPSIS
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index b67c22e0588..e736c3e0e1a 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -26,6 +26,11 @@ class Table_triggers_list: public Sql_alloc
/* Triggers as SPs grouped by event, action_time */
sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX];
/*
+ Heads of the lists linking items for all fields used in triggers
+ grouped by event and action_time.
+ */
+ Item_trigger_field *trigger_fields[TRG_EVENT_MAX][TRG_ACTION_MAX];
+ /*
Copy of TABLE::Field array with field pointers set to TABLE::record[1]
buffer instead of TABLE::record[0] (used for OLD values in on UPDATE
trigger and DELETE trigger when it is called for REPLACE).
@@ -82,6 +87,7 @@ public:
record1_field(0), table(table_arg)
{
bzero((char *)bodies, sizeof(bodies));
+ bzero((char *)trigger_fields, sizeof(trigger_fields));
bzero((char *)&subject_table_grants, sizeof(subject_table_grants));
}
~Table_triggers_list();
@@ -119,6 +125,8 @@ public:
void set_table(TABLE *new_table);
+ void mark_fields_used(THD *thd, trg_event_type event);
+
friend class Item_trigger_field;
friend int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
TABLE_LIST *table);
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index c2b7624c9e7..f282cf19de1 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -433,6 +433,9 @@ int mysql_update(THD *thd,
(MODE_STRICT_TRANS_TABLES |
MODE_STRICT_ALL_TABLES)));
+ if (table->triggers)
+ table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
+
while (!(error=info.read_record(&info)) && !thd->killed)
{
if (!(select && select->skip_record()))
@@ -755,6 +758,9 @@ reopen_tables:
DBUG_RETURN(TRUE);
}
+ if (table->triggers)
+ table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
+
DBUG_PRINT("info",("setting table `%s` for update", tl->alias));
/*
If table will be updated we should not downgrade lock for it and