author     unknown <mskold/marty@mysql.com/linux.site>  2007-04-04 12:50:39 +0200
committer  unknown <mskold/marty@mysql.com/linux.site>  2007-04-04 12:50:39 +0200
commit     2efc0f51cf3e80cd49c1ea0dee9a440936ee6287 (patch)
tree       3330e0cd8c3d7c512db5cc6a3e6df4d4240eaed8
parent     ef60c96f8bb7c8ab0c044750b39f56f1eb1953ea (diff)
download   mariadb-git-2efc0f51cf3e80cd49c1ea0dee9a440936ee6287.tar.gz
Bug #26242 UPDATE with subquery and triggers failing with cluster tables
In certain cases, AFTER UPDATE/DELETE triggers on NDB tables that referenced the subject table did not see the results of the operation which caused the invocation of those triggers. In other words, an AFTER trigger invoked as a result of an update (or deletion) of a particular row saw the version of that row from before the update (or deletion).

The problem occurred because in those cases the NDB handler postponed the actual update/delete operations so that it could perform them later as one batch.

This fix solves the problem by disabling this optimization for a particular operation if the subject table has an AFTER trigger defined for that operation. To achieve this, we introduce two new flags for the handler::extra() method: HA_EXTRA_DELETE_CANNOT_BATCH and HA_EXTRA_UPDATE_CANNOT_BATCH. These are called if there exist AFTER DELETE/UPDATE triggers during a statement that can potentially generate calls to delete_row()/update_row(). This includes multi_delete/multi_update statements as well as insert statements that do a delete/update as part of an ON DUPLICATE KEY UPDATE clause.

include/my_base.h:
  Added HA_EXTRA_DELETE_CANNOT_BATCH and HA_EXTRA_UPDATE_CANNOT_BATCH to inform the handler when batching of delete/update is not possible.
mysql-test/r/ndb_trigger.result:
  Added results for the new test cases.
mysql-test/t/ndb_trigger.test:
  Added new test cases.
sql/ha_ndbcluster.cc:
  Use HA_EXTRA_DELETE_CANNOT_BATCH and HA_EXTRA_UPDATE_CANNOT_BATCH to inform the handler when batching of delete/update is not possible.
sql/ha_ndbcluster.h:
  Added member variables for handling HA_EXTRA_DELETE_CANNOT_BATCH and HA_EXTRA_UPDATE_CANNOT_BATCH.
sql/mysql_priv.h:
  Added new function prepare_triggers_for_insert_stmt to check whether batching of delete/update must be disallowed.
sql/sql_delete.cc:
  Use HA_EXTRA_DELETE_CANNOT_BATCH to inform the handler when batching of delete is not possible.
sql/sql_insert.cc:
  Added function prepare_triggers_for_insert_stmt to check whether batching of delete/update must be disallowed. Use HA_EXTRA_DELETE_CANNOT_BATCH and HA_EXTRA_UPDATE_CANNOT_BATCH to inform the handler when batching of delete/update is not possible.
sql/sql_load.cc:
  Call prepare_triggers_for_insert_stmt to check whether batching of delete/update must be disallowed and to mark fields used by triggers for the insert statement.
sql/sql_trigger.h:
  Added has_triggers() to check which particular triggers exist on a table.
sql/sql_update.cc:
  Use HA_EXTRA_UPDATE_CANNOT_BATCH to inform the handler when batching of update is not possible.
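For orientation, here is a condensed sketch of the failing pattern, distilled from the new test cases further down; the table and trigger names follow the test, and a running NDB Cluster is assumed:

  -- An AFTER UPDATE trigger that reads the subject table.
  CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY, xy INT) ENGINE=ndbcluster;
  CREATE TABLE t2 (id INT NOT NULL PRIMARY KEY, xy INT) ENGINE=ndbcluster;
  CREATE TABLE t3 (id INT NOT NULL PRIMARY KEY) ENGINE=ndbcluster;
  INSERT INTO t1 VALUES (1, 0);
  INSERT INTO t3 VALUES (1);
  CREATE TRIGGER t1_update AFTER UPDATE ON t1 FOR EACH ROW
    REPLACE INTO t2 SELECT * FROM t1 WHERE t1.id = NEW.id;

  -- With the subquery, the NDB handler used to batch the update, so the
  -- trigger copied the pre-update row into t2. With this fix the trigger
  -- sees the updated row, and the SELECT below returns xy = 4.
  UPDATE t1 SET xy = 4 WHERE id IN (SELECT id FROM t3 WHERE id = 1);
  SELECT xy FROM t2 WHERE id = 1;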
-rw-r--r--  include/my_base.h                 10
-rw-r--r--  mysql-test/r/ndb_trigger.result  171
-rw-r--r--  mysql-test/t/ndb_trigger.test    108
-rw-r--r--  sql/ha_ndbcluster.cc              23
-rw-r--r--  sql/ha_ndbcluster.h                2
-rw-r--r--  sql/mysql_priv.h                   2
-rw-r--r--  sql/sql_delete.cc                 24
-rw-r--r--  sql/sql_insert.cc                 51
-rw-r--r--  sql/sql_load.cc                    2
-rw-r--r--  sql/sql_trigger.h                  5
-rw-r--r--  sql/sql_update.cc                 26
11 files changed, 416 insertions, 8 deletions
diff --git a/include/my_base.h b/include/my_base.h
index dda64db2ef9..633cd8c8df0 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -161,7 +161,15 @@ enum ha_extra_function {
Off by default.
*/
HA_EXTRA_WRITE_CAN_REPLACE,
- HA_EXTRA_WRITE_CANNOT_REPLACE
+ HA_EXTRA_WRITE_CANNOT_REPLACE,
+ /*
+ Inform handler that delete_row()/update_row() cannot batch deletes/updates
+ and should perform them immediately. This may be needed when the table has
+ AFTER DELETE/UPDATE triggers which access the subject table.
+ These flags are reset by the handler::extra(HA_EXTRA_RESET) call.
+ */
+ HA_EXTRA_DELETE_CANNOT_BATCH,
+ HA_EXTRA_UPDATE_CANNOT_BATCH
};
/* The following is parameter to ha_panic() */
diff --git a/mysql-test/r/ndb_trigger.result b/mysql-test/r/ndb_trigger.result
index 27f83df70c9..562c5120715 100644
--- a/mysql-test/r/ndb_trigger.result
+++ b/mysql-test/r/ndb_trigger.result
@@ -116,4 +116,175 @@ op a b
d 1 1.050000000000000000000000000000
d 2 2.050000000000000000000000000000
drop tables t1, t2, t3;
+CREATE TABLE t1 (
+id INT NOT NULL PRIMARY KEY,
+xy INT
+) ENGINE=ndbcluster;
+INSERT INTO t1 VALUES (1, 0);
+CREATE TRIGGER t1_update AFTER UPDATE ON t1 FOR EACH ROW BEGIN REPLACE INTO t2 SELECT * FROM t1 WHERE t1.id = NEW.id; END //
+CREATE TABLE t2 (
+id INT NOT NULL PRIMARY KEY,
+xy INT
+) ENGINE=ndbcluster;
+INSERT INTO t2 VALUES (2, 0);
+CREATE TABLE t3 (id INT NOT NULL PRIMARY KEY) ENGINE=ndbcluster;
+INSERT INTO t3 VALUES (1);
+CREATE TABLE t4 LIKE t1;
+CREATE TRIGGER t4_update AFTER UPDATE ON t4 FOR EACH ROW BEGIN REPLACE INTO t5 SELECT * FROM t4 WHERE t4.id = NEW.id; END //
+CREATE TABLE t5 LIKE t2;
+UPDATE t1 SET xy = 3 WHERE id = 1;
+SELECT xy FROM t1 where id = 1;
+xy
+3
+SELECT xy FROM t2 where id = 1;
+xy
+3
+UPDATE t1 SET xy = 4 WHERE id IN (SELECT id FROM t3 WHERE id = 1);
+SELECT xy FROM t1 where id = 1;
+xy
+4
+SELECT xy FROM t2 where id = 1;
+xy
+4
+INSERT INTO t4 SELECT * FROM t1;
+INSERT INTO t5 SELECT * FROM t2;
+UPDATE t1,t4 SET t1.xy = 3, t4.xy = 3 WHERE t1.id = 1 AND t4.id = 1;
+SELECT xy FROM t1 where id = 1;
+xy
+3
+SELECT xy FROM t2 where id = 1;
+xy
+3
+SELECT xy FROM t4 where id = 1;
+xy
+3
+SELECT xy FROM t5 where id = 1;
+xy
+3
+UPDATE t1,t4 SET t1.xy = 4, t4.xy = 4 WHERE t1.id IN (SELECT id FROM t3 WHERE id = 1) AND t4.id IN (SELECT id FROM t3 WHERE id = 1);
+SELECT xy FROM t1 where id = 1;
+xy
+4
+SELECT xy FROM t2 where id = 1;
+xy
+4
+SELECT xy FROM t4 where id = 1;
+xy
+4
+SELECT xy FROM t5 where id = 1;
+xy
+4
+INSERT INTO t1 VALUES (1,0) ON DUPLICATE KEY UPDATE xy = 5;
+SELECT xy FROM t1 where id = 1;
+xy
+5
+SELECT xy FROM t2 where id = 1;
+xy
+5
+DROP TRIGGER t1_update;
+DROP TRIGGER t4_update;
+CREATE TRIGGER t1_delete AFTER DELETE ON t1 FOR EACH ROW BEGIN REPLACE INTO t2 SELECT * FROM t1 WHERE t1.id > 4; END //
+CREATE TRIGGER t4_delete AFTER DELETE ON t4 FOR EACH ROW BEGIN REPLACE INTO t5 SELECT * FROM t4 WHERE t4.id > 4; END //
+INSERT INTO t1 VALUES (5, 0),(6,0);
+INSERT INTO t2 VALUES (5, 1),(6,1);
+INSERT INTO t3 VALUES (5);
+SELECT * FROM t1 order by id;
+id xy
+1 5
+5 0
+6 0
+SELECT * FROM t2 order by id;
+id xy
+1 5
+2 0
+5 1
+6 1
+DELETE FROM t1 WHERE id IN (SELECT id FROM t3 WHERE id = 5);
+SELECT * FROM t1 order by id;
+id xy
+1 5
+6 0
+SELECT * FROM t2 order by id;
+id xy
+1 5
+2 0
+5 1
+6 0
+INSERT INTO t1 VALUES (5,0);
+UPDATE t2 SET xy = 1 WHERE id = 6;
+TRUNCATE t4;
+INSERT INTO t4 SELECT * FROM t1;
+TRUNCATE t5;
+INSERT INTO t5 SELECT * FROM t2;
+SELECT * FROM t1 order by id;
+id xy
+1 5
+5 0
+6 0
+SELECT * FROM t2 order by id;
+id xy
+1 5
+2 0
+5 1
+6 1
+SELECT * FROM t4 order by id;
+id xy
+1 5
+5 0
+6 0
+SELECT * FROM t5 order by id;
+id xy
+1 5
+2 0
+5 1
+6 1
+DELETE FROM t1,t4 USING t1,t3,t4 WHERE t1.id IN (SELECT id FROM t3 WHERE id = 5) AND t4.id IN (SELECT id FROM t3 WHERE id = 5);
+SELECT * FROM t1 order by id;
+id xy
+1 5
+6 0
+SELECT * FROM t2 order by id;
+id xy
+1 5
+2 0
+5 1
+6 0
+SELECT * FROM t4 order by id;
+id xy
+1 5
+6 0
+SELECT * FROM t5 order by id;
+id xy
+1 5
+2 0
+5 1
+6 0
+INSERT INTO t1 VALUES (5, 0);
+REPLACE INTO t2 VALUES (6,1);
+SELECT * FROM t1 order by id;
+id xy
+1 5
+5 0
+6 0
+SELECT * FROM t2 order by id;
+id xy
+1 5
+2 0
+5 1
+6 1
+REPLACE INTO t1 VALUES (5, 1);
+SELECT * FROM t1 order by id;
+id xy
+1 5
+5 1
+6 0
+SELECT * FROM t2 order by id;
+id xy
+1 5
+2 0
+5 1
+6 0
+DROP TRIGGER t1_delete;
+DROP TRIGGER t4_delete;
+DROP TABLE t1, t2, t3, t4, t5;
End of 5.0 tests
diff --git a/mysql-test/t/ndb_trigger.test b/mysql-test/t/ndb_trigger.test
index 2521ef17842..25b079cfe7c 100644
--- a/mysql-test/t/ndb_trigger.test
+++ b/mysql-test/t/ndb_trigger.test
@@ -89,4 +89,112 @@ select * from t2 order by op, a, b;
drop tables t1, t2, t3;
+# Test for bug#26242
+# Verify that AFTER UPDATE/DELETE triggers are executed
+# after the change has actually taken place
+
+CREATE TABLE t1 (
+ id INT NOT NULL PRIMARY KEY,
+ xy INT
+) ENGINE=ndbcluster;
+
+INSERT INTO t1 VALUES (1, 0);
+
+DELIMITER //;
+CREATE TRIGGER t1_update AFTER UPDATE ON t1 FOR EACH ROW BEGIN REPLACE INTO t2 SELECT * FROM t1 WHERE t1.id = NEW.id; END //
+DELIMITER ;//
+
+CREATE TABLE t2 (
+ id INT NOT NULL PRIMARY KEY,
+ xy INT
+) ENGINE=ndbcluster;
+
+INSERT INTO t2 VALUES (2, 0);
+
+CREATE TABLE t3 (id INT NOT NULL PRIMARY KEY) ENGINE=ndbcluster;
+
+INSERT INTO t3 VALUES (1);
+
+CREATE TABLE t4 LIKE t1;
+
+DELIMITER //;
+CREATE TRIGGER t4_update AFTER UPDATE ON t4 FOR EACH ROW BEGIN REPLACE INTO t5 SELECT * FROM t4 WHERE t4.id = NEW.id; END //
+DELIMITER ;//
+
+CREATE TABLE t5 LIKE t2;
+
+UPDATE t1 SET xy = 3 WHERE id = 1;
+SELECT xy FROM t1 where id = 1;
+SELECT xy FROM t2 where id = 1;
+
+UPDATE t1 SET xy = 4 WHERE id IN (SELECT id FROM t3 WHERE id = 1);
+SELECT xy FROM t1 where id = 1;
+SELECT xy FROM t2 where id = 1;
+
+INSERT INTO t4 SELECT * FROM t1;
+INSERT INTO t5 SELECT * FROM t2;
+UPDATE t1,t4 SET t1.xy = 3, t4.xy = 3 WHERE t1.id = 1 AND t4.id = 1;
+SELECT xy FROM t1 where id = 1;
+SELECT xy FROM t2 where id = 1;
+SELECT xy FROM t4 where id = 1;
+SELECT xy FROM t5 where id = 1;
+
+UPDATE t1,t4 SET t1.xy = 4, t4.xy = 4 WHERE t1.id IN (SELECT id FROM t3 WHERE id = 1) AND t4.id IN (SELECT id FROM t3 WHERE id = 1);
+SELECT xy FROM t1 where id = 1;
+SELECT xy FROM t2 where id = 1;
+SELECT xy FROM t4 where id = 1;
+SELECT xy FROM t5 where id = 1;
+
+INSERT INTO t1 VALUES (1,0) ON DUPLICATE KEY UPDATE xy = 5;
+SELECT xy FROM t1 where id = 1;
+SELECT xy FROM t2 where id = 1;
+
+DROP TRIGGER t1_update;
+DROP TRIGGER t4_update;
+
+DELIMITER //;
+CREATE TRIGGER t1_delete AFTER DELETE ON t1 FOR EACH ROW BEGIN REPLACE INTO t2 SELECT * FROM t1 WHERE t1.id > 4; END //
+DELIMITER ;//
+
+DELIMITER //;
+CREATE TRIGGER t4_delete AFTER DELETE ON t4 FOR EACH ROW BEGIN REPLACE INTO t5 SELECT * FROM t4 WHERE t4.id > 4; END //
+DELIMITER ;//
+
+INSERT INTO t1 VALUES (5, 0),(6,0);
+INSERT INTO t2 VALUES (5, 1),(6,1);
+INSERT INTO t3 VALUES (5);
+SELECT * FROM t1 order by id;
+SELECT * FROM t2 order by id;
+DELETE FROM t1 WHERE id IN (SELECT id FROM t3 WHERE id = 5);
+SELECT * FROM t1 order by id;
+SELECT * FROM t2 order by id;
+
+INSERT INTO t1 VALUES (5,0);
+UPDATE t2 SET xy = 1 WHERE id = 6;
+TRUNCATE t4;
+INSERT INTO t4 SELECT * FROM t1;
+TRUNCATE t5;
+INSERT INTO t5 SELECT * FROM t2;
+SELECT * FROM t1 order by id;
+SELECT * FROM t2 order by id;
+SELECT * FROM t4 order by id;
+SELECT * FROM t5 order by id;
+DELETE FROM t1,t4 USING t1,t3,t4 WHERE t1.id IN (SELECT id FROM t3 WHERE id = 5) AND t4.id IN (SELECT id FROM t3 WHERE id = 5);
+SELECT * FROM t1 order by id;
+SELECT * FROM t2 order by id;
+SELECT * FROM t4 order by id;
+SELECT * FROM t5 order by id;
+
+INSERT INTO t1 VALUES (5, 0);
+REPLACE INTO t2 VALUES (6,1);
+SELECT * FROM t1 order by id;
+SELECT * FROM t2 order by id;
+REPLACE INTO t1 VALUES (5, 1);
+SELECT * FROM t1 order by id;
+SELECT * FROM t2 order by id;
+
+DROP TRIGGER t1_delete;
+DROP TRIGGER t4_delete;
+DROP TABLE t1, t2, t3, t4, t5;
+
--echo End of 5.0 tests
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index d1d3484d432..9c5c10aa3dd 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2524,8 +2524,13 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
ERR_RETURN(op->getNdbError());
}
- // Execute update operation
- if (!cursor && execute_no_commit(this,trans,false) != 0) {
+ /*
+ Execute update operation immediately if we are not doing a scan for
+ update, or if batching has been disabled because the table has
+ AFTER UPDATE triggers
+ */
+
+ if ((!cursor || m_update_cannot_batch) &&
+ execute_no_commit(this,trans,false) != 0) {
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
@@ -2566,7 +2571,7 @@ int ha_ndbcluster::delete_row(const byte *record)
no_uncommitted_rows_update(-1);
- if (!m_primary_key_update)
+ if (!(m_primary_key_update || m_delete_cannot_batch))
// If deleting from cursor, NoCommit will be handled in next_result
DBUG_RETURN(0);
}
@@ -3277,6 +3282,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
DBUG_PRINT("info", ("HA_EXTRA_RESET"));
DBUG_PRINT("info", ("Clearing condition stack"));
cond_clear();
+ m_delete_cannot_batch= FALSE;
+ m_update_cannot_batch= FALSE;
break;
case HA_EXTRA_CACHE: /* Cash record in HA_rrnd() */
DBUG_PRINT("info", ("HA_EXTRA_CACHE"));
@@ -3393,6 +3400,14 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= FALSE;
break;
+ case HA_EXTRA_DELETE_CANNOT_BATCH:
+ DBUG_PRINT("info", ("HA_EXTRA_DELETE_CANNOT_BATCH"));
+ m_delete_cannot_batch= TRUE;
+ break;
+ case HA_EXTRA_UPDATE_CANNOT_BATCH:
+ DBUG_PRINT("info", ("HA_EXTRA_UPDATE_CANNOT_BATCH"));
+ m_update_cannot_batch= TRUE;
+ break;
}
DBUG_RETURN(0);
@@ -4744,6 +4759,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_bulk_insert_rows((ha_rows) 1024),
m_rows_changed((ha_rows) 0),
m_bulk_insert_not_flushed(FALSE),
+ m_delete_cannot_batch(FALSE),
+ m_update_cannot_batch(FALSE),
m_ops_pending(0),
m_skip_auto_increment(TRUE),
m_blobs_pending(0),
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 5d66a7920f9..452192d83a0 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -774,6 +774,8 @@ bool uses_blob_value(bool all_fields);
ha_rows m_bulk_insert_rows;
ha_rows m_rows_changed;
bool m_bulk_insert_not_flushed;
+ bool m_delete_cannot_batch;
+ bool m_update_cannot_batch;
ha_rows m_ops_pending;
bool m_skip_auto_increment;
bool m_blobs_pending;
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 1636b5a31e4..9d45f2ee5e4 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -811,6 +811,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
bool ignore);
int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
TABLE_LIST *table_list);
+void prepare_triggers_for_insert_stmt(THD *thd, TABLE *table,
+ enum_duplicates duplic);
void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
enum_duplicates duplic);
bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds);
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 87a6575cc64..668a3d8786a 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -209,7 +209,19 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
thd->proc_info="updating";
if (table->triggers)
+ {
table->triggers->mark_fields_used(thd, TRG_EVENT_DELETE);
+ if (table->triggers->has_triggers(TRG_EVENT_DELETE,
+ TRG_ACTION_AFTER))
+ {
+ /*
+ The table has AFTER DELETE triggers that might access the subject table
+ and therefore might need the delete to be done immediately. So we turn
+ off batching.
+ */
+ (void) table->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
+ }
+ }
while (!(error=info.read_record(&info)) && !thd->killed &&
!thd->net.report_error)
@@ -526,7 +538,19 @@ multi_delete::initialize_tables(JOIN *join)
else
normal_tables= 1;
if (tbl->triggers)
+ {
tbl->triggers->mark_fields_used(thd, TRG_EVENT_DELETE);
+ if (tbl->triggers->has_triggers(TRG_EVENT_DELETE,
+ TRG_ACTION_AFTER))
+ {
+ /*
+ The table has AFTER DELETE triggers that might access the subject
+ table and therefore might need the delete to be done immediately.
+ So we turn off batching.
+ */
+ (void) tbl->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
+ }
+ }
}
else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
walk == delete_tables)
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 2ce83caa369..79e9988d8ce 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -280,6 +280,51 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
/*
+ Prepare triggers for an INSERT-like statement.
+
+ SYNOPSIS
+ prepare_triggers_for_insert_stmt()
+ thd The current thread
+ table Table to which insert will happen
+ duplic Type of duplicate handling for insert which will happen
+
+ NOTE
+ Prepare triggers for an INSERT-like statement by marking the fields
+ used by triggers and informing the handler that batching of UPDATE/DELETE
+ cannot be done if there are AFTER UPDATE/DELETE triggers.
+*/
+
+void prepare_triggers_for_insert_stmt(THD *thd, TABLE *table,
+ enum_duplicates duplic)
+{
+ if (table->triggers)
+ {
+ if (table->triggers->has_triggers(TRG_EVENT_DELETE,
+ TRG_ACTION_AFTER))
+ {
+ /*
+ The table has AFTER DELETE triggers that might access the
+ subject table and therefore might need the delete to be done
+ immediately. So we turn off batching.
+ */
+ (void) table->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
+ }
+ if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
+ TRG_ACTION_AFTER))
+ {
+ /*
+ The table has AFTER UPDATE triggers that might access the subject
+ table and therefore might need the update to be done immediately.
+ So we turn off batching.
+ */
+ (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
+ }
+ mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic);
+ }
+}
+
+
+/*
Mark fields used by triggers for INSERT-like statement.
SYNOPSIS
@@ -513,7 +558,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
error= 1;
}
- mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic);
+ prepare_triggers_for_insert_stmt(thd, table, duplic);
if (table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd))
@@ -2393,8 +2438,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
table_list->prepare_check_option(thd));
if (!res)
- mark_fields_used_by_triggers_for_insert_stmt(thd, table,
- info.handle_duplicates);
+ prepare_triggers_for_insert_stmt(thd, table,
+ info.handle_duplicates);
DBUG_RETURN(res);
}
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index bdc08b7bd2d..857fe81c773 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -222,7 +222,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
}
- mark_fields_used_by_triggers_for_insert_stmt(thd, table, handle_duplicates);
+ prepare_triggers_for_insert_stmt(thd, table, handle_duplicates);
uint tot_length=0;
bool use_blobs= 0, use_vars= 0;
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index 7e0fadfa677..d2e3c52eef4 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -110,6 +110,11 @@ public:
const char *old_table,
const char *new_db,
const char *new_table);
+ bool has_triggers(trg_event_type event_type,
+ trg_action_time_type action_time)
+ {
+ return (bodies[event_type][action_time]);
+ }
bool has_delete_triggers()
{
return (bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] ||
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index d431b671f18..8ec4fcbbc4d 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -435,7 +435,19 @@ int mysql_update(THD *thd,
MODE_STRICT_ALL_TABLES)));
if (table->triggers)
+ {
table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
+ if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
+ TRG_ACTION_AFTER))
+ {
+ /*
+ The table has AFTER UPDATE triggers that might access the subject
+ table and therefore might need the update to be done immediately.
+ So we turn off batching.
+ */
+ (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
+ }
+ }
/*
We can use compare_record() to optimize away updates if
@@ -1000,6 +1012,20 @@ int multi_update::prepare(List<Item> &not_used_values,
table->no_keyread=1;
table->used_keys.clear_all();
table->pos_in_table_list= tl;
+ if (table->triggers)
+ {
+ table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
+ if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
+ TRG_ACTION_AFTER))
+ {
+ /*
+ The table has AFTER UPDATE triggers that might access the subject
+ table and therefore might need the update to be done immediately.
+ So we turn off batching.
+ */
+ (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
+ }
+ }
}
}