| author | unknown <mskold/marty@linux.site> | 2007-04-04 13:21:49 +0200 |
|---|---|---|
| committer | unknown <mskold/marty@linux.site> | 2007-04-04 13:21:49 +0200 |
| commit | 655a58d85c5b83824ceec5a7041bbb3b84b5731a (patch) | |
| tree | 625030adbf3bf1a60ae1260b13c5cfa9ce655ef6 /sql | |
| parent | 5abe2fdba4823370b50495ca1f926a514124785c (diff) | |
| parent | 2efc0f51cf3e80cd49c1ea0dee9a440936ee6287 (diff) | |
| download | mariadb-git-655a58d85c5b83824ceec5a7041bbb3b84b5731a.tar.gz | |
Merge mysql.com:/windows/Linux_space/MySQL/mysql-5.0
into mysql.com:/windows/Linux_space/MySQL/mysql-5.1
sql/ha_ndbcluster.h:
Auto merged
sql/sql_trigger.h:
Auto merged
mysql-test/r/ndb_trigger.result:
Using local, will re-generate.
include/my_base.h:
Merge
mysql-test/t/ndb_trigger.test:
Merge
sql/ha_ndbcluster.cc:
Merge
sql/mysql_priv.h:
Merge
sql/sql_delete.cc:
Merge
sql/sql_insert.cc:
Merge
sql/sql_load.cc:
Merge
sql/sql_update.cc:
Merge
Diffstat (limited to 'sql')
| -rw-r--r-- | sql/ha_ndbcluster.cc | 23 |
| -rw-r--r-- | sql/ha_ndbcluster.h | 2 |
| -rw-r--r-- | sql/mysql_priv.h | 2 |
| -rw-r--r-- | sql/sql_delete.cc | 26 |
| -rw-r--r-- | sql/sql_insert.cc | 45 |
| -rw-r--r-- | sql/sql_trigger.h | 5 |
| -rw-r--r-- | sql/sql_update.cc | 29 |
7 files changed, 126 insertions, 6 deletions
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index c363cddb8ba..cad41d9de99 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2992,8 +2992,13 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
     no_fields++;
     op->setValue(no_fields, part_func_value);
   }
-  // Execute update operation
-  if (!cursor && execute_no_commit(this,trans,FALSE) != 0) {
+  /*
+    Execute update operation if we are not doing a scan for update
+    and there exist UPDATE AFTER triggers
+  */
+
+  if ((!cursor || m_update_cannot_batch) &&
+      execute_no_commit(this,trans,false) != 0) {
     no_uncommitted_rows_execute_failure();
     DBUG_RETURN(ndb_err(trans));
   }
@@ -3047,7 +3052,7 @@ int ha_ndbcluster::delete_row(const byte *record)
 
     no_uncommitted_rows_update(-1);
 
-    if (!m_primary_key_update)
+    if (!(m_primary_key_update || m_delete_cannot_batch))
       // If deleting from cursor, NoCommit will be handled in next_result
       DBUG_RETURN(0);
   }
@@ -3886,7 +3891,13 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
     DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
     m_use_write= FALSE;
     break;
-  default:
+  case HA_EXTRA_DELETE_CANNOT_BATCH:
+    DBUG_PRINT("info", ("HA_EXTRA_DELETE_CANNOT_BATCH"));
+    m_delete_cannot_batch= TRUE;
+    break;
+  case HA_EXTRA_UPDATE_CANNOT_BATCH:
+    DBUG_PRINT("info", ("HA_EXTRA_UPDATE_CANNOT_BATCH"));
+    m_update_cannot_batch= TRUE;
     break;
   }
 
@@ -3897,6 +3908,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
 int ha_ndbcluster::reset()
 {
   DBUG_ENTER("ha_ndbcluster::reset");
+  m_delete_cannot_batch= FALSE;
+  m_update_cannot_batch= FALSE;
   cond_clear();
   /*
     Regular partition pruning will set the bitmap appropriately.
@@ -5826,6 +5839,8 @@ ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg):
   m_bulk_insert_rows((ha_rows) 1024),
   m_rows_changed((ha_rows) 0),
   m_bulk_insert_not_flushed(FALSE),
+  m_delete_cannot_batch(FALSE),
+  m_update_cannot_batch(FALSE),
   m_ops_pending(0),
   m_skip_auto_increment(TRUE),
   m_blobs_pending(0),
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 63665fde0f8..6dc32bdee1d 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -960,6 +960,8 @@ private:
   ha_rows m_bulk_insert_rows;
   ha_rows m_rows_changed;
   bool m_bulk_insert_not_flushed;
+  bool m_delete_cannot_batch;
+  bool m_update_cannot_batch;
   ha_rows m_ops_pending;
   bool m_skip_auto_increment;
   bool m_blobs_pending;
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index e8398e5ebb2..b3a7afd449a 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -974,6 +974,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
                   bool ignore);
 int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
                                            TABLE_LIST *table_list);
+void prepare_triggers_for_insert_stmt(THD *thd, TABLE *table,
+                                      enum_duplicates duplic);
 bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds);
 bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
                   SQL_LIST *order, ha_rows rows, ulonglong options,
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index ea8c0e2d83e..422b885d7bd 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -216,7 +216,20 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
     init_ftfuncs(thd, select_lex, 1);
   thd->proc_info="updating";
 
-  will_batch= !table->file->start_bulk_delete();
+  if (table->triggers &&
+      table->triggers->has_triggers(TRG_EVENT_DELETE,
+                                    TRG_ACTION_AFTER))
+  {
+    /*
+      The table has AFTER DELETE triggers that might access to subject table
+      and therefore might need delete to be done immediately. So we turn-off
+      the batching.
+    */
+    (void) table->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
+    will_batch= FALSE;
+  }
+  else
+    will_batch= !table->file->start_bulk_delete();
 
   table->mark_columns_needed_for_delete();
 
@@ -552,6 +565,17 @@ multi_delete::initialize_tables(JOIN *join)
       transactional_tables= 1;
     else
       normal_tables= 1;
+    if (tbl->triggers &&
+        tbl->triggers->has_triggers(TRG_EVENT_DELETE,
+                                    TRG_ACTION_AFTER))
+    {
+      /*
+        The table has AFTER DELETE triggers that might access to subject
+        table and therefore might need delete to be done immediately.
+        So we turn-off the batching.
+      */
+      (void) tbl->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
+    }
     tbl->prepare_for_position();
     tbl->mark_columns_needed_for_delete();
   }
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index aaffa09b978..af239c1f62b 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -341,6 +341,51 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
 }
 
 
+/*
+  Prepare triggers for INSERT-like statement.
+
+  SYNOPSIS
+    prepare_triggers_for_insert_stmt()
+      thd     The current thread
+      table   Table to which insert will happen
+      duplic  Type of duplicate handling for insert which will happen
+
+  NOTE
+    Prepare triggers for INSERT-like statement by marking fields
+    used by triggers and inform handlers that batching of UPDATE/DELETE
+    cannot be done if there are BEFORE UPDATE/DELETE triggers.
+*/
+
+void prepare_triggers_for_insert_stmt(THD *thd, TABLE *table,
+                                      enum_duplicates duplic)
+{
+  if (table->triggers)
+  {
+    if (table->triggers->has_triggers(TRG_EVENT_DELETE,
+                                      TRG_ACTION_AFTER))
+    {
+      /*
+        The table has AFTER DELETE triggers that might access to
+        subject table and therefore might need delete to be done
+        immediately. So we turn-off the batching.
+      */
+      (void) table->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
+    }
+    if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
+                                      TRG_ACTION_AFTER))
+    {
+      /*
+        The table has AFTER UPDATE triggers that might access to subject
+        table and therefore might need update to be done immediately.
+        So we turn-off the batching.
+      */
+      (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
+    }
+    mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic);
+  }
+}
+
+
 bool mysql_insert(THD *thd,TABLE_LIST *table_list,
                   List<Item> &fields,
                   List<List_item> &values_list,
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index 75dda6be1cf..2be42cbccac 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -110,6 +110,11 @@ public:
                                 const char *old_table,
                                 const char *new_db,
                                 const char *new_table);
+  bool has_triggers(trg_event_type event_type,
+                    trg_action_time_type action_time)
+  {
+    return (bodies[event_type][action_time]);
+  }
   bool has_delete_triggers()
   {
     return (bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] ||
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index baccb3358f7..7c25f21c14d 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -447,7 +447,20 @@ int mysql_update(THD *thd,
                  (thd->variables.sql_mode &
                   (MODE_STRICT_TRANS_TABLES |
                    MODE_STRICT_ALL_TABLES)));
-  will_batch= !table->file->start_bulk_update();
+  if (table->triggers &&
+      table->triggers->has_triggers(TRG_EVENT_UPDATE,
+                                    TRG_ACTION_AFTER))
+  {
+    /*
+      The table has AFTER UPDATE triggers that might access to subject
+      table and therefore might need update to be done immediately.
+      So we turn-off the batching.
+    */
+    (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
+    will_batch= FALSE;
+  }
+  else
+    will_batch= !table->file->start_bulk_update();
 
   /*
     We can use compare_record() to optimize away updates if
@@ -1116,6 +1129,20 @@ int multi_update::prepare(List<Item> &not_used_values,
       table->no_keyread=1;
       table->used_keys.clear_all();
       table->pos_in_table_list= tl;
+      if (table->triggers)
+      {
+        table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
+        if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
+                                          TRG_ACTION_AFTER))
+        {
+          /*
+            The table has AFTER UPDATE triggers that might access to subject
+            table and therefore might need update to be done immediately.
+            So we turn-off the batching.
+          */
+          (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
+        }
+      }
     }
   }
 
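For context on what this merge changes in behaviour: the new HA_EXTRA_DELETE_CANNOT_BATCH and HA_EXTRA_UPDATE_CANNOT_BATCH hints make ha_ndbcluster execute each row operation immediately (will_batch= FALSE, execute_no_commit per row) whenever the target table has AFTER DELETE or AFTER UPDATE triggers. Below is a minimal sketch of the kind of statement that now takes the non-batched path; the table, column, and trigger names (t1, t1_audit, t1_au) are hypothetical and are not taken from the patch or from mysql-test/t/ndb_trigger.test, and the example assumes a running NDB cluster.

```sql
-- Hypothetical schema: an NDB table with an AFTER UPDATE trigger.
CREATE TABLE t1 (id INT PRIMARY KEY, qty INT) ENGINE=NDBCLUSTER;
CREATE TABLE t1_audit (id INT, old_qty INT, new_qty INT) ENGINE=NDBCLUSTER;

CREATE TRIGGER t1_au AFTER UPDATE ON t1
FOR EACH ROW
  INSERT INTO t1_audit VALUES (OLD.id, OLD.qty, NEW.qty);

-- Because t1 has an AFTER UPDATE trigger, mysql_update() now calls
-- handler::extra(HA_EXTRA_UPDATE_CANNOT_BATCH) and sets will_batch= FALSE,
-- so ha_ndbcluster executes each update_row() right away instead of
-- deferring it, and the engine-side change is not left pending while the
-- per-row trigger runs.
UPDATE t1 SET qty = qty - 1 WHERE qty > 0;
```

The same reasoning applies to multi-table DELETE/UPDATE and to INSERT ... ON DUPLICATE KEY UPDATE / REPLACE via the new prepare_triggers_for_insert_stmt() helper, which issues the corresponding CANNOT_BATCH hints before the statement is executed.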