author     unknown <mskold/marty@mysql.com/linux.site>  2007-04-05 08:39:12 +0200
committer  unknown <mskold/marty@mysql.com/linux.site>  2007-04-05 08:39:12 +0200
commit     499d058c2d4a3a21ece73ccfccebca62672a6517
tree       65f485d2be885f33d90600d09460d7d0e3505d99 /sql
parent     865cd0108a718d0231daf98367f64fce0c46d3f3
parent     cfe2d1da886ea48c445076a3509b76ea1242e54d
Merge mysql.com:/windows/Linux_space/MySQL/mysql-5.0
into mysql.com:/windows/Linux_space/MySQL/mysql-5.0-ndb
include/my_base.h:
Auto merged
sql/ha_ndbcluster.h:
Auto merged
sql/mysql_priv.h:
Auto merged
sql/sql_delete.cc:
Auto merged
sql/sql_insert.cc:
Auto merged
sql/sql_load.cc:
Auto merged
sql/sql_trigger.h:
Auto merged
sql/sql_update.cc:
Auto merged
sql/ha_ndbcluster.cc:
Merge
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_ndbcluster.cc  | 25
-rw-r--r--  sql/ha_ndbcluster.h   |  2
-rw-r--r--  sql/mysql_priv.h      |  2
-rw-r--r--  sql/sql_delete.cc     | 24
-rw-r--r--  sql/sql_insert.cc     | 51
-rw-r--r--  sql/sql_load.cc       |  2
-rw-r--r--  sql/sql_trigger.h     |  5
-rw-r--r--  sql/sql_update.cc     | 26
8 files changed, 130 insertions, 7 deletions
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 84352d75ffe..7de9ef125b1 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2521,8 +2521,13 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
       ERR_RETURN(op->getNdbError());
   }
 
-  // Execute update operation
-  if (!cursor && execute_no_commit(this,trans,false) != 0) {
+  /*
+    Execute update operation if we are not doing a scan for update
+    and there exist UPDATE AFTER triggers
+  */
+
+  if ((!cursor || m_update_cannot_batch) &&
+      execute_no_commit(this,trans,false) != 0) {
     no_uncommitted_rows_execute_failure();
     DBUG_RETURN(ndb_err(trans));
   }
@@ -2563,7 +2568,7 @@ int ha_ndbcluster::delete_row(const byte *record)
 
     no_uncommitted_rows_update(-1);
 
-    if (!m_primary_key_update)
+    if (!(m_primary_key_update || m_delete_cannot_batch))
       // If deleting from cursor, NoCommit will be handled in next_result
       DBUG_RETURN(0);
   }
@@ -3404,6 +3409,16 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
     DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
     m_use_write= FALSE;
     break;
+  case HA_EXTRA_DELETE_CANNOT_BATCH:
+    DBUG_PRINT("info", ("HA_EXTRA_DELETE_CANNOT_BATCH"));
+    m_delete_cannot_batch= TRUE;
+    break;
+  case HA_EXTRA_UPDATE_CANNOT_BATCH:
+    DBUG_PRINT("info", ("HA_EXTRA_UPDATE_CANNOT_BATCH"));
+    m_update_cannot_batch= TRUE;
+    break;
+  default:
+    break;
   }
 
   DBUG_RETURN(0);
@@ -3420,6 +3435,8 @@ int ha_ndbcluster::reset()
   m_retrieve_primary_key= FALSE;
   m_ignore_dup_key= FALSE;
   m_use_write= FALSE;
+  m_delete_cannot_batch= FALSE;
+  m_update_cannot_batch= FALSE;
 
   DBUG_RETURN(0);
 }
@@ -4786,6 +4803,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
   m_bulk_insert_rows((ha_rows) 1024),
   m_rows_changed((ha_rows) 0),
   m_bulk_insert_not_flushed(FALSE),
+  m_delete_cannot_batch(FALSE),
+  m_update_cannot_batch(FALSE),
   m_ops_pending(0),
   m_skip_auto_increment(TRUE),
   m_blobs_pending(0),
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 6b49b0e3c70..3495f35e10f 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -774,6 +774,8 @@ bool uses_blob_value(bool all_fields);
   ha_rows m_bulk_insert_rows;
   ha_rows m_rows_changed;
   bool m_bulk_insert_not_flushed;
+  bool m_delete_cannot_batch;
+  bool m_update_cannot_batch;
   ha_rows m_ops_pending;
   bool m_skip_auto_increment;
   bool m_blobs_pending;
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index de567eacbeb..39f115f6fd5 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -844,6 +844,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
                   bool ignore);
 int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
                                            TABLE_LIST *table_list);
+void prepare_triggers_for_insert_stmt(THD *thd, TABLE *table,
+                                      enum_duplicates duplic);
 void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
                                                   enum_duplicates duplic);
 bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds);
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index e653324d9bf..19f3135c594 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -217,7 +217,19 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
 
   thd->proc_info="updating";
   if (table->triggers)
+  {
     table->triggers->mark_fields_used(thd, TRG_EVENT_DELETE);
+    if (table->triggers->has_triggers(TRG_EVENT_DELETE,
+                                      TRG_ACTION_AFTER))
+    {
+      /*
+        The table has AFTER DELETE triggers that might access to subject table
+        and therefore might need delete to be done immediately. So we turn-off
+        the batching.
+      */
+      (void) table->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
+    }
+  }
   while (!(error=info.read_record(&info)) && !thd->killed &&
          !thd->net.report_error)
@@ -540,7 +552,19 @@ multi_delete::initialize_tables(JOIN *join)
       else
         normal_tables= 1;
       if (tbl->triggers)
+      {
         tbl->triggers->mark_fields_used(thd, TRG_EVENT_DELETE);
+        if (tbl->triggers->has_triggers(TRG_EVENT_DELETE,
+                                        TRG_ACTION_AFTER))
+        {
+          /*
+            The table has AFTER DELETE triggers that might access to subject
+            table and therefore might need delete to be done immediately.
+            So we turn-off the batching.
+          */
+          (void) tbl->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
+        }
+      }
     }
     else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
              walk == delete_tables)
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 0fa027f89d6..955f83de25e 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -330,6 +330,51 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
 
 
 /*
+  Prepare triggers for INSERT-like statement.
+
+  SYNOPSIS
+    prepare_triggers_for_insert_stmt()
+      thd     The current thread
+      table   Table to which insert will happen
+      duplic  Type of duplicate handling for insert which will happen
+
+  NOTE
+    Prepare triggers for INSERT-like statement by marking fields
+    used by triggers and inform handlers that batching of UPDATE/DELETE
+    cannot be done if there are BEFORE UPDATE/DELETE triggers.
+*/
+
+void prepare_triggers_for_insert_stmt(THD *thd, TABLE *table,
+                                      enum_duplicates duplic)
+{
+  if (table->triggers)
+  {
+    if (table->triggers->has_triggers(TRG_EVENT_DELETE,
+                                      TRG_ACTION_AFTER))
+    {
+      /*
+        The table has AFTER DELETE triggers that might access to
+        subject table and therefore might need delete to be done
+        immediately. So we turn-off the batching.
+      */
+      (void) table->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
+    }
+    if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
+                                      TRG_ACTION_AFTER))
+    {
+      /*
+        The table has AFTER UPDATE triggers that might access to subject
+        table and therefore might need update to be done immediately.
+        So we turn-off the batching.
+      */
+      (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
+    }
+    mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic);
+  }
+}
+
+
+/*
   Mark fields used by triggers for INSERT-like statement.
 
   SYNOPSIS
@@ -589,7 +634,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
                                     (MODE_STRICT_TRANS_TABLES |
                                      MODE_STRICT_ALL_TABLES)));
 
-  mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic);
+  prepare_triggers_for_insert_stmt(thd, table, duplic);
 
   if (table_list->prepare_where(thd, 0, TRUE) ||
       table_list->prepare_check_option(thd))
@@ -2528,8 +2573,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
            table_list->prepare_check_option(thd));
 
   if (!res)
-    mark_fields_used_by_triggers_for_insert_stmt(thd, table,
-                                                 info.handle_duplicates);
+    prepare_triggers_for_insert_stmt(thd, table,
+                                     info.handle_duplicates);
 
   DBUG_RETURN(res);
 }
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 7a535381c01..6984da6a0b0 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -222,7 +222,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
     DBUG_RETURN(TRUE);
   }
 
-  mark_fields_used_by_triggers_for_insert_stmt(thd, table, handle_duplicates);
+  prepare_triggers_for_insert_stmt(thd, table, handle_duplicates);
 
   uint tot_length=0;
   bool use_blobs= 0, use_vars= 0;
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index 19b2b24a3fe..b029a70ca20 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -110,6 +110,11 @@ public:
                       const char *old_table,
                       const char *new_db,
                       const char *new_table);
+  bool has_triggers(trg_event_type event_type,
+                    trg_action_time_type action_time)
+  {
+    return (bodies[event_type][action_time] != NULL);
+  }
   bool has_delete_triggers()
   {
     return (bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] ||
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 27d38114885..e7e54d8ca1d 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -436,7 +436,19 @@ int mysql_update(THD *thd,
                                     MODE_STRICT_ALL_TABLES)));
 
   if (table->triggers)
+  {
     table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
+    if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
+                                      TRG_ACTION_AFTER))
+    {
+      /*
+        The table has AFTER UPDATE triggers that might access to subject
+        table and therefore might need update to be done immediately.
+        So we turn-off the batching.
+      */
+      (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
+    }
+  }
 
   /*
     We can use compare_record() to optimize away updates if
@@ -1001,6 +1013,20 @@ int multi_update::prepare(List<Item> &not_used_values,
       table->no_keyread=1;
       table->used_keys.clear_all();
       table->pos_in_table_list= tl;
+      if (table->triggers)
+      {
+        table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
+        if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
+                                          TRG_ACTION_AFTER))
+        {
+          /*
+            The table has AFTER UPDATE triggers that might access to subject
+            table and therefore might need update to be done immediately.
+            So we turn-off the batching.
+          */
+          (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
+        }
+      }
     }
   }
 
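Editor's note on the pattern in this patch: the SQL layer checks at statement
preparation time whether the target table has AFTER UPDATE/DELETE triggers
(via the new has_triggers()), and if so tells the handler through
handler::extra(HA_EXTRA_UPDATE_CANNOT_BATCH / HA_EXTRA_DELETE_CANNOT_BATCH)
that row operations must be executed immediately rather than batched; the NDB
handler records this in m_update_cannot_batch / m_delete_cannot_batch and
clears the flags in reset(). The sketch below is a minimal, self-contained
model of that handshake, not the server code: model_handler, the
MODEL_EXTRA_* enum, and the printf output are invented for illustration; the
real flow lives in ha_ndbcluster::extra(), update_row(), delete_row(), and the
has_triggers() checks shown above.

// Simplified model of "disable batching when AFTER triggers exist".
#include <cstdio>

enum ha_extra_function_model {            // hypothetical stand-in for ha_extra_function
  MODEL_EXTRA_DELETE_CANNOT_BATCH,
  MODEL_EXTRA_UPDATE_CANNOT_BATCH
};

class model_handler {                     // hypothetical stand-in for ha_ndbcluster
  bool m_delete_cannot_batch{false};
  bool m_update_cannot_batch{false};
  unsigned m_ops_pending{0};

public:
  // Counterpart of handler::extra(): remember that batching must be off.
  void extra(ha_extra_function_model op) {
    if (op == MODEL_EXTRA_DELETE_CANNOT_BATCH) m_delete_cannot_batch = true;
    if (op == MODEL_EXTRA_UPDATE_CANNOT_BATCH) m_update_cannot_batch = true;
  }

  // Counterpart of update_row(): execute now if batching is disallowed,
  // otherwise defer the round trip (execute_no_commit in the real handler).
  void update_row(bool scanning_cursor) {
    if (!scanning_cursor || m_update_cannot_batch) {
      std::printf("execute_no_commit: flush %u pending op(s)\n", m_ops_pending + 1);
      m_ops_pending = 0;
    } else {
      ++m_ops_pending;                    // batched; flushed later by the scan
    }
  }

  // Counterpart of reset(): the flags apply per statement.
  void reset() { m_delete_cannot_batch = m_update_cannot_batch = false; }
};

int main() {
  model_handler h;
  h.update_row(true);                         // cursor update, no AFTER trigger: batched
  h.extra(MODEL_EXTRA_UPDATE_CANNOT_BATCH);   // SQL layer found an AFTER UPDATE trigger
  h.update_row(true);                         // now executed immediately
  h.reset();                                  // next statement starts with batching allowed
  return 0;
}

The point of the design is that the decision is made once per statement in the
SQL layer, while the storage engine only has to honor a boolean hint, so AFTER
triggers see each row change applied before they run.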