Diffstat (limited to 'sql/sql_class.cc')
-rw-r--r--  sql/sql_class.cc  84
1 file changed, 83 insertions, 1 deletion
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 21df4640f3f..566d4e9d2a2 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -174,7 +174,7 @@ THD::THD()
:Statement(CONVENTIONAL_EXECUTION, 0, ALLOC_ROOT_MIN_BLOCK_SIZE, 0),
Open_tables_state(refresh_version),
lock_id(&main_lock_id),
- user_time(0), in_sub_stmt(FALSE), global_read_lock(0), is_fatal_error(0),
+ user_time(0), in_sub_stmt(0), global_read_lock(0), is_fatal_error(0),
rand_used(0), time_zone_used(0),
last_insert_id_used(0), insert_id_used(0), clear_next_insert_id(0),
in_lock_tables(0), bootstrap(0), derived_tables_processing(FALSE),
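
The first hunk turns the in_sub_stmt initializer from a boolean into a plain integer, which matches its use further down as a bitmask of the kinds of sub-statements currently executing (reset_sub_statement_state() ORs new_state into it). A minimal sketch of the flag values that code appears to assume, presumably declared in sql_class.h; the exact names are an assumption and are not part of this diff:

  /* Assumed declarations (illustrative names, not from this change) */
  enum enum_sub_statement_state
  {
    SUB_STMT_TRIGGER=  1,   /* executing a trigger body (assumed name)    */
    SUB_STMT_FUNCTION= 2    /* executing a stored function (assumed name) */
  };

  /*
    in_sub_stmt == 0 means a top-level statement; nested execution sets and
    tests individual bits, e.g.  thd->in_sub_stmt|= SUB_STMT_FUNCTION;
    and later  if (thd->in_sub_stmt & SUB_STMT_TRIGGER) ...
  */
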
@@ -1836,3 +1836,85 @@ void THD::restore_backup_open_tables_state(Open_tables_state *backup)
set_open_tables_state(backup);
DBUG_VOID_RETURN;
}
+
+
+/****************************************************************************
+ Handling of statement states in functions and triggers.
+
+ This is used to ensure that the function/trigger gets a clean state
+ to work with and does not cause any side effects on the calling statement.
+
+ It also allows most stored functions and triggers to replicate even
+ if they use items that would normally be stored in the binary log
+ (like last_insert_id() etc.).
+
+ The following things are done:
+ - Disable binary logging for the duration of the statement
+ - Disable multi-result-sets for the duration of the statement
+ - Value of last_insert_id() is reset and restored
+ - Value set by 'SET INSERT_ID=#' is reset and restored
+ - Value for found_rows() is reset and restored
+ - examined_row_count is added to the total
+ - cuted_fields is added to the total
+
+ NOTES:
+ The seed for random() is saved only for the first usage of RAND().
+ We reset examined_row_count and cuted_fields and add them to the
+ totals to ensure that, if a bug were to reset these within a
+ function, we would not lose any rows from the main statement.
+****************************************************************************/
+
+void THD::reset_sub_statement_state(Sub_statement_state *backup,
+ uint new_state)
+{
+ backup->options= options;
+ backup->in_sub_stmt= in_sub_stmt;
+ backup->no_send_ok= net.no_send_ok;
+ backup->enable_slow_log= enable_slow_log;
+ backup->last_insert_id= last_insert_id;
+ backup->next_insert_id= next_insert_id;
+ backup->insert_id_used= insert_id_used;
+ backup->limit_found_rows= limit_found_rows;
+ backup->examined_row_count= examined_row_count;
+ backup->sent_row_count= sent_row_count;
+ backup->cuted_fields= cuted_fields;
+ backup->client_capabilities= client_capabilities;
+
+ options&= ~OPTION_BIN_LOG;
+ /* Disable result sets */
+ client_capabilities &= ~CLIENT_MULTI_RESULTS;
+ in_sub_stmt|= new_state;
+ last_insert_id= 0;
+ next_insert_id= 0;
+ insert_id_used= 0;
+ examined_row_count= 0;
+ sent_row_count= 0;
+ cuted_fields= 0;
+
+#ifndef EMBEDDED_LIBRARY
+ /* Suppress OK packets in case we execute statements */
+ net.no_send_ok= TRUE;
+#endif
+}
+
+
+void THD::restore_sub_statement_state(Sub_statement_state *backup)
+{
+ options= backup->options;
+ in_sub_stmt= backup->in_sub_stmt;
+ net.no_send_ok= backup->no_send_ok;
+ enable_slow_log= backup->enable_slow_log;
+ last_insert_id= backup->last_insert_id;
+ next_insert_id= backup->next_insert_id;
+ insert_id_used= backup->insert_id_used;
+ limit_found_rows= backup->limit_found_rows;
+ sent_row_count= backup->sent_row_count;
+ client_capabilities= backup->client_capabilities;
+
+ /*
+ The following is added to the old values as we are interested in the
+ total complexity of the query
+ */
+ examined_row_count+= backup->examined_row_count;
+ cuted_fields+= backup->cuted_fields;
+}