author     unknown <mkindahl@dl145h.mysql.com>  2007-11-21 21:11:57 +0100
committer  unknown <mkindahl@dl145h.mysql.com>  2007-11-21 21:11:57 +0100
commit     bb685d188f1501834669ffc32841823edfc19450 (patch)
tree       d6d276ce156d5e7b36b11418a634339486f9c098 /sql
parent     0bdc06e88a9cfc170edd26ceca5c5164dc2e75be (diff)
parent     ccce6b288f69e993d717d0fdc7e663ed03f5c7a5 (diff)
download   mariadb-git-bb685d188f1501834669ffc32841823edfc19450.tar.gz
Merge dl145h.mysql.com:/data0/mkindahl/mysql-5.0
into dl145h.mysql.com:/data0/mkindahl/mysql-5.0-rpl-merge

client/mysql.cc:                Auto merged
mysql-test/r/ctype_ucs.result:  Auto merged
mysql-test/t/ctype_uca.test:    Auto merged
mysql-test/t/ctype_ucs.test:    Auto merged
mysql-test/t/subselect.test:    Auto merged
sql/item_cmpfunc.cc:            Auto merged
sql/mysqld.cc:                  Auto merged
sql/sql_class.h:                Auto merged
Diffstat (limited to 'sql')
-rw-r--r--  sql/item_cmpfunc.cc  | 122
-rw-r--r--  sql/item_cmpfunc.h   |   4
-rw-r--r--  sql/log.cc           |  11
-rw-r--r--  sql/log_event.cc     | 173
-rw-r--r--  sql/log_event.h      |  10
-rw-r--r--  sql/mysqld.cc        |   1
-rw-r--r--  sql/slave.cc         |  69
-rw-r--r--  sql/sp_head.cc       |   5
-rw-r--r--  sql/sql_class.h      |  18
-rw-r--r--  sql/sql_delete.cc    |  51
-rw-r--r--  sql/sql_insert.cc    |  81
-rw-r--r--  sql/sql_load.cc      |  28
-rw-r--r--  sql/sql_parse.cc     |  18
-rw-r--r--  sql/sql_repl.cc      |  10
-rw-r--r--  sql/sql_update.cc    |  94
15 files changed, 495 insertions(+), 200 deletions(-)
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 85ec8fa40d6..6bdba0f5a8a 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -4281,6 +4281,51 @@ void Item_func_like::cleanup()
#ifdef USE_REGEX
bool
+Item_func_regex::regcomp(bool send_error)
+{
+ char buff[MAX_FIELD_WIDTH];
+ String tmp(buff,sizeof(buff),&my_charset_bin);
+ String *res= args[1]->val_str(&tmp);
+ int error;
+
+ if (args[1]->null_value)
+ return TRUE;
+
+ if (regex_compiled)
+ {
+ if (!stringcmp(res, &prev_regexp))
+ return FALSE;
+ prev_regexp.copy(*res);
+ my_regfree(&preg);
+ regex_compiled= 0;
+ }
+
+ if (cmp_collation.collation != regex_lib_charset)
+ {
+ /* Convert UCS2 strings to UTF8 */
+ uint dummy_errors;
+ if (conv.copy(res->ptr(), res->length(), res->charset(),
+ regex_lib_charset, &dummy_errors))
+ return TRUE;
+ res= &conv;
+ }
+
+ if ((error= my_regcomp(&preg, res->c_ptr_safe(),
+ regex_lib_flags, regex_lib_charset)))
+ {
+ if (send_error)
+ {
+ (void) my_regerror(error, &preg, buff, sizeof(buff));
+ my_error(ER_REGEXP_ERROR, MYF(0), buff);
+ }
+ return TRUE;
+ }
+ regex_compiled= 1;
+ return FALSE;
+}
+
+
+bool
Item_func_regex::fix_fields(THD *thd, Item **ref)
{
DBUG_ASSERT(fixed == 0);
@@ -4296,35 +4341,34 @@ Item_func_regex::fix_fields(THD *thd, Item **ref)
if (agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1))
return TRUE;
+ regex_lib_flags= (cmp_collation.collation->state &
+ (MY_CS_BINSORT | MY_CS_CSSORT)) ?
+ REG_EXTENDED | REG_NOSUB :
+ REG_EXTENDED | REG_NOSUB | REG_ICASE;
+ /*
+ In the case of UCS2 and other non-ASCII character sets,
+ we will convert patterns and strings to UTF8.
+ */
+ regex_lib_charset= (cmp_collation.collation->mbminlen > 1) ?
+ &my_charset_utf8_general_ci :
+ cmp_collation.collation;
+
used_tables_cache=args[0]->used_tables() | args[1]->used_tables();
not_null_tables_cache= (args[0]->not_null_tables() |
args[1]->not_null_tables());
const_item_cache=args[0]->const_item() && args[1]->const_item();
if (!regex_compiled && args[1]->const_item())
{
- char buff[MAX_FIELD_WIDTH];
- String tmp(buff,sizeof(buff),&my_charset_bin);
- String *res=args[1]->val_str(&tmp);
if (args[1]->null_value)
{ // Will always return NULL
maybe_null=1;
fixed= 1;
return FALSE;
}
- int error;
- if ((error= my_regcomp(&preg,res->c_ptr(),
- ((cmp_collation.collation->state &
- (MY_CS_BINSORT | MY_CS_CSSORT)) ?
- REG_EXTENDED | REG_NOSUB :
- REG_EXTENDED | REG_NOSUB | REG_ICASE),
- cmp_collation.collation)))
- {
- (void) my_regerror(error,&preg,buff,sizeof(buff));
- my_error(ER_REGEXP_ERROR, MYF(0), buff);
+ if (regcomp(TRUE))
return TRUE;
- }
- regex_compiled=regex_is_const=1;
- maybe_null=args[0]->maybe_null;
+ regex_is_const= 1;
+ maybe_null= args[0]->maybe_null;
}
else
maybe_null=1;
@@ -4337,47 +4381,25 @@ longlong Item_func_regex::val_int()
{
DBUG_ASSERT(fixed == 1);
char buff[MAX_FIELD_WIDTH];
- String *res, tmp(buff,sizeof(buff),&my_charset_bin);
+ String tmp(buff,sizeof(buff),&my_charset_bin);
+ String *res= args[0]->val_str(&tmp);
- res=args[0]->val_str(&tmp);
- if (args[0]->null_value)
- {
- null_value=1;
+ if ((null_value= (args[0]->null_value ||
+ (!regex_is_const && regcomp(FALSE)))))
return 0;
- }
- if (!regex_is_const)
- {
- char buff2[MAX_FIELD_WIDTH];
- String *res2, tmp2(buff2,sizeof(buff2),&my_charset_bin);
- res2= args[1]->val_str(&tmp2);
- if (args[1]->null_value)
+ if (cmp_collation.collation != regex_lib_charset)
+ {
+ /* Convert UCS2 strings to UTF8 */
+ uint dummy_errors;
+ if (conv.copy(res->ptr(), res->length(), res->charset(),
+ regex_lib_charset, &dummy_errors))
{
- null_value=1;
+ null_value= 1;
return 0;
}
- if (!regex_compiled || stringcmp(res2,&prev_regexp))
- {
- prev_regexp.copy(*res2);
- if (regex_compiled)
- {
- my_regfree(&preg);
- regex_compiled=0;
- }
- if (my_regcomp(&preg,res2->c_ptr_safe(),
- ((cmp_collation.collation->state &
- (MY_CS_BINSORT | MY_CS_CSSORT)) ?
- REG_EXTENDED | REG_NOSUB :
- REG_EXTENDED | REG_NOSUB | REG_ICASE),
- cmp_collation.collation))
- {
- null_value=1;
- return 0;
- }
- regex_compiled=1;
- }
+ res= &conv;
}
- null_value=0;
return my_regexec(&preg,res->c_ptr_safe(),0,(my_regmatch_t*) 0,0) ? 0 : 1;
}
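
For illustration, a minimal standalone sketch of the compile-once, recompile-on-change idea that the new Item_func_regex::regcomp() factors out above; it uses std::regex and std::string as stand-ins for my_regcomp() and String, so none of the names below come from the server sources:

// Sketch only: cache the last pattern and recompile on change, as regcomp() does.
#include <optional>
#include <regex>
#include <string>

struct CachedRegex {
  std::string prev_pattern;        // plays the role of prev_regexp
  std::optional<std::regex> preg;  // plays the role of the compiled preg

  // Returns true on failure, mirroring the TRUE-on-error convention above.
  bool compile(const std::string &pattern, bool case_insensitive) {
    if (preg && pattern == prev_pattern)
      return false;                       // already compiled for this pattern
    prev_pattern = pattern;
    auto flags = std::regex::extended;    // REG_EXTENDED analogue
    if (case_insensitive)
      flags |= std::regex::icase;         // REG_ICASE analogue
    try {
      preg.emplace(pattern, flags);       // recompile for the new pattern
    } catch (const std::regex_error &) {
      preg.reset();
      return true;                        // compilation failed
    }
    return false;
  }

  bool match(const std::string &subject) const {
    return preg && std::regex_search(subject, *preg);
  }
};

With a constant pattern the compile step is paid once (as in fix_fields()); with a non-constant pattern it is paid only when the pattern value actually changes, which is exactly why prev_regexp is cached.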
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 4d3df7aebf9..735081d931f 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -1318,6 +1318,10 @@ class Item_func_regex :public Item_bool_func
bool regex_is_const;
String prev_regexp;
DTCollation cmp_collation;
+ CHARSET_INFO *regex_lib_charset;
+ int regex_lib_flags;
+ String conv;
+ bool regcomp(bool send_error);
public:
Item_func_regex(Item *a,Item *b) :Item_bool_func(a,b),
regex_compiled(0),regex_is_const(0) {}
diff --git a/sql/log.cc b/sql/log.cc
index e9aa273676a..af03cecd462 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -448,13 +448,10 @@ const char *MYSQL_LOG::generate_name(const char *log_name,
{
if (!log_name || !log_name[0])
{
- /*
- TODO: The following should be using fn_format(); We just need to
- first change fn_format() to cut the file name if it's too long.
- */
- strmake(buff, pidfile_name,FN_REFLEN-5);
- strmov(fn_ext(buff),suffix);
- return (const char *)buff;
+ strmake(buff, pidfile_name, FN_REFLEN - strlen(suffix) - 1);
+ return (const char *)
+ fn_format(buff, buff, "", suffix, MYF(MY_REPLACE_EXT|MY_REPLACE_DIR));
+
}
// get rid of extension if the log is binary to avoid problems
if (strip_ext)
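
The generate_name() change above truncates the default base name so that the suffix is guaranteed to fit into an FN_REFLEN-sized buffer before the extension is replaced. A rough standalone sketch of that intent, ignoring fn_format()'s directory handling; REF_LEN and default_log_name() are illustrative stand-ins, not server code, and the sketch assumes the suffix is shorter than the buffer:

#include <cstddef>
#include <cstdio>
#include <cstring>

enum { REF_LEN = 512 };  // stand-in for FN_REFLEN

// Copy a truncated base name, drop its extension, and append the suffix.
void default_log_name(char (&buff)[REF_LEN], const char *base, const char *suffix) {
  size_t keep = REF_LEN - strlen(suffix) - 1;  // leave room for suffix + NUL
  snprintf(buff, keep + 1, "%s", base);        // truncated copy of the base
  if (char *dot = strrchr(buff, '.'))          // drop any old extension
    *dot = '\0';
  strncat(buff, suffix, REF_LEN - strlen(buff) - 1);
}

int main() {
  char buff[REF_LEN];
  default_log_name(buff, "myhost.pid", ".err");  // -> "myhost.err"
  puts(buff);
  return 0;
}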
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 1ef765f607f..d22973d12a3 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -27,6 +27,10 @@
#define log_cs &my_charset_latin1
+#ifndef DBUG_OFF
+uint debug_not_change_ts_if_art_event= 1; // bug#29309 simulation
+#endif
+
/*
pretty_print_str()
*/
@@ -481,6 +485,18 @@ int Log_event::exec_event(struct st_relay_log_info* rli)
rli->inc_event_relay_log_pos();
else
{
+ /*
+ bug#29309 simulation: resetting the flag to force
+ wrong behaviour of artificial event to update
+ rli->last_master_timestamp for only one time -
+ the first FLUSH LOGS in the test.
+ */
+ DBUG_EXECUTE_IF("let_first_flush_log_change_timestamp",
+ if (debug_not_change_ts_if_art_event == 1
+ && is_artificial_event())
+ {
+ debug_not_change_ts_if_art_event= 0;
+ });
rli->inc_group_relay_log_pos(log_pos);
flush_relay_log_info(rli);
/*
@@ -491,7 +507,21 @@ int Log_event::exec_event(struct st_relay_log_info* rli)
rare cases, only consequence is that value may take some time to
display in Seconds_Behind_Master - not critical).
*/
- rli->last_master_timestamp= when;
+#ifndef DBUG_OFF
+ if (!(is_artificial_event() && debug_not_change_ts_if_art_event > 0))
+#else
+ if (!is_artificial_event())
+#endif
+ rli->last_master_timestamp= when;
+ /*
+ The flag is set back to be positive so that
+ any further FLUSH LOGS will be handled as prescribed.
+ */
+ DBUG_EXECUTE_IF("let_first_flush_log_change_timestamp",
+ if (debug_not_change_ts_if_art_event == 0)
+ {
+ debug_not_change_ts_if_art_event= 2;
+ });
}
}
DBUG_RETURN(0);
@@ -1370,18 +1400,48 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
/* 2 utility functions for the next method */
-/*
- Get the pointer for a string (src) that contains the length in
- the first byte. Set the output string (dst) to the string value
- and place the length of the string in the byte after the string.
+/**
+ Read a string with length from memory.
+
+ This function reads the string-with-length stored at
+ <code>src</code> and extract the length into <code>*len</code> and
+ a pointer to the start of the string into <code>*dst</code>. The
+ string can then be copied using <code>memcpy()</code> with the
+ number of bytes given in <code>*len</code>.
+
+ @param src Pointer to variable holding a pointer to the memory to
+ read the string from.
+ @param dst Pointer to variable holding a pointer where the actual
+ string starts. Starting from this position, the string
+ can be copied using @c memcpy().
+ @param len Pointer to variable where the length will be stored.
+ @param end One-past-the-end of the memory where the string is
+ stored.
+
+ @return Zero if the entire string can be copied successfully,
+ @c UINT_MAX if the length could not be read from memory
+ (that is, if <code>*src >= end</code>), otherwise the
+ number of bytes that are missing to read the full
+ string, which happens when <code>*dst + *len >= end</code>.
*/
-static void get_str_len_and_pointer(const Log_event::Byte **src,
- const char **dst,
- uint *len)
-{
- if ((*len= **src))
- *dst= (char *)*src + 1; // Will be copied later
- (*src)+= *len + 1;
+static int
+get_str_len_and_pointer(const Log_event::Byte **src,
+ const char **dst,
+ uint *len,
+ const Log_event::Byte *end)
+{
+ if (*src >= end)
+ return -1; // Will be UINT_MAX in two's-complement arithmetic
+ uint length= **src;
+ if (length > 0)
+ {
+ if (*src + length >= end)
+ return *src + length - end + 1; // Number of bytes missing
+ *dst= (char *)*src + 1; // Will be copied later
+ }
+ *len= length;
+ *src+= length + 1;
+ return 0;
}
static void copy_str_and_move(const char **src,
@@ -1394,6 +1454,46 @@ static void copy_str_and_move(const char **src,
*(*dst)++= 0;
}
+
+#ifndef DBUG_OFF
+static char const *
+code_name(int code)
+{
+ static char buf[255];
+ switch (code) {
+ case Q_FLAGS2_CODE: return "Q_FLAGS2_CODE";
+ case Q_SQL_MODE_CODE: return "Q_SQL_MODE_CODE";
+ case Q_CATALOG_CODE: return "Q_CATALOG_CODE";
+ case Q_AUTO_INCREMENT: return "Q_AUTO_INCREMENT";
+ case Q_CHARSET_CODE: return "Q_CHARSET_CODE";
+ case Q_TIME_ZONE_CODE: return "Q_TIME_ZONE_CODE";
+ case Q_CATALOG_NZ_CODE: return "Q_CATALOG_NZ_CODE";
+ case Q_LC_TIME_NAMES_CODE: return "Q_LC_TIME_NAMES_CODE";
+ case Q_CHARSET_DATABASE_CODE: return "Q_CHARSET_DATABASE_CODE";
+ }
+ sprintf(buf, "CODE#%d", code);
+ return buf;
+}
+#endif
+
+/**
+ Macro to check that there is enough space to read from memory.
+
+ @param PTR Pointer to memory
+ @param END End of memory
+ @param CNT Number of bytes that should be read.
+ */
+#define CHECK_SPACE(PTR,END,CNT) \
+ do { \
+ DBUG_PRINT("info", ("Read %s", code_name(pos[-1]))); \
+ DBUG_ASSERT((PTR) + (CNT) <= (END)); \
+ if ((PTR) + (CNT) > (END)) { \
+ DBUG_PRINT("info", ("query= 0")); \
+ query= 0; \
+ DBUG_VOID_RETURN; \
+ } \
+ } while (0)
+
/*
Query_log_event::Query_log_event()
This is used by the SQL slave thread to prepare the event before execution.
@@ -1445,6 +1545,19 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
if (tmp)
{
status_vars_len= uint2korr(buf + Q_STATUS_VARS_LEN_OFFSET);
+ /*
+ Check if status variable length is corrupt and will lead to very
+ wrong data. We could be even more strict and require data_len to
+ be even bigger, but this will suffice to catch most corruption
+ errors that can lead to a crash.
+ */
+ if (status_vars_len > min(data_len, MAX_SIZE_LOG_EVENT_STATUS))
+ {
+ DBUG_PRINT("info", ("status_vars_len (%u) > data_len (%lu); query= 0",
+ status_vars_len, data_len));
+ query= 0;
+ DBUG_VOID_RETURN;
+ }
data_len-= status_vars_len;
DBUG_PRINT("info", ("Query_log_event has status_vars_len: %u",
(uint) status_vars_len));
@@ -1464,6 +1577,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
{
switch (*pos++) {
case Q_FLAGS2_CODE:
+ CHECK_SPACE(pos, end, 4);
flags2_inited= 1;
flags2= uint4korr(pos);
DBUG_PRINT("info",("In Query_log_event, read flags2: %lu", (ulong) flags2));
@@ -1474,6 +1588,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
#ifndef DBUG_OFF
char buff[22];
#endif
+ CHECK_SPACE(pos, end, 8);
sql_mode_inited= 1;
sql_mode= (ulong) uint8korr(pos); // QQ: Fix when sql_mode is ulonglong
DBUG_PRINT("info",("In Query_log_event, read sql_mode: %s",
@@ -1482,15 +1597,24 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
break;
}
case Q_CATALOG_NZ_CODE:
- get_str_len_and_pointer(&pos, &catalog, &catalog_len);
+ DBUG_PRINT("info", ("case Q_CATALOG_NZ_CODE; pos: 0x%lx; end: 0x%lx",
+ (ulong) pos, (ulong) end));
+ if (get_str_len_and_pointer(&pos, &catalog, &catalog_len, end))
+ {
+ DBUG_PRINT("info", ("query= 0"));
+ query= 0;
+ DBUG_VOID_RETURN;
+ }
break;
case Q_AUTO_INCREMENT:
+ CHECK_SPACE(pos, end, 4);
auto_increment_increment= uint2korr(pos);
auto_increment_offset= uint2korr(pos+2);
pos+= 4;
break;
case Q_CHARSET_CODE:
{
+ CHECK_SPACE(pos, end, 6);
charset_inited= 1;
memcpy(charset, pos, 6);
pos+= 6;
@@ -1498,20 +1622,29 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
}
case Q_TIME_ZONE_CODE:
{
- get_str_len_and_pointer(&pos, &time_zone_str, &time_zone_len);
+ if (get_str_len_and_pointer(&pos, &time_zone_str, &time_zone_len, end))
+ {
+ DBUG_PRINT("info", ("Q_TIME_ZONE_CODE: query= 0"));
+ query= 0;
+ DBUG_VOID_RETURN;
+ }
break;
}
case Q_CATALOG_CODE: /* for 5.0.x where 0<=x<=3 masters */
+ CHECK_SPACE(pos, end, 1);
if ((catalog_len= *pos))
catalog= (char*) pos+1; // Will be copied later
+ CHECK_SPACE(pos, end, catalog_len + 2);
pos+= catalog_len+2; // leap over end 0
catalog_nz= 0; // catalog has end 0 in event
break;
case Q_LC_TIME_NAMES_CODE:
+ CHECK_SPACE(pos, end, 2);
lc_time_names_number= uint2korr(pos);
pos+= 2;
break;
case Q_CHARSET_DATABASE_CODE:
+ CHECK_SPACE(pos, end, 2);
charset_database_number= uint2korr(pos);
pos+= 2;
break;
@@ -2021,6 +2154,7 @@ end:
*/
thd->catalog= 0;
thd->set_db(NULL, 0); /* will free the current database */
+ DBUG_PRINT("info", ("end: query= 0"));
thd->query= 0; // just to be sure
thd->query_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
@@ -4964,12 +5098,13 @@ int Begin_load_query_log_event::get_create_or_append() const
#ifndef MYSQL_CLIENT
Execute_load_query_log_event::
Execute_load_query_log_event(THD* thd_arg, const char* query_arg,
- ulong query_length_arg, uint fn_pos_start_arg,
- uint fn_pos_end_arg,
- enum_load_dup_handling dup_handling_arg,
- bool using_trans, bool suppress_use):
+ ulong query_length_arg, uint fn_pos_start_arg,
+ uint fn_pos_end_arg,
+ enum_load_dup_handling dup_handling_arg,
+ bool using_trans, bool suppress_use,
+ THD::killed_state killed_err_arg):
Query_log_event(thd_arg, query_arg, query_length_arg, using_trans,
- suppress_use),
+ suppress_use, killed_err_arg),
file_id(thd_arg->file_id), fn_pos_start(fn_pos_start_arg),
fn_pos_end(fn_pos_end_arg), dup_handling(dup_handling_arg)
{
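
The doxygen comment added for get_str_len_and_pointer() above spells out its bounds-checked contract: read a one-byte length, then hand back a pointer/length pair only if the whole string lies before end. A self-contained sketch of that contract with stand-in types (read_len_prefixed is a made-up name, not the server function); the fixed-size status variables get the equivalent protection from the CHECK_SPACE macro:

#include <cstddef>
#include <cstdint>

// Returns 0 on success, -1 if not even the length byte fits, otherwise the
// number of bytes missing before 'end', mirroring the contract documented above.
static int read_len_prefixed(const uint8_t **src, const uint8_t *end,
                             const uint8_t **str, size_t *len) {
  if (*src >= end)
    return -1;                             // length byte itself is out of bounds
  size_t length = **src;
  if (length > 0 && *src + length >= end)
    return (int)(*src + length - end + 1); // bytes missing to read the string
  *str = *src + 1;                         // string starts after the length byte
  *len = length;
  *src += length + 1;                      // advance past length byte + string
  return 0;
}
// Example: the bytes {0x03,'a','b','c'} parse cleanly, while the same bytes
// with 'end' one byte earlier make the function report 1 missing byte.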
diff --git a/sql/log_event.h b/sql/log_event.h
index 04aac5d08fc..5b065a33dd1 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -1619,10 +1619,12 @@ public:
#ifndef MYSQL_CLIENT
Execute_load_query_log_event(THD* thd, const char* query_arg,
- ulong query_length, uint fn_pos_start_arg,
- uint fn_pos_end_arg,
- enum_load_dup_handling dup_handling_arg,
- bool using_trans, bool suppress_use);
+ ulong query_length, uint fn_pos_start_arg,
+ uint fn_pos_end_arg,
+ enum_load_dup_handling dup_handling_arg,
+ bool using_trans, bool suppress_use,
+ THD::killed_state
+ killed_err_arg= THD::KILLED_NO_VALUE);
#ifdef HAVE_REPLICATION
void pack_info(Protocol* protocol);
int exec_event(struct st_relay_log_info* rli);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 4c459d34a55..f8705a773c6 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -2862,7 +2862,6 @@ static int init_common_variables(const char *conf_file_name, int argc,
global_system_variables.collation_connection= default_charset_info;
global_system_variables.character_set_results= default_charset_info;
global_system_variables.character_set_client= default_charset_info;
- global_system_variables.collation_connection= default_charset_info;
if (!(character_set_filesystem=
get_charset_by_csname(character_set_filesystem_name,
diff --git a/sql/slave.cc b/sql/slave.cc
index c1b0d655bea..1509916fe91 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -3279,7 +3279,43 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
now the relay log starts with its Format_desc, has a Rotate etc).
*/
- DBUG_PRINT("info",("type_code=%d, server_id=%d",type_code,ev->server_id));
+ DBUG_PRINT("info",("type_code: %d; server_id: %d; slave_skip_counter: %d",
+ type_code, ev->server_id, rli->slave_skip_counter));
+
+ /*
+ If the slave skip counter is positive, we still need to set the
+ OPTION_BEGIN flag correctly and not skip the log events that
+ start or end a transaction. If we do not do this, the slave will
+ not notice that it is inside a transaction, and will happily start
+ executing from inside the transaction.
+
+ Note that the code block below is strictly 5.0.
+ */
+#if MYSQL_VERSION_ID < 50100
+ if (unlikely(rli->slave_skip_counter > 0))
+ {
+ switch (type_code)
+ {
+ case QUERY_EVENT:
+ {
+ Query_log_event* const qev= (Query_log_event*) ev;
+ DBUG_PRINT("info", ("QUERY_EVENT { query: '%s', q_len: %u }",
+ qev->query, qev->q_len));
+ if (memcmp("BEGIN", qev->query, qev->q_len+1) == 0)
+ thd->options|= OPTION_BEGIN;
+ else if (memcmp("COMMIT", qev->query, qev->q_len+1) == 0 ||
+ memcmp("ROLLBACK", qev->query, qev->q_len+1) == 0)
+ thd->options&= ~OPTION_BEGIN;
+ }
+ break;
+
+ case XID_EVENT:
+ DBUG_PRINT("info", ("XID_EVENT"));
+ thd->options&= ~OPTION_BEGIN;
+ break;
+ }
+ }
+#endif
if ((ev->server_id == (uint32) ::server_id &&
!replicate_same_server_id &&
@@ -3301,6 +3337,9 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
flush_relay_log_info(rli);
}
+ DBUG_PRINT("info", ("thd->options: %s",
+ (thd->options & OPTION_BEGIN) ? "OPTION_BEGIN" : ""))
+
/*
Protect against common user error of setting the counter to 1
instead of 2 while recovering from an insert which used auto_increment,
@@ -3311,6 +3350,15 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
type_code == RAND_EVENT ||
type_code == USER_VAR_EVENT) &&
rli->slave_skip_counter == 1) &&
+#if MYSQL_VERSION_ID < 50100
+ /*
+ Decrease the slave skip counter only if we are not inside
+ a transaction or the slave skip counter is more than
+ 1. The slave skip counter will be decreased from 1 to 0
+ when reaching the final ROLLBACK, COMMIT, or XID_EVENT.
+ */
+ (!(thd->options & OPTION_BEGIN) || rli->slave_skip_counter > 1) &&
+#endif
/*
The events from ourselves which have something to do with the relay
log itself must be skipped, true, but they mustn't decrement
@@ -3321,8 +3369,10 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
would not be skipped.
*/
!(ev->server_id == (uint32) ::server_id &&
- (type_code == ROTATE_EVENT || type_code == STOP_EVENT ||
- type_code == START_EVENT_V3 || type_code == FORMAT_DESCRIPTION_EVENT)))
+ (type_code == ROTATE_EVENT ||
+ type_code == STOP_EVENT ||
+ type_code == START_EVENT_V3 ||
+ type_code == FORMAT_DESCRIPTION_EVENT)))
--rli->slave_skip_counter;
pthread_mutex_unlock(&rli->data_lock);
delete ev;
@@ -3530,7 +3580,7 @@ connected:
on with life.
*/
thd->proc_info = "Registering slave on master";
- if (register_slave_on_master(mysql) || update_slave_list(mysql, mi))
+ if (register_slave_on_master(mysql))
goto err;
}
@@ -4931,7 +4981,16 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
a new event and is queuing it; the false "0" will exist until SQL
finishes executing the new event; it will be look abnormal only if
the events have old timestamps (then you get "many", 0, "many").
- Transient phases like this can't really be fixed.
+
+ Transient phases like this can be fixed by implementing a
+ Heartbeat event, which gives the slave the status of the
+ master at times when the master has no new updates to send.
+ Seconds_Behind_Master would then be zero only when the master
+ has no more updates in the binlog for the slave. The heartbeat
+ can be sent in a (small) fraction of slave_net_timeout. Until
+ that is done, rli->last_master_timestamp is temporarily reset
+ (for the time spent waiting for the following event) whenever
+ EOF is reached.
*/
time_t save_timestamp= rli->last_master_timestamp;
rli->last_master_timestamp= 0;
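
The 5.0-only block above keeps OPTION_BEGIN in step with BEGIN/COMMIT/ROLLBACK/XID events while SQL_SLAVE_SKIP_COUNTER drains, so the counter can only hit zero at a transaction boundary. A condensed standalone sketch of that rule (SkipState and account_skipped_event are illustrative names only):

#include <string>

struct SkipState {
  unsigned skip_counter = 0;    // analogous to rli->slave_skip_counter
  bool in_transaction = false;  // analogous to thd->options & OPTION_BEGIN
};

// 'query' stands for the statement carried by a Query_log_event-like record.
void account_skipped_event(SkipState &s, const std::string &query) {
  if (s.skip_counter == 0)
    return;                                    // not skipping anything
  if (query == "BEGIN")
    s.in_transaction = true;                   // set OPTION_BEGIN
  else if (query == "COMMIT" || query == "ROLLBACK")
    s.in_transaction = false;                  // clear OPTION_BEGIN
  // Only let the counter reach zero outside a transaction: decrement when we
  // are not inside one, or when more than one event still has to be skipped.
  if (!s.in_transaction || s.skip_counter > 1)
    --s.skip_counter;
}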
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 69dda9ec1e8..5759d619e7e 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -100,8 +100,9 @@ sp_get_item_value(THD *thd, Item *item, String *str)
case REAL_RESULT:
case INT_RESULT:
case DECIMAL_RESULT:
- return item->val_str(str);
-
+ if (item->field_type() != MYSQL_TYPE_BIT)
+ return item->val_str(str);
+ else {/* Bit type is handled as binary string */}
case STRING_RESULT:
{
String *result= item->val_str(str);
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 93a9d4d6da2..9f294d09d5a 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -159,7 +159,13 @@ typedef struct st_log_info
my_off_t pos;
bool fatal; // if the purge happens to give us a negative offset
pthread_mutex_t lock;
- st_log_info():fatal(0) { pthread_mutex_init(&lock, MY_MUTEX_INIT_FAST);}
+ st_log_info()
+ : index_file_offset(0), index_file_start_offset(0),
+ pos(0), fatal(0)
+ {
+ log_file_name[0] = '\0';
+ pthread_mutex_init(&lock, MY_MUTEX_INIT_FAST);
+ }
~st_log_info() { pthread_mutex_destroy(&lock);}
} LOG_INFO;
@@ -2372,6 +2378,11 @@ class multi_delete :public select_result_interceptor
/* True if at least one table we delete from is not transactional */
bool normal_tables;
bool delete_while_scanning;
+ /*
+ Error handling (rollback and binlogging) can happen in send_eof(),
+ so send_error() afterwards needs a way to find that out.
+ */
+ bool error_handled;
public:
multi_delete(TABLE_LIST *dt, uint num_of_tables);
@@ -2407,6 +2418,11 @@ class multi_update :public select_result_interceptor
/* True if the update operation has made a change in a transactional table */
bool transactional_tables;
bool ignore;
+ /*
+ Error handling (rollback and binlogging) can happen in send_eof(),
+ so send_error() afterwards needs a way to find that out.
+ */
+ bool error_handled;
public:
multi_update(TABLE_LIST *ut, TABLE_LIST *leaves_list,
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 56edfa6c5b2..a28a39a769d 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -38,6 +38,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
ha_rows deleted;
uint usable_index= MAX_KEY;
SELECT_LEX *select_lex= &thd->lex->select_lex;
+ THD::killed_state killed_status= THD::NOT_KILLED;
DBUG_ENTER("mysql_delete");
if (open_and_lock_tables(thd, table_list))
@@ -280,8 +281,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
else
table->file->unlock_row(); // Row failed selection, release lock on it
}
- if (thd->killed && !error)
- error= 1; // Aborted
+ killed_status= thd->killed;
+ error= (killed_status == THD::NOT_KILLED)? error : 1;
thd->proc_info="end";
end_read_record(&info);
free_io_cache(table); // Will not do any harm
@@ -319,14 +320,14 @@ cleanup:
thd->transaction.stmt.modified_non_trans_table= TRUE;
/* See similar binlogging code in sql_update.cc, for comments */
- if ((error < 0) || (deleted && !transactional_table))
+ if ((error < 0) || thd->transaction.stmt.modified_non_trans_table)
{
if (mysql_bin_log.is_open())
{
if (error < 0)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
- transactional_table, FALSE);
+ transactional_table, FALSE, killed_status);
if (mysql_bin_log.write(&qinfo) && transactional_table)
error=1;
}
@@ -504,7 +505,7 @@ bool mysql_multi_delete_prepare(THD *thd)
multi_delete::multi_delete(TABLE_LIST *dt, uint num_of_tables_arg)
: delete_tables(dt), deleted(0), found(0),
num_of_tables(num_of_tables_arg), error(0),
- do_delete(0), transactional_tables(0), normal_tables(0)
+ do_delete(0), transactional_tables(0), normal_tables(0), error_handled(0)
{
tempfiles= (Unique **) sql_calloc(sizeof(Unique *) * num_of_tables);
}
@@ -685,12 +686,14 @@ void multi_delete::send_error(uint errcode,const char *err)
/* First send error what ever it is ... */
my_message(errcode, err, MYF(0));
- /* If nothing deleted return */
- if (!deleted)
+ /* Return if the error was already handled, or if nothing was deleted and there were no side effects */
+ if (error_handled ||
+ !thd->transaction.stmt.modified_non_trans_table && !deleted)
DBUG_VOID_RETURN;
/* Something already deleted so we have to invalidate cache */
- query_cache_invalidate3(thd, delete_tables, 1);
+ if (deleted)
+ query_cache_invalidate3(thd, delete_tables, 1);
/*
If rows from the first table only has been deleted and it is
@@ -710,12 +713,30 @@ void multi_delete::send_error(uint errcode,const char *err)
*/
error= 1;
send_eof();
+ DBUG_ASSERT(error_handled);
+ DBUG_VOID_RETURN;
}
- DBUG_ASSERT(!normal_tables || !deleted || thd->transaction.stmt.modified_non_trans_table);
+
+ if (thd->transaction.stmt.modified_non_trans_table)
+ {
+ /*
+ there are only side effects; binlog the statement together with the error
+ */
+ if (mysql_bin_log.is_open())
+ {
+ Query_log_event qinfo(thd, thd->query, thd->query_length,
+ transactional_tables, FALSE);
+ mysql_bin_log.write(&qinfo);
+ }
+ thd->transaction.all.modified_non_trans_table= true;
+ }
+ DBUG_ASSERT(!normal_tables || !deleted ||
+ thd->transaction.stmt.modified_non_trans_table);
DBUG_VOID_RETURN;
}
+
/*
Do delete from other tables.
Returns values:
@@ -798,6 +819,7 @@ int multi_delete::do_deletes()
bool multi_delete::send_eof()
{
+ THD::killed_state killed_status= THD::NOT_KILLED;
thd->proc_info="deleting from reference tables";
/* Does deletes for the last n - 1 tables, returns 0 if ok */
@@ -805,7 +827,7 @@ bool multi_delete::send_eof()
/* compute a total error to know if something failed */
local_error= local_error || error;
-
+ killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
/* reset used flags */
thd->proc_info="end";
@@ -817,21 +839,24 @@ bool multi_delete::send_eof()
{
query_cache_invalidate3(thd, delete_tables, 1);
}
- if ((local_error == 0) || (deleted && normal_tables))
+ DBUG_ASSERT(!normal_tables || !deleted ||
+ thd->transaction.stmt.modified_non_trans_table);
+ if ((local_error == 0) || thd->transaction.stmt.modified_non_trans_table)
{
if (mysql_bin_log.is_open())
{
if (local_error == 0)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
- transactional_tables, FALSE);
+ transactional_tables, FALSE, killed_status);
if (mysql_bin_log.write(&qinfo) && !normal_tables)
local_error=1; // Log write failed: roll back the SQL statement
}
if (thd->transaction.stmt.modified_non_trans_table)
thd->transaction.all.modified_non_trans_table= TRUE;
}
- DBUG_ASSERT(!normal_tables || !deleted || thd->transaction.stmt.modified_non_trans_table);
+ if (local_error != 0)
+ error_handled= TRUE; // to force early leave from ::send_error()
/* Commit or rollback the current SQL statement */
if (transactional_tables)
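
The new error_handled member is there so that multi_delete::send_error() can tell whether send_eof() has already written the binlog event and handled the rollback for a failed statement. A minimal sketch of that hand-off with made-up names and a placeholder write_binlog(); it is not the server's interface, only the shape of the protocol:

struct StatementEnd {
  bool error_handled = false;        // set once send_eof() finished the job
  bool modified_non_trans = false;   // non-transactional side effects exist

  void send_eof(bool local_error, bool killed_now) {
    // Record the kill status only when the statement actually failed,
    // mirroring "killed_status= (local_error == 0) ? NOT_KILLED : thd->killed".
    bool killed_status = local_error && killed_now;
    if (!local_error || modified_non_trans)
      write_binlog(killed_status);
    if (local_error)
      error_handled = true;          // tell send_error() we are already done
  }

  void send_error() {
    if (error_handled || !modified_non_trans)
      return;                        // nothing left to binlog or roll back
    write_binlog(false);
  }

  void write_binlog(bool /*killed_status*/) { /* placeholder for the sketch */ }
};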
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 770bbd1349d..65d8bb23706 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -864,8 +864,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
transactional_table= table->file->has_transactions();
- if ((changed= (info.copied || info.deleted || info.updated)) ||
- was_insert_delayed)
+ if ((changed= (info.copied || info.deleted || info.updated)))
{
/*
Invalidate the table in the query cache if something changed.
@@ -874,46 +873,47 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
*/
if (changed)
query_cache_invalidate3(thd, table_list, 1);
- if (error <= 0 || !transactional_table)
+ }
+ if (changed && error <= 0 || thd->transaction.stmt.modified_non_trans_table
+ || was_insert_delayed)
+ {
+ if (mysql_bin_log.is_open())
{
- if (mysql_bin_log.is_open())
+ if (error <= 0)
{
- if (error <= 0)
- {
- /*
- [Guilhem wrote] Temporary errors may have filled
- thd->net.last_error/errno. For example if there has
- been a disk full error when writing the row, and it was
- MyISAM, then thd->net.last_error/errno will be set to
- "disk full"... and the my_pwrite() will wait until free
- space appears, and so when it finishes then the
- write_row() was entirely successful
- */
- /* todo: consider removing */
- thd->clear_error();
- }
- /* bug#22725:
-
- A query which per-row-loop can not be interrupted with
- KILLED, like INSERT, and that does not invoke stored
- routines can be binlogged with neglecting the KILLED error.
-
- If there was no error (error == zero) until after the end of
- inserting loop the KILLED flag that appeared later can be
- disregarded since previously possible invocation of stored
- routines did not result in any error due to the KILLED. In
- such case the flag is ignored for constructing binlog event.
+ /*
+ [Guilhem wrote] Temporary errors may have filled
+ thd->net.last_error/errno. For example if there has
+ been a disk full error when writing the row, and it was
+ MyISAM, then thd->net.last_error/errno will be set to
+ "disk full"... and the my_pwrite() will wait until free
+ space appears, and so when it finishes then the
+ write_row() was entirely successful
*/
- Query_log_event qinfo(thd, thd->query, thd->query_length,
- transactional_table, FALSE,
- (error>0) ? thd->killed : THD::NOT_KILLED);
- DBUG_ASSERT(thd->killed != THD::KILL_BAD_DATA || error > 0);
- if (mysql_bin_log.write(&qinfo) && transactional_table)
- error=1;
+ /* todo: consider removing */
+ thd->clear_error();
}
- if (thd->transaction.stmt.modified_non_trans_table)
- thd->transaction.all.modified_non_trans_table= TRUE;
+ /* bug#22725:
+
+ A query which per-row-loop can not be interrupted with
+ KILLED, like INSERT, and that does not invoke stored
+ routines can be binlogged with neglecting the KILLED error.
+
+ If there was no error (error == zero) until after the end of
+ inserting loop the KILLED flag that appeared later can be
+ disregarded since previously possible invocation of stored
+ routines did not result in any error due to the KILLED. In
+ such case the flag is ignored for constructing binlog event.
+ */
+ Query_log_event qinfo(thd, thd->query, thd->query_length,
+ transactional_table, FALSE,
+ (error>0) ? thd->killed : THD::NOT_KILLED);
+ DBUG_ASSERT(thd->killed != THD::KILL_BAD_DATA || error > 0);
+ if (mysql_bin_log.write(&qinfo) && transactional_table)
+ error=1;
}
+ if (thd->transaction.stmt.modified_non_trans_table)
+ thd->transaction.all.modified_non_trans_table= TRUE;
}
DBUG_ASSERT(transactional_table || !changed ||
thd->transaction.stmt.modified_non_trans_table);
@@ -2938,6 +2938,7 @@ bool select_insert::send_eof()
{
int error, error2;
bool changed, transactional_table= table->file->has_transactions();
+ THD::killed_state killed_status= thd->killed;
DBUG_ENTER("select_insert::send_eof");
error= (!thd->prelocked_mode) ? table->file->end_bulk_insert():0;
@@ -2967,7 +2968,7 @@ bool select_insert::send_eof()
if (!error)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
- transactional_table, FALSE);
+ transactional_table, FALSE, killed_status);
mysql_bin_log.write(&qinfo);
}
if ((error2=ha_autocommit_or_rollback(thd,error)) && ! error)
@@ -3004,6 +3005,7 @@ void select_insert::abort()
*/
DBUG_VOID_RETURN;
}
+ changed= (info.copied || info.deleted || info.updated);
transactional_table= table->file->has_transactions();
if (!thd->prelocked_mode)
table->file->end_bulk_insert();
@@ -3013,8 +3015,7 @@ void select_insert::abort()
error while inserting into a MyISAM table) we must write to the binlog (and
the error code will make the slave stop).
*/
- if ((changed= info.copied || info.deleted || info.updated) &&
- !transactional_table)
+ if (thd->transaction.stmt.modified_non_trans_table)
{
if (last_insert_id)
thd->insert_id(last_insert_id); // For binary log
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 55cbbf1c540..d687ceff393 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -85,7 +85,8 @@ static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
#ifndef EMBEDDED_LIBRARY
static bool write_execute_load_query_log_event(THD *thd,
bool duplicates, bool ignore,
- bool transactional_table);
+ bool transactional_table,
+ THD::killed_state killed_status);
#endif /* EMBEDDED_LIBRARY */
/*
@@ -135,6 +136,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
char *tdb= thd->db ? thd->db : db; // Result is never null
ulong skip_lines= ex->skip_lines;
bool transactional_table;
+ THD::killed_state killed_status= THD::NOT_KILLED;
DBUG_ENTER("mysql_load");
#ifdef EMBEDDED_LIBRARY
@@ -404,7 +406,16 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
free_blobs(table); /* if pack_blob was used */
table->copy_blobs=0;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
-
+ /*
+ simulated killing in the middle of per-row loop
+ must be effective for binlogging
+ */
+ DBUG_EXECUTE_IF("simulate_kill_bug27571",
+ {
+ error=1;
+ thd->killed= THD::KILL_QUERY;
+ };);
+ killed_status= (error == 0)? THD::NOT_KILLED : thd->killed;
/*
We must invalidate the table in query cache before binlog writing and
ha_autocommit_...
@@ -444,9 +455,10 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
/* If the file was not empty, wrote_create_file is true */
if (lf_info.wrote_create_file)
{
- if ((info.copied || info.deleted) && !transactional_table)
+ if (thd->transaction.stmt.modified_non_trans_table)
write_execute_load_query_log_event(thd, handle_duplicates,
- ignore, transactional_table);
+ ignore, transactional_table,
+ killed_status);
else
{
Delete_file_log_event d(thd, db, transactional_table);
@@ -477,7 +489,8 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
read_info.end_io_cache();
if (lf_info.wrote_create_file)
write_execute_load_query_log_event(thd, handle_duplicates,
- ignore, transactional_table);
+ ignore, transactional_table,
+ killed_status);
}
#endif /*!EMBEDDED_LIBRARY*/
if (transactional_table)
@@ -504,7 +517,8 @@ err:
/* Not a very useful function; just to avoid duplication of code */
static bool write_execute_load_query_log_event(THD *thd,
bool duplicates, bool ignore,
- bool transactional_table)
+ bool transactional_table,
+ THD::killed_state killed_err_arg)
{
Execute_load_query_log_event
e(thd, thd->query, thd->query_length,
@@ -512,7 +526,7 @@ static bool write_execute_load_query_log_event(THD *thd,
(char*)thd->lex->fname_end - (char*)thd->query,
(duplicates == DUP_REPLACE) ? LOAD_DUP_REPLACE :
(ignore ? LOAD_DUP_IGNORE : LOAD_DUP_ERROR),
- transactional_table, FALSE);
+ transactional_table, FALSE, killed_err_arg);
return mysql_bin_log.write(&e);
}
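
mysql_load() now follows the same bug#27571 rule as the DELETE and UPDATE changes in this commit: sample thd->killed once, right after the per-row loop, and hand that snapshot to the binlog event so a KILL arriving later cannot change what gets logged. A tiny sketch of the snapshot rule (FakeThd and snapshot_killed_for_binlog are illustrative names):

enum killed_state { NOT_KILLED = 0, KILL_QUERY = 1 };

struct FakeThd {
  volatile killed_state killed = NOT_KILLED;  // may flip at any time
};

// Status to record in the binlog event: only treat the statement as killed
// when it actually ended with an error, as in
// "killed_status= (error == 0) ? THD::NOT_KILLED : thd->killed".
killed_state snapshot_killed_for_binlog(const FakeThd &thd, int error) {
  return (error == 0) ? NOT_KILLED : thd.killed;
}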
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 0a8b92d28c0..16a64c48cdd 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -2853,7 +2853,16 @@ mysql_execute_command(THD *thd)
if (check_global_access(thd, SUPER_ACL | REPL_CLIENT_ACL))
goto error;
pthread_mutex_lock(&LOCK_active_mi);
- res = show_master_info(thd,active_mi);
+ if (active_mi != NULL)
+ {
+ res = show_master_info(thd, active_mi);
+ }
+ else
+ {
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0,
+ "the master info structure does not exist");
+ send_ok(thd);
+ }
pthread_mutex_unlock(&LOCK_active_mi);
break;
}
@@ -3701,6 +3710,13 @@ end_with_restore_list:
SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
OPTION_SETUP_TABLES_DONE,
del_result, unit, select_lex);
+ res|= thd->net.report_error;
+ if (unlikely(res))
+ {
+ /* If we had another error reported earlier then this one will be ignored */
+ del_result->send_error(ER_UNKNOWN_ERROR, "Execution of the query failed");
+ del_result->abort();
+ }
delete del_result;
}
else
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 9cc0ba7c29a..903d254db8f 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -364,7 +364,6 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
name=0; // Find first log
linfo.index_file_offset = 0;
- thd->current_linfo = &linfo;
if (mysql_bin_log.find_log_pos(&linfo, name, 1))
{
@@ -373,6 +372,10 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
goto err;
}
+ pthread_mutex_lock(&LOCK_thread_count);
+ thd->current_linfo = &linfo;
+ pthread_mutex_unlock(&LOCK_thread_count);
+
if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0)
{
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
@@ -1338,7 +1341,6 @@ bool mysql_show_binlog_events(THD* thd)
name=0; // Find first log
linfo.index_file_offset = 0;
- thd->current_linfo = &linfo;
if (mysql_bin_log.find_log_pos(&linfo, name, 1))
{
@@ -1346,6 +1348,10 @@ bool mysql_show_binlog_events(THD* thd)
goto err;
}
+ pthread_mutex_lock(&LOCK_thread_count);
+ thd->current_linfo = &linfo;
+ pthread_mutex_unlock(&LOCK_thread_count);
+
if ((file=open_binlog(&log, linfo.log_file_name, &errmsg)) < 0)
goto err;
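
The sql_repl.cc hunks above delay the thd->current_linfo assignment until after find_log_pos() and wrap it in LOCK_thread_count, so the pointer becomes visible only to threads that scan the thread list under the same mutex. A stripped-down sketch of that publication pattern (LOCK_threads, LogInfo and ThreadCtx are illustrative names):

#include <pthread.h>

static pthread_mutex_t LOCK_threads = PTHREAD_MUTEX_INITIALIZER;

struct LogInfo   { /* index file offsets, log file name, ... */ };
struct ThreadCtx { LogInfo *current_linfo = nullptr; };

// Publish the per-statement log info only under the mutex that readers
// (e.g. a purge walking all threads) take before looking at the pointer.
void publish_linfo(ThreadCtx *thd, LogInfo *linfo) {
  pthread_mutex_lock(&LOCK_threads);
  thd->current_linfo = linfo;
  pthread_mutex_unlock(&LOCK_threads);
}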
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 321d07dec76..84349a40977 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -134,6 +134,7 @@ int mysql_update(THD *thd,
SELECT_LEX *select_lex= &thd->lex->select_lex;
bool need_reopen;
List<Item> all_fields;
+ THD::killed_state killed_status= THD::NOT_KILLED;
DBUG_ENTER("mysql_update");
LINT_INIT(timestamp_query_id);
@@ -519,43 +520,26 @@ int mysql_update(THD *thd,
table->file->unlock_row();
thd->row_count++;
}
+ /*
+ Cache the killed status to pass as the arg to the query event constructor;
+ the cached value cannot change, whereas the killed status can change
+ (externally) after this point, and such a later change will not affect
+ binlogging.
+ It's assumed that if an error was set in combination with an effective
+ killed status then the error is due to killing.
+ */
+ killed_status= thd->killed; // get the status of the volatile
+ // simulated killing after the loop must be ineffective for binlogging
+ DBUG_EXECUTE_IF("simulate_kill_bug27571",
+ {
+ thd->killed= THD::KILL_QUERY;
+ };);
+ error= (killed_status == THD::NOT_KILLED)? error : 1;
+
if (!transactional_table && updated > 0)
thd->transaction.stmt.modified_non_trans_table= TRUE;
-
- /*
- todo bug#27571: to avoid asynchronization of `error' and
- `error_code' of binlog event constructor
-
- The concept, which is a bit different for insert(!), is to
- replace `error' assignment with the following lines
-
- killed_status= thd->killed; // get the status of the volatile
-
- Notice: thd->killed is type of "state" whereas the lhs has
- "status" the suffix which translates according to WordNet: a state
- at a particular time - at the time of the end of per-row loop in
- our case. Binlogging ops are conducted with the status.
-
- error= (killed_status == THD::NOT_KILLED)? error : 1;
-
- which applies to most mysql_$query functions.
- Event's constructor will accept `killed_status' as an argument:
-
- Query_log_event qinfo(..., killed_status);
-
- thd->killed might be changed after killed_status had got cached and this
- won't affect binlogging event but other effects remain.
-
- Open issue: In a case the error happened not because of KILLED -
- and then KILLED was caught later still within the loop - we shall
- do something to avoid binlogging of incorrect ER_SERVER_SHUTDOWN
- error_code.
- */
-
- if (thd->killed && !error)
- error= 1; // Aborted
end_read_record(&info);
free_io_cache(table); // If ORDER BY
delete select;
@@ -580,14 +564,14 @@ int mysql_update(THD *thd,
Sometimes we want to binlog even if we updated no rows, in case user used
it to be sure master and slave are in same state.
*/
- if ((error < 0) || (updated && !transactional_table))
+ if ((error < 0) || thd->transaction.stmt.modified_non_trans_table)
{
if (mysql_bin_log.is_open())
{
if (error < 0)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
- transactional_table, FALSE);
+ transactional_table, FALSE, killed_status);
if (mysql_bin_log.write(&qinfo) && transactional_table)
error=1; // Rollback update
}
@@ -994,8 +978,8 @@ multi_update::multi_update(TABLE_LIST *table_list,
:all_tables(table_list), leaves(leaves_list), update_tables(0),
tmp_tables(0), updated(0), found(0), fields(field_list),
values(value_list), table_count(0), copy_field(0),
- handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(0),
- transactional_tables(1), ignore(ignore_arg)
+ handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
+ transactional_tables(1), ignore(ignore_arg), error_handled(0)
{}
@@ -1202,7 +1186,6 @@ multi_update::initialize_tables(JOIN *join)
if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
DBUG_RETURN(1);
main_table=join->join_tab->table;
- trans_safe= transactional_tables= main_table->file->has_transactions();
table_to_update= 0;
/* Any update has at least one pair (field, value) */
@@ -1484,12 +1467,14 @@ void multi_update::send_error(uint errcode,const char *err)
/* First send error what ever it is ... */
my_error(errcode, MYF(0), err);
- /* If nothing updated return */
- if (updated == 0) /* the counter might be reset in send_eof */
- return; /* and then the query has been binlogged */
+ /* Return if the error was already handled, or if nothing was updated and there were no side effects */
+ if (error_handled ||
+ !thd->transaction.stmt.modified_non_trans_table && !updated)
+ return;
/* Something already updated so we have to invalidate cache */
- query_cache_invalidate3(thd, update_tables, 1);
+ if (updated)
+ query_cache_invalidate3(thd, update_tables, 1);
/*
If all tables that has been updated are trans safe then just do rollback.
If not attempt to do remaining updates.
@@ -1521,12 +1506,16 @@ void multi_update::send_error(uint errcode,const char *err)
*/
if (mysql_bin_log.is_open())
{
+ /*
+ THD::killed status might not have been set ON at the time the error
+ was caught; if it is set later, the killed error is written into the
+ replication event.
+ */
Query_log_event qinfo(thd, thd->query, thd->query_length,
transactional_tables, FALSE);
mysql_bin_log.write(&qinfo);
}
- if (!trans_safe)
- thd->transaction.all.modified_non_trans_table= TRUE;
+ thd->transaction.all.modified_non_trans_table= TRUE;
}
DBUG_ASSERT(trans_safe || !updated || thd->transaction.stmt.modified_non_trans_table);
@@ -1709,10 +1698,19 @@ err2:
bool multi_update::send_eof()
{
char buff[STRING_BUFFER_USUAL_SIZE];
+ THD::killed_state killed_status= THD::NOT_KILLED;
thd->proc_info="updating reference tables";
- /* Does updates for the last n - 1 tables, returns 0 if ok */
+ /*
+ Does updates for the last n - 1 tables, returns 0 if ok;
+ error takes into account killed status gained in do_updates()
+ */
int local_error = (table_count) ? do_updates(0) : 0;
+ /*
+ If local_error is not set until after do_updates(), then a kill
+ carried out later should not affect binlogging.
+ */
+ killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
thd->proc_info= "end";
/* We must invalidate the query cache before binlog writing and
@@ -1739,16 +1737,16 @@ bool multi_update::send_eof()
{
if (local_error == 0)
thd->clear_error();
- else
- updated= 0; /* if there's an error binlog it here not in ::send_error */
Query_log_event qinfo(thd, thd->query, thd->query_length,
- transactional_tables, FALSE);
+ transactional_tables, FALSE, killed_status);
if (mysql_bin_log.write(&qinfo) && trans_safe)
local_error= 1; // Rollback update
}
if (thd->transaction.stmt.modified_non_trans_table)
thd->transaction.all.modified_non_trans_table= TRUE;
}
+ if (local_error != 0)
+ error_handled= TRUE; // to force early leave from ::send_error()
if (transactional_tables)
{