author    unknown <mkindahl@dl145h.mysql.com>    2007-11-21 21:15:33 +0100
committer unknown <mkindahl@dl145h.mysql.com>    2007-11-21 21:15:33 +0100
commit    692f0e4f67ee3a3e4e28b92074cd9872ab3ed644 (patch)
tree      7eebc171f167cc46df49668d69760ac4e7a73137 /sql
parent    1fb0a096cf9ca930fe9bc66b202c8662daa76a9a (diff)
parent    47d30b190dd8ca35d035683ce6fba58d9f4688e0 (diff)
download  mariadb-git-692f0e4f67ee3a3e4e28b92074cd9872ab3ed644.tar.gz
Merge dl145h.mysql.com:/data0/mkindahl/mysql-5.1
into dl145h.mysql.com:/data0/mkindahl/mysql-5.1-rpl-merge

client/mysql.cc:
  Auto merged
mysql-test/mysql-test-run.pl:
  Auto merged
mysql-test/r/ctype_ucs.result:
  Auto merged
mysql-test/r/mysql.result:
  Auto merged
mysql-test/suite/ndb/r/ndb_dd_basic.result:
  Auto merged
mysql-test/suite/rpl/r/rpl_extraCol_innodb.result:
  Auto merged
mysql-test/suite/rpl/r/rpl_extraCol_myisam.result:
  Auto merged
mysql-test/suite/rpl_ndb/r/rpl_ndb_extraCol.result:
  Auto merged
mysql-test/t/ctype_uca.test:
  Auto merged
mysql-test/t/ctype_ucs.test:
  Auto merged
mysql-test/t/innodb.test:
  Auto merged
mysql-test/t/mysql.test:
  Auto merged
mysql-test/t/partition.test:
  Auto merged
mysql-test/t/subselect.test:
  Auto merged
sql/field.cc:
  Auto merged
sql/ha_ndbcluster.cc:
  Auto merged
sql/handler.cc:
  Auto merged
sql/item_cmpfunc.cc:
  Auto merged
sql/mysqld.cc:
  Auto merged
sql/slave.cc:
  Auto merged
sql/sql_class.cc:
  Auto merged
sql/sql_class.h:
  Auto merged
sql/sql_insert.cc:
  Auto merged
sql/sql_select.cc:
  Auto merged
sql/sql_show.cc:
  Auto merged
sql/sql_yacc.yy:
  Auto merged
mysql-test/r/innodb.result:
  Manual merge.
Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc          414
-rw-r--r--  sql/field.h           271
-rw-r--r--  sql/item_cmpfunc.cc   122
-rw-r--r--  sql/item_cmpfunc.h      4
-rw-r--r--  sql/item_xmlfunc.cc    60
-rw-r--r--  sql/log.cc             13
-rw-r--r--  sql/log.h               8
-rw-r--r--  sql/log_event.cc      384
-rw-r--r--  sql/log_event.h      1060
-rw-r--r--  sql/mysqld.cc           1
-rw-r--r--  sql/records.cc          2
-rw-r--r--  sql/rpl_record.cc      53
-rw-r--r--  sql/rpl_rli.cc         10
-rw-r--r--  sql/rpl_rli.h          12
-rw-r--r--  sql/rpl_utility.cc     36
-rw-r--r--  sql/rpl_utility.h      14
-rw-r--r--  sql/slave.cc           46
-rw-r--r--  sql/sp_head.cc          5
-rw-r--r--  sql/sql_class.h        22
-rw-r--r--  sql/sql_db.cc           7
-rw-r--r--  sql/sql_delete.cc      51
-rw-r--r--  sql/sql_insert.cc     109
-rw-r--r--  sql/sql_load.cc        29
-rw-r--r--  sql/sql_parse.cc       18
-rw-r--r--  sql/sql_repl.cc        10
-rw-r--r--  sql/sql_show.cc         6
-rw-r--r--  sql/sql_update.cc     103
-rw-r--r--  sql/sql_yacc.yy         2
28 files changed, 2206 insertions(+), 666 deletions(-)
diff --git a/sql/field.cc b/sql/field.cc
index 36ba6e6f12c..cd8228305e4 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1394,20 +1394,85 @@ int Field::store(const char *to, uint length, CHARSET_INFO *cs,
/**
+ Pack the field into a format suitable for storage and transfer.
+
+ To implement packing functionality, only the virtual function
+ should be overridden. The other functions are just convenience
+ functions and hence should not be overridden.
+
+ The value of <code>low_byte_first</code> is dependent on how the
+ packed data is going to be used: for local use, e.g., temporary
+ storage on disk or in memory, use the native format since that is
+ faster. For data that is going to be transferred to other machines
+ (e.g., when writing data to the binary log), data should always be
+ stored in little-endian format.
+
+ @note The default method for packing fields just copies the raw bytes
+ of the record into the destination, but never more than
+ <code>max_length</code> characters.
+
+ @param to
+ Pointer to memory area where representation of field should be put.
+
+ @param from
+ Pointer to memory area where record representation of field is
+ stored.
+
+ @param max_length
+ Maximum length of the field, as given in the column definition. For
+ example, for <code>CHAR(1000)</code>, the <code>max_length</code>
+ is 1000. This information is sometimes needed to decide how to pack
+ the data.
+
+ @param low_byte_first
+ @c TRUE if integers should be stored little-endian, @c FALSE if
+ native format should be used. Note that for little-endian machines,
+ the value of this flag is a moot point since the native format is
+ little-endian.
+*/
+uchar *
+Field::pack(uchar *to, const uchar *from, uint max_length,
+ bool low_byte_first __attribute__((unused)))
+{
+ uint32 length= pack_length();
+ set_if_smaller(length, max_length);
+ memcpy(to, from, length);
+ return to+length;
+}
+
+/**
Unpack a field from row data.
- This method is used to unpack a field from a master whose size
- of the field is less than that of the slave.
-
+ This method is used to unpack a field from a master whose size of
+ the field is less than that of the slave.
+
+ The <code>param_data</code> parameter is a two-byte integer (stored
+ in the least significant 16 bits of the unsigned integer) usually
+ consisting of two parts: the real type in the most significant byte
+ and an original pack length in the least significant byte.
+
+ The exact layout of the <code>param_data</code> field is given by
+ the <code>Table_map_log_event::save_field_metadata()</code>.
+
+ This is the default method for unpacking a field. It just copies
+ the memory block byte for byte, copying the original pack length or
+ the length of the field, whichever is smaller.
+
@param to Destination of the data
@param from Source of the data
- @param param_data Pack length of the field data
+ @param param_data Real type and original pack length of the field
+ data
+
+ @param low_byte_first
+ If this flag is @c true, all composite entities (e.g., lengths)
+ should be unpacked in little-endian format; otherwise, the entities
+ are unpacked in native order.
@return New pointer into memory based on from + length of the data
*/
-const uchar *Field::unpack(uchar* to,
- const uchar *from,
- uint param_data)
+const uchar *
+Field::unpack(uchar* to, const uchar *from, uint param_data,
+ bool low_byte_first __attribute__((unused)))
{
uint length=pack_length();
int from_type= 0;
@@ -1420,19 +1485,18 @@ const uchar *Field::unpack(uchar* to,
from_type= (param_data & 0xff00) >> 8U; // real_type.
param_data= param_data & 0x00ff; // length.
}
+
+ if ((param_data == 0) ||
+ (length == param_data) ||
+ (from_type != real_type()))
+ {
+ memcpy(to, from, length);
+ return from+length;
+ }
+
uint len= (param_data && (param_data < length)) ?
param_data : length;
- /*
- If the length is the same, use old unpack method.
- If the param_data is 0, use the old unpack method.
- This is possible if the table map was generated from a down-level
- master or if the data was not available on the master.
- If the real_types are not the same, use the old unpack method.
- */
- if ((length == param_data) ||
- (param_data == 0) ||
- (from_type != real_type()))
- return(unpack(to, from));
+
memcpy(to, from, param_data > length ? length : len);
return from+len;
}
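To make the metadata layout described above concrete, here is a minimal standalone sketch (plain C++ with fixed-width types standing in for the server's uint/uchar; the helper and struct names are hypothetical) of how the two-byte param_data word splits into the real type and the original pack length:

    #include <cstdint>
    #include <cstdio>

    /* Mirrors the decode at the top of Field::unpack(): the real type is
       kept in the most significant byte, the original pack length in the
       least significant byte. */
    struct FieldMetadata {
      uint8_t real_type;
      uint8_t pack_length;
    };

    static FieldMetadata decode_param_data(uint16_t param_data) {
      FieldMetadata m;
      m.real_type   = (param_data & 0xff00) >> 8;  /* from_type in the server code */
      m.pack_length =  param_data & 0x00ff;        /* length in the server code    */
      return m;
    }

    int main() {
      /* e.g. a column whose real type code is 15 and whose pack length is 32 */
      FieldMetadata m = decode_param_data((15 << 8) | 32);
      std::printf("type=%u length=%u\n", (unsigned) m.real_type,
                  (unsigned) m.pack_length);
      return 0;
    }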
@@ -2814,10 +2878,15 @@ uint Field_new_decimal::is_equal(Create_field *new_field)
@return New pointer into memory based on from + length of the data
*/
-const uchar *Field_new_decimal::unpack(uchar* to,
- const uchar *from,
- uint param_data)
+const uchar *
+Field_new_decimal::unpack(uchar* to,
+ const uchar *from,
+ uint param_data,
+ bool low_byte_first)
{
+ if (param_data == 0)
+ return Field::unpack(to, from, param_data, low_byte_first);
+
uint from_precision= (param_data & 0xff00) >> 8U;
uint from_decimal= param_data & 0x00ff;
uint length=pack_length();
@@ -3959,6 +4028,49 @@ void Field_longlong::sql_type(String &res) const
}
+/*
+ Floating-point numbers
+ */
+
+uchar *
+Field_real::pack(uchar *to, const uchar *from,
+ uint max_length, bool low_byte_first)
+{
+ DBUG_ENTER("Field_real::pack");
+ DBUG_ASSERT(max_length >= pack_length());
+ DBUG_PRINT("debug", ("pack_length(): %u", pack_length()));
+#ifdef WORDS_BIGENDIAN
+ if (low_byte_first != table->s->db_low_byte_first)
+ {
+ const uchar *dptr= from + pack_length();
+ while (dptr-- > from)
+ *to++ = *dptr;
+ DBUG_RETURN(to);
+ }
+ else
+#endif
+ DBUG_RETURN(Field::pack(to, from, max_length, low_byte_first));
+}
+
+const uchar *
+Field_real::unpack(uchar *to, const uchar *from,
+ uint param_data, bool low_byte_first)
+{
+ DBUG_ENTER("Field_real::unpack");
+ DBUG_PRINT("debug", ("pack_length(): %u", pack_length()));
+#ifdef WORDS_BIGENDIAN
+ if (low_byte_first != table->s->db_low_byte_first)
+ {
+ const uchar *dptr= from + pack_length();
+ while (dptr-- > from)
+ *to++ = *dptr;
+ DBUG_RETURN(from + pack_length());
+ }
+ else
+#endif
+ DBUG_RETURN(Field::unpack(to, from, param_data, low_byte_first));
+}
+
/****************************************************************************
single precision float
****************************************************************************/
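The WORDS_BIGENDIAN branch above simply reverses the byte order when the storage byte order and the requested packing order differ. A minimal standalone sketch of that reversal loop (uint8_t stands in for uchar; the helper is illustrative, not part of the server API):

    #include <cstdint>
    #include <cstddef>
    #include <cstring>

    /* Copy 'len' bytes from 'from' to 'to' in reverse order, the same loop
       Field_real::pack()/unpack() use when the byte orders differ. */
    static uint8_t *reverse_copy(uint8_t *to, const uint8_t *from, std::size_t len) {
      const uint8_t *dptr = from + len;
      while (dptr-- > from)
        *to++ = *dptr;
      return to;
    }

    int main() {
      double d = 1.5;
      uint8_t native[sizeof(double)], swapped[sizeof(double)];
      std::memcpy(native, &d, sizeof(double));
      reverse_copy(swapped, native, sizeof(double));  /* simulate an endian mismatch */
      return 0;
    }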
@@ -6367,6 +6479,11 @@ int Field_longstr::store_decimal(const my_decimal *d)
return store(str.ptr(), str.length(), str.charset());
}
+uint32 Field_longstr::max_data_length() const
+{
+ return field_length + (field_length > 255 ? 2 : 1);
+}
+
double Field_string::val_real(void)
{
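Field_longstr::max_data_length() above adds the size of the length prefix that precedes a packed string: one byte while the field fits in 255 bytes, two bytes otherwise. A quick standalone check (the helper name is illustrative):

    #include <cstdint>
    #include <cassert>

    /* Maximum packed size of a string field: the data plus a 1- or 2-byte
       length prefix, mirroring Field_longstr::max_data_length(). */
    static uint32_t max_packed_length(uint32_t field_length) {
      return field_length + (field_length > 255 ? 2 : 1);
    }

    int main() {
      assert(max_packed_length(10)  == 11);   /* short field: 1-byte prefix  */
      assert(max_packed_length(300) == 302);  /* longer field: 2-byte prefix */
      return 0;
    }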
@@ -6511,7 +6628,9 @@ void Field_string::sql_type(String &res) const
}
-uchar *Field_string::pack(uchar *to, const uchar *from, uint max_length)
+uchar *Field_string::pack(uchar *to, const uchar *from,
+ uint max_length,
+ bool low_byte_first __attribute__((unused)))
{
uint length= min(field_length,max_length);
uint local_char_length= max_length/field_charset->mbmaxlen;
@@ -6519,11 +6638,15 @@ uchar *Field_string::pack(uchar *to, const uchar *from, uint max_length)
local_char_length= my_charpos(field_charset, from, from+length,
local_char_length);
set_if_smaller(length, local_char_length);
- while (length && from[length-1] == ' ')
+ while (length && from[length-1] == field_charset->pad_char)
length--;
+
+ // Length always stored little-endian
*to++= (uchar) length;
if (field_length > 255)
*to++= (uchar) (length >> 8);
+
+ // Store the actual bytes of the string
memcpy(to, from, length);
return to+length;
}
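For reference, the packed image produced by Field_string::pack() is a little-endian length prefix (a second byte is added when field_length > 255) followed by the value with trailing pad characters stripped. A simplified standalone sketch that leaves out the multi-byte-charset and max_length handling shown above:

    #include <cstdint>
    #include <vector>

    /* Sketch of the CHAR packing format: strip trailing pad bytes, store the
       length little-endian (1 or 2 bytes), then the remaining data bytes. */
    static std::vector<uint8_t> pack_char(const uint8_t *from, uint32_t field_length,
                                          uint8_t pad_char) {
      uint32_t length = field_length;
      while (length && from[length - 1] == pad_char)
        length--;

      std::vector<uint8_t> out;
      out.push_back(length & 0xFF);
      if (field_length > 255)
        out.push_back((length >> 8) & 0xFF);
      out.insert(out.end(), from, from + length);
      return out;
    }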
@@ -6545,34 +6668,27 @@ uchar *Field_string::pack(uchar *to, const uchar *from, uint max_length)
@return New pointer into memory based on from + length of the data
*/
-const uchar *Field_string::unpack(uchar *to,
- const uchar *from,
- uint param_data)
-{
- uint from_len= param_data & 0x00ff; // length.
- uint length= 0;
- uint f_length;
- f_length= (from_len < field_length) ? from_len : field_length;
- DBUG_ASSERT(f_length <= 255);
- length= (uint) *from++;
- bitmap_set_bit(table->write_set,field_index);
- store((const char *)from, length, system_charset_info);
- return from+length;
-}
-
-
-const uchar *Field_string::unpack(uchar *to, const uchar *from)
-{
+const uchar *
+Field_string::unpack(uchar *to,
+ const uchar *from,
+ uint param_data,
+ bool low_byte_first __attribute__((unused)))
+{
+ uint from_length=
+ param_data ? min(param_data & 0x00ff, field_length) : field_length;
uint length;
- if (field_length > 255)
+
+ if (from_length > 255)
{
length= uint2korr(from);
from+= 2;
}
else
length= (uint) *from++;
- memcpy(to, from, (int) length);
- bfill(to+length, field_length - length, ' ');
+
+ memcpy(to, from, length);
+ // Pad the string with the pad character of the field's charset
+ bfill(to + length, field_length - length, field_charset->pad_char);
return from+length;
}
@@ -6762,6 +6878,7 @@ const uint Field_varstring::MAX_SIZE= UINT_MAX16;
int Field_varstring::do_save_field_metadata(uchar *metadata_ptr)
{
char *ptr= (char *)metadata_ptr;
+ DBUG_ASSERT(field_length <= 65535);
int2store(ptr, field_length);
return 2;
}
@@ -6989,22 +7106,30 @@ uint32 Field_varstring::data_length()
Here the number of length bytes are depending on the given max_length
*/
-uchar *Field_varstring::pack(uchar *to, const uchar *from, uint max_length)
+uchar *Field_varstring::pack(uchar *to, const uchar *from,
+ uint max_length,
+ bool low_byte_first __attribute__((unused)))
{
uint length= length_bytes == 1 ? (uint) *from : uint2korr(from);
set_if_smaller(max_length, field_length);
if (length > max_length)
length=max_length;
- *to++= (char) (length & 255);
+
+ /* Length always stored little-endian */
+ *to++= length & 0xFF;
if (max_length > 255)
- *to++= (char) (length >> 8);
- if (length)
+ *to++= (length >> 8) & 0xFF;
+
+ /* Store bytes of string */
+ if (length > 0)
memcpy(to, from+length_bytes, length);
return to+length;
}
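Unlike the CHAR case, a VARCHAR value already carries its own length prefix in the record (length_bytes is 1 or 2 depending on the declared size), and pack() re-emits that length sized according to max_length. A simplified standalone sketch of the same flow (names are illustrative, not the server API):

    #include <cstdint>
    #include <vector>

    /* Re-pack a VARCHAR from its in-record format (length prefix + data),
       following the flow of Field_varstring::pack(). */
    static std::vector<uint8_t> pack_varstring(const uint8_t *from,
                                               uint32_t field_length,  /* declared max, bytes */
                                               uint32_t max_length) {
      const uint32_t length_bytes = field_length > 255 ? 2 : 1;
      uint32_t length = length_bytes == 1
                            ? from[0]
                            : (uint32_t) from[0] | ((uint32_t) from[1] << 8); /* uint2korr */
      if (max_length > field_length) max_length = field_length;
      if (length > max_length) length = max_length;

      std::vector<uint8_t> out;
      out.push_back(length & 0xFF);              /* length always little-endian */
      if (max_length > 255)
        out.push_back((length >> 8) & 0xFF);
      out.insert(out.end(), from + length_bytes, from + length_bytes + length);
      return out;
    }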
-uchar *Field_varstring::pack_key(uchar *to, const uchar *key, uint max_length)
+uchar *
+Field_varstring::pack_key(uchar *to, const uchar *key, uint max_length,
+ bool low_byte_first __attribute__((unused)))
{
uint length= length_bytes == 1 ? (uint) *key : uint2korr(key);
uint local_char_length= ((field_charset->mbmaxlen > 1) ?
@@ -7043,8 +7168,9 @@ uchar *Field_varstring::pack_key(uchar *to, const uchar *key, uint max_length)
Pointer to end of 'key' (To the next key part if multi-segment key)
*/
-const uchar *Field_varstring::unpack_key(uchar *to, const uchar *key,
- uint max_length)
+const uchar *
+Field_varstring::unpack_key(uchar *to, const uchar *key, uint max_length,
+ bool low_byte_first __attribute__((unused)))
{
/* get length of the blob key */
uint32 length= *key++;
@@ -7073,8 +7199,9 @@ const uchar *Field_varstring::unpack_key(uchar *to, const uchar *key,
end of key storage
*/
-uchar *Field_varstring::pack_key_from_key_image(uchar *to, const uchar *from,
- uint max_length)
+uchar *
+Field_varstring::pack_key_from_key_image(uchar *to, const uchar *from, uint max_length,
+ bool low_byte_first __attribute__((unused)))
{
/* Key length is always stored as 2 bytes */
uint length= uint2korr(from);
@@ -7094,6 +7221,9 @@ uchar *Field_varstring::pack_key_from_key_image(uchar *to, const uchar *from,
This method is used to unpack a varstring field from a master
whose size of the field is less than that of the slave.
+
+ @note
+ The string length is always packed little-endian.
@param to Destination of the data
@param from Source of the data
@@ -7101,9 +7231,10 @@ uchar *Field_varstring::pack_key_from_key_image(uchar *to, const uchar *from,
@return New pointer into memory based on from + length of the data
*/
-const uchar *Field_varstring::unpack(uchar *to,
- const uchar *from,
- uint param_data)
+const uchar *
+Field_varstring::unpack(uchar *to, const uchar *from,
+ uint param_data,
+ bool low_byte_first __attribute__((unused)))
{
uint length;
uint l_bytes= (param_data && (param_data < field_length)) ?
@@ -7115,28 +7246,7 @@ const uchar *Field_varstring::unpack(uchar *to,
if (length_bytes == 2)
to[1]= 0;
}
- else
- {
- length= uint2korr(from);
- to[0]= *from++;
- to[1]= *from++;
- }
- if (length)
- memcpy(to+ length_bytes, from, length);
- return from+length;
-}
-
-
-/*
- unpack field packed with Field_varstring::pack()
-*/
-
-const uchar *Field_varstring::unpack(uchar *to, const uchar *from)
-{
- uint length;
- if (length_bytes == 1)
- length= (uint) (*to= *from++);
- else
+ else /* l_bytes == 2 */
{
length= uint2korr(from);
to[0]= *from++;
@@ -7385,9 +7495,9 @@ void Field_blob::store_length(uchar *i_ptr,
}
-uint32 Field_blob::get_length(const uchar *pos, bool low_byte_first)
+uint32 Field_blob::get_length(const uchar *pos, uint packlength_arg, bool low_byte_first)
{
- switch (packlength) {
+ switch (packlength_arg) {
case 1:
return (uint32) pos[0];
case 2:
@@ -7818,26 +7928,37 @@ void Field_blob::sql_type(String &res) const
}
}
-
-uchar *Field_blob::pack(uchar *to, const uchar *from, uint max_length)
+uchar *Field_blob::pack(uchar *to, const uchar *from,
+ uint max_length, bool low_byte_first)
{
+ DBUG_ENTER("Field_blob::pack");
+ DBUG_PRINT("enter", ("to: 0x%lx; from: 0x%lx;"
+ " max_length: %u; low_byte_first: %d",
+ (ulong) to, (ulong) from,
+ max_length, low_byte_first));
+ DBUG_DUMP("record", from, table->s->reclength);
uchar *save= ptr;
ptr= (uchar*) from;
uint32 length=get_length(); // Length of from string
- if (length > max_length)
- {
- length=max_length;
- store_length(to,packlength,length,TRUE);
- }
- else
- memcpy(to,from,packlength); // Copy length
- if (length)
+
+ /*
+ Store max length, which will occupy packlength bytes. If the max
+ length given is smaller than the actual length of the blob, we
+ just store the initial bytes of the blob.
+ */
+ store_length(to, packlength, min(length, max_length), low_byte_first);
+
+ /*
+ Store the actual blob data, which will occupy 'length' bytes.
+ */
+ if (length > 0)
{
get_ptr((uchar**) &from);
memcpy(to+packlength, from,length);
}
ptr=save; // Restore org row pointer
- return to+packlength+length;
+ DBUG_DUMP("packed", to, packlength + length);
+ DBUG_RETURN(to+packlength+length);
}
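The BLOB image written above is 'packlength' bytes of length followed by the blob data; following the intent stated in the comment, only the first max_length bytes are kept when the value is longer. A standalone sketch with a plain little-endian length store in place of the server's store_length():

    #include <cstdint>
    #include <vector>
    #include <algorithm>

    /* Sketch of the packed BLOB layout: 'packlength' bytes of length
       (little-endian here), then the stored prefix of the data. */
    static std::vector<uint8_t> pack_blob(const uint8_t *data, uint32_t length,
                                          uint32_t packlength, uint32_t max_length) {
      const uint32_t stored = std::min(length, max_length);
      std::vector<uint8_t> out;
      for (uint32_t i = 0; i < packlength; i++)   /* store_length() stand-in */
        out.push_back((stored >> (8 * i)) & 0xFF);
      out.insert(out.end(), data, data + stored);
      return out;
    }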
@@ -7852,28 +7973,30 @@ uchar *Field_blob::pack(uchar *to, const uchar *from, uint max_length)
@param to Destination of the data
@param from Source of the data
- @param param_data not used
+ @param param_data Master's packlength stored in the least
+ significant byte (zero if not available)
+ @param low_byte_first @c TRUE if base types should be stored in little-
+ endian format, @c FALSE if native format should
+ be used.
@return New pointer into memory based on from + length of the data
*/
const uchar *Field_blob::unpack(uchar *to,
const uchar *from,
- uint param_data)
-{
- return unpack(to, from);
-}
-
-
-const uchar *Field_blob::unpack(uchar *to, const uchar *from)
-{
- uint32 length=get_length(from);
- memcpy(to,from,packlength);
- from+=packlength;
- if (length)
- memcpy_fixed(to+packlength, &from, sizeof(from));
- else
- bzero(to+packlength,sizeof(from));
- return from+length;
+ uint param_data,
+ bool low_byte_first)
+{
+ DBUG_ENTER("Field_blob::unpack");
+ DBUG_PRINT("enter", ("to: 0x%lx; from: 0x%lx;"
+ " param_data: %u; low_byte_first: %d",
+ (ulong) to, (ulong) from, param_data, low_byte_first));
+ uint const master_packlength=
+ param_data > 0 ? param_data & 0xFF : packlength;
+ uint32 const length= get_length(from, master_packlength, low_byte_first);
+ DBUG_DUMP("packed", from, length + master_packlength);
+ bitmap_set_bit(table->write_set, field_index);
+ store(reinterpret_cast<const char*>(from) + master_packlength,
+ length, field_charset);
+ DBUG_DUMP("record", to, table->s->reclength);
+ DBUG_RETURN(from + master_packlength + length);
}
/* Keys for blobs are like keys on varchars */
@@ -7923,7 +8046,9 @@ int Field_blob::pack_cmp(const uchar *b, uint key_length_arg,
/* Create a packed key that will be used for storage from a MySQL row */
-uchar *Field_blob::pack_key(uchar *to, const uchar *from, uint max_length)
+uchar *
+Field_blob::pack_key(uchar *to, const uchar *from, uint max_length,
+ bool low_byte_first __attribute__((unused)))
{
uchar *save= ptr;
ptr= (uchar*) from;
@@ -7968,8 +8093,9 @@ uchar *Field_blob::pack_key(uchar *to, const uchar *from, uint max_length)
Pointer into 'from' past the last byte copied from packed key.
*/
-const uchar *Field_blob::unpack_key(uchar *to, const uchar *from,
- uint max_length)
+const uchar *
+Field_blob::unpack_key(uchar *to, const uchar *from, uint max_length,
+ bool low_byte_first __attribute__((unused)))
{
/* get length of the blob key */
uint32 length= *from++;
@@ -7992,8 +8118,9 @@ const uchar *Field_blob::unpack_key(uchar *to, const uchar *from,
/* Create a packed key that will be used for storage from a MySQL key */
-uchar *Field_blob::pack_key_from_key_image(uchar *to, const uchar *from,
- uint max_length)
+uchar *
+Field_blob::pack_key_from_key_image(uchar *to, const uchar *from, uint max_length,
+ bool low_byte_first __attribute__((unused)))
{
uint length=uint2korr(from);
if (length > max_length)
@@ -8940,9 +9067,11 @@ void Field_bit::sql_type(String &res) const
}
-uchar *Field_bit::pack(uchar *to, const uchar *from, uint max_length)
+uchar *
+Field_bit::pack(uchar *to, const uchar *from, uint max_length,
+ bool low_byte_first __attribute__((unused)))
{
- DBUG_ASSERT(max_length);
+ DBUG_ASSERT(max_length > 0);
uint length;
if (bit_len > 0)
{
@@ -8977,28 +9106,44 @@ uchar *Field_bit::pack(uchar *to, const uchar *from, uint max_length)
/**
Unpack a bit field from row data.
- This method is used to unpack a bit field from a master whose size
+ This method is used to unpack a bit field from a master whose size
of the field is less than that of the slave.
-
+
@param to Destination of the data
@param from Source of the data
@param param_data Bit length (upper) and length (lower) values
@return New pointer into memory based on from + length of the data
*/
-const uchar *Field_bit::unpack(uchar *to,
- const uchar *from,
- uint param_data)
+const uchar *
+Field_bit::unpack(uchar *to, const uchar *from, uint param_data,
+ bool low_byte_first __attribute__((unused)))
{
uint const from_len= (param_data >> 8U) & 0x00ff;
uint const from_bit_len= param_data & 0x00ff;
/*
- If the master and slave have the same sizes, then use the old
- unpack() method.
+ If the parameter data is zero (i.e., undefined), or if the master
+ and slave have the same sizes, then use the old unpack() method.
*/
- if ((from_bit_len == bit_len) &&
- (from_len == bytes_in_rec))
- return(unpack(to, from));
+ if (param_data == 0 ||
+ (from_bit_len == bit_len) && (from_len == bytes_in_rec))
+ {
+ if (bit_len > 0)
+ {
+ /*
+ set_rec_bits is a macro, don't put the post-increment in the
+ argument since that might cause strange side-effects.
+
+ For the choice of the second argument, see the explanation for
+ Field_bit::pack().
+ */
+ set_rec_bits(*from, bit_ptr + (to - ptr), bit_ofs, bit_len);
+ from++;
+ }
+ memcpy(to, from, bytes_in_rec);
+ return from + bytes_in_rec;
+ }
+
/*
We are converting a smaller bit field to a larger one here.
To do that, we first need to construct a raw value for the original
@@ -9026,25 +9171,6 @@ const uchar *Field_bit::unpack(uchar *to,
}
-const uchar *Field_bit::unpack(uchar *to, const uchar *from)
-{
- if (bit_len > 0)
- {
- /*
- set_rec_bits is a macro, don't put the post-increment in the
- argument since that might cause strange side-effects.
-
- For the choice of the second argument, see the explanation for
- Field_bit::pack().
- */
- set_rec_bits(*from, bit_ptr + (to - ptr), bit_ofs, bit_len);
- from++;
- }
- memcpy(to, from, bytes_in_rec);
- return from + bytes_in_rec;
-}
-
-
void Field_bit::set_default()
{
if (bit_len > 0)
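For BIT columns, the metadata word combines the number of whole bytes (upper byte) with the number of leftover bits (lower byte), exactly as the from_len/from_bit_len extraction above reads it back. A tiny illustrative decode (standalone C++; the example column is hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      /* e.g. a BIT(19) column on the master: 2 whole bytes plus 3 bits */
      uint16_t param_data = (2 << 8) | 3;
      unsigned from_len     = (param_data >> 8) & 0x00ff;  /* bytes_in_rec on the master */
      unsigned from_bit_len =  param_data       & 0x00ff;  /* bit_len on the master      */
      std::printf("bytes=%u bits=%u\n", from_len, from_bit_len);
      return 0;
    }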
diff --git a/sql/field.h b/sql/field.h
index d0d867d0b32..4b09f50a59a 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -175,6 +175,17 @@ public:
*/
virtual uint32 data_length() { return pack_length(); }
virtual uint32 sort_length() const { return pack_length(); }
+
+ /**
+ Get the maximum size of the data in packed format.
+
+ @return Maximum data length of the field when packed using the
+ Field::pack() function.
+ */
+ virtual uint32 max_data_length() const {
+ return pack_length();
+ };
+
virtual int reset(void) { bzero(ptr,pack_length()); return 0; }
virtual void reset_fields() {}
virtual void set_default()
@@ -357,32 +368,45 @@ public:
return str;
}
virtual bool send_binary(Protocol *protocol);
- virtual uchar *pack(uchar *to, const uchar *from, uint max_length=~(uint) 0)
+
+ virtual uchar *pack(uchar *to, const uchar *from,
+ uint max_length, bool low_byte_first);
+ /**
+ @overload Field::pack(uchar*, const uchar*, uint, bool)
+ */
+ uchar *pack(uchar *to, const uchar *from)
{
- uint32 length=pack_length();
- memcpy(to,from,length);
- return to+length;
+ DBUG_ENTER("Field::pack");
+ uchar *result= this->pack(to, from, UINT_MAX, table->s->db_low_byte_first);
+ DBUG_RETURN(result);
}
- virtual const uchar *unpack(uchar* to, const uchar *from, uint param_data);
- virtual const uchar *unpack(uchar* to, const uchar *from)
+
+ virtual const uchar *unpack(uchar* to, const uchar *from,
+ uint param_data, bool low_byte_first);
+ /**
+ @overload Field::unpack(uchar*, const uchar*, uint, bool)
+ */
+ const uchar *unpack(uchar* to, const uchar *from)
{
- uint length=pack_length();
- memcpy(to,from,length);
- return from+length;
+ DBUG_ENTER("Field::unpack");
+ const uchar *result= unpack(to, from, 0U, table->s->db_low_byte_first);
+ DBUG_RETURN(result);
}
- virtual uchar *pack_key(uchar* to, const uchar *from, uint max_length)
+
+ virtual uchar *pack_key(uchar* to, const uchar *from,
+ uint max_length, bool low_byte_first)
{
- return pack(to,from,max_length);
+ return pack(to, from, max_length, low_byte_first);
}
virtual uchar *pack_key_from_key_image(uchar* to, const uchar *from,
- uint max_length)
+ uint max_length, bool low_byte_first)
{
- return pack(to,from,max_length);
+ return pack(to, from, max_length, low_byte_first);
}
virtual const uchar *unpack_key(uchar* to, const uchar *from,
- uint max_length)
+ uint max_length, bool low_byte_first)
{
- return unpack(to,from);
+ return unpack(to, from, max_length, low_byte_first);
}
virtual uint packed_col_length(const uchar *to, uint length)
{ return length;}
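The convenience overloads above simply forward to the four-argument virtuals with the table's native byte order. A minimal sketch of the same delegation pattern outside the server sources (the class and member names here are illustrative, not the real Field API):

    #include <cstdint>

    /* The two-argument wrapper forwards to the full virtual with a stored
       "native byte order" flag, mirroring Field::pack(to, from) above;
       replication code calls the long form with an explicit flag instead. */
    struct PackerSketch {
      bool db_low_byte_first;   /* stands in for table->s->db_low_byte_first */

      virtual uint8_t *pack(uint8_t *to, const uint8_t *from,
                            unsigned max_length, bool low_byte_first) {
        (void) max_length; (void) low_byte_first;
        *to++ = *from;          /* trivial one-byte "field" for illustration */
        return to;
      }
      uint8_t *pack(uint8_t *to, const uint8_t *from) {
        return pack(to, from, ~0U, db_low_byte_first);
      }
      virtual ~PackerSketch() {}
    };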
@@ -567,6 +591,7 @@ public:
{}
int store_decimal(const my_decimal *d);
+ uint32 max_data_length() const;
};
/* base class for float and double and decimal (old one) */
@@ -587,6 +612,10 @@ public:
int truncate(double *nr, double max_length);
uint32 max_display_length() { return field_length; }
uint size_of() const { return sizeof(*this); }
+ virtual const uchar *unpack(uchar* to, const uchar *from,
+ uint param_data, bool low_byte_first);
+ virtual uchar *pack(uchar* to, const uchar *from,
+ uint max_length, bool low_byte_first);
};
@@ -615,6 +644,16 @@ public:
void overflow(bool negative);
bool zero_pack() const { return 0; }
void sql_type(String &str) const;
+ virtual const uchar *unpack(uchar* to, const uchar *from,
+ uint param_data, bool low_byte_first)
+ {
+ return Field::unpack(to, from, param_data, low_byte_first);
+ }
+ virtual uchar *pack(uchar* to, const uchar *from,
+ uint max_length, bool low_byte_first)
+ {
+ return Field::pack(to, from, max_length, low_byte_first);
+ }
};
@@ -665,7 +704,8 @@ public:
uint row_pack_length() { return pack_length(); }
int compatible_field_size(uint field_metadata);
uint is_equal(Create_field *new_field);
- virtual const uchar *unpack(uchar* to, const uchar *from, uint param_data);
+ virtual const uchar *unpack(uchar* to, const uchar *from,
+ uint param_data, bool low_byte_first);
};
@@ -696,6 +736,20 @@ public:
uint32 pack_length() const { return 1; }
void sql_type(String &str) const;
uint32 max_display_length() { return 4; }
+
+ virtual uchar *pack(uchar* to, const uchar *from,
+ uint max_length, bool low_byte_first)
+ {
+ *to= *from;
+ return to + 1;
+ }
+
+ virtual const uchar *unpack(uchar* to, const uchar *from,
+ uint param_data, bool low_byte_first)
+ {
+ *to= *from;
+ return from + 1;
+ }
};
@@ -731,8 +785,47 @@ public:
uint32 pack_length() const { return 2; }
void sql_type(String &str) const;
uint32 max_display_length() { return 6; }
-};
+ virtual uchar *pack(uchar* to, const uchar *from,
+ uint max_length, bool low_byte_first)
+ {
+ int16 val;
+#ifdef WORDS_BIGENDIAN
+ if (table->s->db_low_byte_first)
+ val = sint2korr(from);
+ else
+#endif
+ shortget(val, from);
+
+#ifdef WORDS_BIGENDIAN
+ if (low_byte_first)
+ int2store(to, val);
+ else
+#endif
+ shortstore(to, val);
+ return to + sizeof(val);
+ }
+
+ virtual const uchar *unpack(uchar* to, const uchar *from,
+ uint param_data, bool low_byte_first)
+ {
+ int16 val;
+#ifdef WORDS_BIGENDIAN
+ if (low_byte_first)
+ val = sint2korr(from);
+ else
+#endif
+ shortget(val, from);
+
+#ifdef WORDS_BIGENDIAN
+ if (table->s->db_low_byte_first)
+ int2store(to, val);
+ else
+#endif
+ shortstore(to, val);
+ return from + sizeof(val);
+ }
+};
class Field_medium :public Field_num {
public:
@@ -761,6 +854,18 @@ public:
uint32 pack_length() const { return 3; }
void sql_type(String &str) const;
uint32 max_display_length() { return 8; }
+
+ virtual uchar *pack(uchar* to, const uchar *from,
+ uint max_length, bool low_byte_first)
+ {
+ return Field::pack(to, from, max_length, low_byte_first);
+ }
+
+ virtual const uchar *unpack(uchar* to, const uchar *from,
+ uint param_data, bool low_byte_first)
+ {
+ return Field::unpack(to, from, param_data, low_byte_first);
+ }
};
@@ -796,6 +901,45 @@ public:
uint32 pack_length() const { return 4; }
void sql_type(String &str) const;
uint32 max_display_length() { return MY_INT32_NUM_DECIMAL_DIGITS; }
+ virtual uchar *pack(uchar* to, const uchar *from,
+ uint max_length, bool low_byte_first)
+ {
+ int32 val;
+#ifdef WORDS_BIGENDIAN
+ if (table->s->db_low_byte_first)
+ val = sint4korr(from);
+ else
+#endif
+ longget(val, from);
+
+#ifdef WORDS_BIGENDIAN
+ if (low_byte_first)
+ int4store(to, val);
+ else
+#endif
+ longstore(to, val);
+ return to + sizeof(val);
+ }
+
+ virtual const uchar *unpack(uchar* to, const uchar *from,
+ uint param_data, bool low_byte_first)
+ {
+ int32 val;
+#ifdef WORDS_BIGENDIAN
+ if (low_byte_first)
+ val = sint4korr(from);
+ else
+#endif
+ longget(val, from);
+
+#ifdef WORDS_BIGENDIAN
+ if (table->s->db_low_byte_first)
+ int4store(to, val);
+ else
+#endif
+ longstore(to, val);
+ return from + sizeof(val);
+ }
};
@@ -838,6 +982,45 @@ public:
void sql_type(String &str) const;
bool can_be_compared_as_longlong() const { return TRUE; }
uint32 max_display_length() { return 20; }
+ virtual uchar *pack(uchar* to, const uchar *from,
+ uint max_length, bool low_byte_first)
+ {
+ int64 val;
+#ifdef WORDS_BIGENDIAN
+ if (table->s->db_low_byte_first)
+ val = sint8korr(from);
+ else
+#endif
+ longlongget(val, from);
+
+#ifdef WORDS_BIGENDIAN
+ if (low_byte_first)
+ int8store(to, val);
+ else
+#endif
+ longlongstore(to, val);
+ return to + sizeof(val);
+ }
+
+ virtual const uchar *unpack(uchar* to, const uchar *from,
+ uint param_data, bool low_byte_first)
+ {
+ int64 val;
+#ifdef WORDS_BIGENDIAN
+ if (low_byte_first)
+ val = sint8korr(from);
+ else
+#endif
+ longlongget(val, from);
+
+#ifdef WORDS_BIGENDIAN
+ if (table->s->db_low_byte_first)
+ int8store(to, val);
+ else
+#endif
+ longlongstore(to, val);
+ return from + sizeof(val);
+ }
};
#endif
@@ -1218,9 +1401,10 @@ public:
int cmp(const uchar *,const uchar *);
void sort_string(uchar *buff,uint length);
void sql_type(String &str) const;
- uchar *pack(uchar *to, const uchar *from, uint max_length=~(uint) 0);
- virtual const uchar *unpack(uchar* to, const uchar *from, uint param_data);
- const uchar *unpack(uchar* to, const uchar *from);
+ virtual uchar *pack(uchar *to, const uchar *from,
+ uint max_length, bool low_byte_first);
+ virtual const uchar *unpack(uchar* to, const uchar *from,
+ uint param_data, bool low_byte_first);
uint pack_length_from_metadata(uint field_metadata)
{ return (field_metadata & 0x00ff); }
uint row_pack_length() { return (field_length + 1); }
@@ -1298,13 +1482,15 @@ public:
uint get_key_image(uchar *buff,uint length, imagetype type);
void set_key_image(const uchar *buff,uint length);
void sql_type(String &str) const;
- uchar *pack(uchar *to, const uchar *from, uint max_length=~(uint) 0);
- uchar *pack_key(uchar *to, const uchar *from, uint max_length);
+ virtual uchar *pack(uchar *to, const uchar *from,
+ uint max_length, bool low_byte_first);
+ uchar *pack_key(uchar *to, const uchar *from, uint max_length, bool low_byte_first);
uchar *pack_key_from_key_image(uchar* to, const uchar *from,
- uint max_length);
- virtual const uchar *unpack(uchar* to, const uchar *from, uint param_data);
- const uchar *unpack(uchar* to, const uchar *from);
- const uchar *unpack_key(uchar* to, const uchar *from, uint max_length);
+ uint max_length, bool low_byte_first);
+ virtual const uchar *unpack(uchar* to, const uchar *from,
+ uint param_data, bool low_byte_first);
+ const uchar *unpack_key(uchar* to, const uchar *from,
+ uint max_length, bool low_byte_first);
int pack_cmp(const uchar *a, const uchar *b, uint key_length,
my_bool insert_or_update);
int pack_cmp(const uchar *b, uint key_length,my_bool insert_or_update);
@@ -1397,7 +1583,7 @@ public:
{ return (uint32) (packlength); }
uint row_pack_length() { return pack_length_no_ptr(); }
uint32 sort_length() const;
- inline uint32 max_data_length() const
+ virtual uint32 max_data_length() const
{
return (uint32) (((ulonglong) 1 << (packlength*8)) -1);
}
@@ -1425,13 +1611,13 @@ public:
@returns The length in the row plus the size of the data.
*/
uint32 get_packed_size(const uchar *ptr_arg, bool low_byte_first)
- {return packlength + get_length(ptr_arg, low_byte_first);}
+ {return packlength + get_length(ptr_arg, packlength, low_byte_first);}
inline uint32 get_length(uint row_offset= 0)
- { return get_length(ptr+row_offset, table->s->db_low_byte_first); }
- uint32 get_length(const uchar *ptr, bool low_byte_first);
+ { return get_length(ptr+row_offset, this->packlength, table->s->db_low_byte_first); }
+ uint32 get_length(const uchar *ptr, uint packlength, bool low_byte_first);
uint32 get_length(const uchar *ptr_arg)
- { return get_length(ptr_arg, table->s->db_low_byte_first); }
+ { return get_length(ptr_arg, this->packlength, table->s->db_low_byte_first); }
void put_length(uchar *pos, uint32 length);
inline void get_ptr(uchar **str)
{
@@ -1472,13 +1658,16 @@ public:
memcpy_fixed(ptr+packlength,&tmp,sizeof(char*));
return 0;
}
- uchar *pack(uchar *to, const uchar *from, uint max_length= ~(uint) 0);
- uchar *pack_key(uchar *to, const uchar *from, uint max_length);
+ virtual uchar *pack(uchar *to, const uchar *from,
+ uint max_length, bool low_byte_first);
+ uchar *pack_key(uchar *to, const uchar *from,
+ uint max_length, bool low_byte_first);
uchar *pack_key_from_key_image(uchar* to, const uchar *from,
- uint max_length);
- virtual const uchar *unpack(uchar *to, const uchar *from, uint param_data);
- const uchar *unpack(uchar *to, const uchar *from);
- const uchar *unpack_key(uchar* to, const uchar *from, uint max_length);
+ uint max_length, bool low_byte_first);
+ virtual const uchar *unpack(uchar *to, const uchar *from,
+ uint param_data, bool low_byte_first);
+ const uchar *unpack_key(uchar* to, const uchar *from,
+ uint max_length, bool low_byte_first);
int pack_cmp(const uchar *a, const uchar *b, uint key_length,
my_bool insert_or_update);
int pack_cmp(const uchar *b, uint key_length,my_bool insert_or_update);
@@ -1630,6 +1819,7 @@ public:
enum_field_types type() const { return MYSQL_TYPE_BIT; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; }
uint32 key_length() const { return (uint32) (field_length + 7) / 8; }
+ uint32 max_data_length() const { return (field_length + 7) / 8; }
uint32 max_display_length() { return field_length; }
uint size_of() const { return sizeof(*this); }
Item_result result_type () const { return INT_RESULT; }
@@ -1671,9 +1861,10 @@ public:
{ return (bytes_in_rec + ((bit_len > 0) ? 1 : 0)); }
int compatible_field_size(uint field_metadata);
void sql_type(String &str) const;
- uchar *pack(uchar *to, const uchar *from, uint max_length=~(uint) 0);
- virtual const uchar *unpack(uchar *to, const uchar *from, uint param_data);
- const uchar *unpack(uchar* to, const uchar *from);
+ virtual uchar *pack(uchar *to, const uchar *from,
+ uint max_length, bool low_byte_first);
+ virtual const uchar *unpack(uchar *to, const uchar *from,
+ uint param_data, bool low_byte_first);
virtual void set_default();
Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 639788d65bc..006846b147f 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -4394,6 +4394,51 @@ void Item_func_like::cleanup()
#ifdef USE_REGEX
bool
+Item_func_regex::regcomp(bool send_error)
+{
+ char buff[MAX_FIELD_WIDTH];
+ String tmp(buff,sizeof(buff),&my_charset_bin);
+ String *res= args[1]->val_str(&tmp);
+ int error;
+
+ if (args[1]->null_value)
+ return TRUE;
+
+ if (regex_compiled)
+ {
+ if (!stringcmp(res, &prev_regexp))
+ return FALSE;
+ prev_regexp.copy(*res);
+ my_regfree(&preg);
+ regex_compiled= 0;
+ }
+
+ if (cmp_collation.collation != regex_lib_charset)
+ {
+ /* Convert UCS2 strings to UTF8 */
+ uint dummy_errors;
+ if (conv.copy(res->ptr(), res->length(), res->charset(),
+ regex_lib_charset, &dummy_errors))
+ return TRUE;
+ res= &conv;
+ }
+
+ if ((error= my_regcomp(&preg, res->c_ptr_safe(),
+ regex_lib_flags, regex_lib_charset)))
+ {
+ if (send_error)
+ {
+ (void) my_regerror(error, &preg, buff, sizeof(buff));
+ my_error(ER_REGEXP_ERROR, MYF(0), buff);
+ }
+ return TRUE;
+ }
+ regex_compiled= 1;
+ return FALSE;
+}
+
+
+bool
Item_func_regex::fix_fields(THD *thd, Item **ref)
{
DBUG_ASSERT(fixed == 0);
@@ -4409,35 +4454,34 @@ Item_func_regex::fix_fields(THD *thd, Item **ref)
if (agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1))
return TRUE;
+ regex_lib_flags= (cmp_collation.collation->state &
+ (MY_CS_BINSORT | MY_CS_CSSORT)) ?
+ REG_EXTENDED | REG_NOSUB :
+ REG_EXTENDED | REG_NOSUB | REG_ICASE;
+ /*
+ In the case of UCS2 and other non-ASCII character sets,
+ we will convert patterns and strings to UTF8.
+ */
+ regex_lib_charset= (cmp_collation.collation->mbminlen > 1) ?
+ &my_charset_utf8_general_ci :
+ cmp_collation.collation;
+
used_tables_cache=args[0]->used_tables() | args[1]->used_tables();
not_null_tables_cache= (args[0]->not_null_tables() |
args[1]->not_null_tables());
const_item_cache=args[0]->const_item() && args[1]->const_item();
if (!regex_compiled && args[1]->const_item())
{
- char buff[MAX_FIELD_WIDTH];
- String tmp(buff,sizeof(buff),&my_charset_bin);
- String *res=args[1]->val_str(&tmp);
if (args[1]->null_value)
{ // Will always return NULL
maybe_null=1;
fixed= 1;
return FALSE;
}
- int error;
- if ((error= my_regcomp(&preg,res->c_ptr(),
- ((cmp_collation.collation->state &
- (MY_CS_BINSORT | MY_CS_CSSORT)) ?
- REG_EXTENDED | REG_NOSUB :
- REG_EXTENDED | REG_NOSUB | REG_ICASE),
- cmp_collation.collation)))
- {
- (void) my_regerror(error,&preg,buff,sizeof(buff));
- my_error(ER_REGEXP_ERROR, MYF(0), buff);
+ if (regcomp(TRUE))
return TRUE;
- }
- regex_compiled=regex_is_const=1;
- maybe_null=args[0]->maybe_null;
+ regex_is_const= 1;
+ maybe_null= args[0]->maybe_null;
}
else
maybe_null=1;
@@ -4450,47 +4494,25 @@ longlong Item_func_regex::val_int()
{
DBUG_ASSERT(fixed == 1);
char buff[MAX_FIELD_WIDTH];
- String *res, tmp(buff,sizeof(buff),&my_charset_bin);
+ String tmp(buff,sizeof(buff),&my_charset_bin);
+ String *res= args[0]->val_str(&tmp);
- res=args[0]->val_str(&tmp);
- if (args[0]->null_value)
- {
- null_value=1;
+ if ((null_value= (args[0]->null_value ||
+ (!regex_is_const && regcomp(FALSE)))))
return 0;
- }
- if (!regex_is_const)
- {
- char buff2[MAX_FIELD_WIDTH];
- String *res2, tmp2(buff2,sizeof(buff2),&my_charset_bin);
- res2= args[1]->val_str(&tmp2);
- if (args[1]->null_value)
+ if (cmp_collation.collation != regex_lib_charset)
+ {
+ /* Convert UCS2 strings to UTF8 */
+ uint dummy_errors;
+ if (conv.copy(res->ptr(), res->length(), res->charset(),
+ regex_lib_charset, &dummy_errors))
{
- null_value=1;
+ null_value= 1;
return 0;
}
- if (!regex_compiled || stringcmp(res2,&prev_regexp))
- {
- prev_regexp.copy(*res2);
- if (regex_compiled)
- {
- my_regfree(&preg);
- regex_compiled=0;
- }
- if (my_regcomp(&preg,res2->c_ptr_safe(),
- ((cmp_collation.collation->state &
- (MY_CS_BINSORT | MY_CS_CSSORT)) ?
- REG_EXTENDED | REG_NOSUB :
- REG_EXTENDED | REG_NOSUB | REG_ICASE),
- cmp_collation.collation))
- {
- null_value=1;
- return 0;
- }
- regex_compiled=1;
- }
+ res= &conv;
}
- null_value=0;
return my_regexec(&preg,res->c_ptr_safe(),0,(my_regmatch_t*) 0,0) ? 0 : 1;
}
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index e9aeef7fc3e..8df0e1af331 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -1382,6 +1382,10 @@ class Item_func_regex :public Item_bool_func
bool regex_is_const;
String prev_regexp;
DTCollation cmp_collation;
+ CHARSET_INFO *regex_lib_charset;
+ int regex_lib_flags;
+ String conv;
+ bool regcomp(bool send_error);
public:
Item_func_regex(Item *a,Item *b) :Item_bool_func(a,b),
regex_compiled(0),regex_is_const(0) {}
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index 1a6c15a4d2e..68d85418324 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -2612,35 +2612,27 @@ typedef struct
uint level;
String *pxml; // parsed XML
uint pos[MAX_LEVEL]; // Tag position stack
+ uint parent; // Offset of the parent of the current node
} MY_XML_USER_DATA;
-/*
- Find the parent node
-
- SYNOPSYS
- Find the parent node, i.e. a tag or attrubute node on the given level.
-
- RETURN
- 1 - success
- 0 - failure
-*/
-static uint xml_parent_tag(MY_XML_NODE *items, uint nitems, uint level)
+static bool
+append_node(String *str, MY_XML_NODE *node)
{
- if (!nitems)
- return 0;
-
- MY_XML_NODE *p, *last= &items[nitems-1];
- for (p= last; p >= items; p--)
- {
- if (p->level == level &&
- (p->type == MY_XML_NODE_TAG ||
- p->type == MY_XML_NODE_ATTR))
- {
- return p - items;
- }
- }
- return 0;
+ /*
+ If "str" doesn't have space for a new node,
+ it will allocate twice as much space as it has had so far.
+ (2*len+512) is a heuristic value,
+ which gave the best performance during tests.
+ The ideas behind this formula are:
+ - It allows a very small number of reallocs:
+ about 10 reallocs on a 1Mb-long XML value.
+ - At the same time, it avoids excessive memory use.
+ */
+ if (str->reserve(sizeof(MY_XML_NODE), 2 * str->length() + 512))
+ return TRUE;
+ str->q_append((const char*) node, sizeof(MY_XML_NODE));
+ return FALSE;
}
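The (2*len + 512) policy gives geometric growth, so the realloc count grows only logarithmically with the parsed size. A rough standalone simulation of the claim about a 1 Mb value (the real String::reserve() semantics differ slightly; this only models the growth curve):

    #include <cstdio>

    int main() {
      /* Grow an initially empty buffer to 2*len + 512 each time it runs out
         of space, and count how many growths a 1 Mb target needs. */
      unsigned long capacity = 0, target = 1024UL * 1024UL;
      int reallocs = 0;
      while (capacity < target) {
        capacity = 2 * capacity + 512;
        reallocs++;
      }
      std::printf("reallocs: %d\n", reallocs);   /* 12 for a 1 Mb target */
      return 0;
    }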
@@ -2662,19 +2654,17 @@ extern "C" int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len);
int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len)
{
MY_XML_USER_DATA *data= (MY_XML_USER_DATA*)st->user_data;
- MY_XML_NODE *nodes= (MY_XML_NODE*) data->pxml->ptr();
uint numnodes= data->pxml->length() / sizeof(MY_XML_NODE);
- uint parent= xml_parent_tag(nodes, numnodes, data->level - 1);
MY_XML_NODE node;
+ node.parent= data->parent; // Set parent for the new node to old parent
+ data->parent= numnodes; // Remember current node as new parent
data->pos[data->level]= numnodes;
node.level= data->level++;
node.type= st->current_node_type; // TAG or ATTR
node.beg= attr;
node.end= attr + len;
- node.parent= parent;
- data->pxml->append((const char*) &node, sizeof(MY_XML_NODE));
- return MY_XML_OK;
+ return append_node(data->pxml, &node) ? MY_XML_ERROR : MY_XML_OK;
}
@@ -2695,18 +2685,14 @@ extern "C" int xml_value(MY_XML_PARSER *st,const char *attr, size_t len);
int xml_value(MY_XML_PARSER *st,const char *attr, size_t len)
{
MY_XML_USER_DATA *data= (MY_XML_USER_DATA*)st->user_data;
- MY_XML_NODE *nodes= (MY_XML_NODE*) data->pxml->ptr();
- uint numnodes= data->pxml->length() / sizeof(MY_XML_NODE);
- uint parent= xml_parent_tag(nodes, numnodes, data->level - 1);
MY_XML_NODE node;
+ node.parent= data->parent; // Set parent for the new text node to old parent
node.level= data->level;
node.type= MY_XML_NODE_TEXT;
node.beg= attr;
node.end= attr + len;
- node.parent= parent;
- data->pxml->append((const char*) &node, sizeof(MY_XML_NODE));
- return MY_XML_OK;
+ return append_node(data->pxml, &node) ? MY_XML_ERROR : MY_XML_OK;
}
@@ -2731,6 +2717,7 @@ int xml_leave(MY_XML_PARSER *st,const char *attr, size_t len)
data->level--;
MY_XML_NODE *nodes= (MY_XML_NODE*) data->pxml->ptr();
+ data->parent= nodes[data->parent].parent;
nodes+= data->pos[data->level];
nodes->tagend= st->cur;
@@ -2761,6 +2748,7 @@ String *Item_xml_str_func::parse_xml(String *raw_xml, String *parsed_xml_buf)
p.flags= MY_XML_FLAG_RELATIVE_NAMES | MY_XML_FLAG_SKIP_TEXT_NORMALIZATION;
user_data.level= 0;
user_data.pxml= parsed_xml_buf;
+ user_data.parent= 0;
my_xml_set_enter_handler(&p, xml_enter);
my_xml_set_value_handler(&p, xml_value);
my_xml_set_leave_handler(&p, xml_leave);
diff --git a/sql/log.cc b/sql/log.cc
index 688ed03d5d1..9fdede9ef2c 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -2158,13 +2158,9 @@ const char *MYSQL_LOG::generate_name(const char *log_name,
{
if (!log_name || !log_name[0])
{
- /*
- TODO: The following should be using fn_format(); We just need to
- first change fn_format() to cut the file name if it's too long.
- */
- strmake(buff, pidfile_name, FN_REFLEN - 5);
- strmov(fn_ext(buff), suffix);
- return (const char *)buff;
+ strmake(buff, pidfile_name, FN_REFLEN - strlen(suffix) - 1);
+ return (const char *)
+ fn_format(buff, buff, "", suffix, MYF(MY_REPLACE_EXT|MY_REPLACE_DIR));
}
// get rid of extension if the log is binary to avoid problems
if (strip_ext)
@@ -3569,9 +3565,6 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
(!binlog_filter->db_ok(local_db)))
{
VOID(pthread_mutex_unlock(&LOCK_log));
- DBUG_PRINT("info",("OPTION_BIN_LOG is %s, db_ok('%s') == %d",
- (thd->options & OPTION_BIN_LOG) ? "set" : "clear",
- local_db, binlog_filter->db_ok(local_db)));
DBUG_RETURN(0);
}
#endif /* HAVE_REPLICATION */
diff --git a/sql/log.h b/sql/log.h
index bef0101c8b5..20a1b7e8e6d 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -130,7 +130,13 @@ typedef struct st_log_info
my_off_t pos;
bool fatal; // if the purge happens to give us a negative offset
pthread_mutex_t lock;
- st_log_info():fatal(0) { pthread_mutex_init(&lock, MY_MUTEX_INIT_FAST);}
+ st_log_info()
+ : index_file_offset(0), index_file_start_offset(0),
+ pos(0), fatal(0)
+ {
+ log_file_name[0] = '\0';
+ pthread_mutex_init(&lock, MY_MUTEX_INIT_FAST);
+ }
~st_log_info() { pthread_mutex_destroy(&lock);}
} LOG_INFO;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index a6d07e72033..2b3037aedcc 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -36,6 +36,64 @@
#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) && !defined(DBUG_OFF) && !defined(_lint)
+static const char *HA_ERR(int i)
+{
+ switch (i) {
+ case HA_ERR_KEY_NOT_FOUND: return "HA_ERR_KEY_NOT_FOUND";
+ case HA_ERR_FOUND_DUPP_KEY: return "HA_ERR_FOUND_DUPP_KEY";
+ case HA_ERR_RECORD_CHANGED: return "HA_ERR_RECORD_CHANGED";
+ case HA_ERR_WRONG_INDEX: return "HA_ERR_WRONG_INDEX";
+ case HA_ERR_CRASHED: return "HA_ERR_CRASHED";
+ case HA_ERR_WRONG_IN_RECORD: return "HA_ERR_WRONG_IN_RECORD";
+ case HA_ERR_OUT_OF_MEM: return "HA_ERR_OUT_OF_MEM";
+ case HA_ERR_NOT_A_TABLE: return "HA_ERR_NOT_A_TABLE";
+ case HA_ERR_WRONG_COMMAND: return "HA_ERR_WRONG_COMMAND";
+ case HA_ERR_OLD_FILE: return "HA_ERR_OLD_FILE";
+ case HA_ERR_NO_ACTIVE_RECORD: return "HA_ERR_NO_ACTIVE_RECORD";
+ case HA_ERR_RECORD_DELETED: return "HA_ERR_RECORD_DELETED";
+ case HA_ERR_RECORD_FILE_FULL: return "HA_ERR_RECORD_FILE_FULL";
+ case HA_ERR_INDEX_FILE_FULL: return "HA_ERR_INDEX_FILE_FULL";
+ case HA_ERR_END_OF_FILE: return "HA_ERR_END_OF_FILE";
+ case HA_ERR_UNSUPPORTED: return "HA_ERR_UNSUPPORTED";
+ case HA_ERR_TO_BIG_ROW: return "HA_ERR_TO_BIG_ROW";
+ case HA_WRONG_CREATE_OPTION: return "HA_WRONG_CREATE_OPTION";
+ case HA_ERR_FOUND_DUPP_UNIQUE: return "HA_ERR_FOUND_DUPP_UNIQUE";
+ case HA_ERR_UNKNOWN_CHARSET: return "HA_ERR_UNKNOWN_CHARSET";
+ case HA_ERR_WRONG_MRG_TABLE_DEF: return "HA_ERR_WRONG_MRG_TABLE_DEF";
+ case HA_ERR_CRASHED_ON_REPAIR: return "HA_ERR_CRASHED_ON_REPAIR";
+ case HA_ERR_CRASHED_ON_USAGE: return "HA_ERR_CRASHED_ON_USAGE";
+ case HA_ERR_LOCK_WAIT_TIMEOUT: return "HA_ERR_LOCK_WAIT_TIMEOUT";
+ case HA_ERR_LOCK_TABLE_FULL: return "HA_ERR_LOCK_TABLE_FULL";
+ case HA_ERR_READ_ONLY_TRANSACTION: return "HA_ERR_READ_ONLY_TRANSACTION";
+ case HA_ERR_LOCK_DEADLOCK: return "HA_ERR_LOCK_DEADLOCK";
+ case HA_ERR_CANNOT_ADD_FOREIGN: return "HA_ERR_CANNOT_ADD_FOREIGN";
+ case HA_ERR_NO_REFERENCED_ROW: return "HA_ERR_NO_REFERENCED_ROW";
+ case HA_ERR_ROW_IS_REFERENCED: return "HA_ERR_ROW_IS_REFERENCED";
+ case HA_ERR_NO_SAVEPOINT: return "HA_ERR_NO_SAVEPOINT";
+ case HA_ERR_NON_UNIQUE_BLOCK_SIZE: return "HA_ERR_NON_UNIQUE_BLOCK_SIZE";
+ case HA_ERR_NO_SUCH_TABLE: return "HA_ERR_NO_SUCH_TABLE";
+ case HA_ERR_TABLE_EXIST: return "HA_ERR_TABLE_EXIST";
+ case HA_ERR_NO_CONNECTION: return "HA_ERR_NO_CONNECTION";
+ case HA_ERR_NULL_IN_SPATIAL: return "HA_ERR_NULL_IN_SPATIAL";
+ case HA_ERR_TABLE_DEF_CHANGED: return "HA_ERR_TABLE_DEF_CHANGED";
+ case HA_ERR_NO_PARTITION_FOUND: return "HA_ERR_NO_PARTITION_FOUND";
+ case HA_ERR_RBR_LOGGING_FAILED: return "HA_ERR_RBR_LOGGING_FAILED";
+ case HA_ERR_DROP_INDEX_FK: return "HA_ERR_DROP_INDEX_FK";
+ case HA_ERR_FOREIGN_DUPLICATE_KEY: return "HA_ERR_FOREIGN_DUPLICATE_KEY";
+ case HA_ERR_TABLE_NEEDS_UPGRADE: return "HA_ERR_TABLE_NEEDS_UPGRADE";
+ case HA_ERR_TABLE_READONLY: return "HA_ERR_TABLE_READONLY";
+ case HA_ERR_AUTOINC_READ_FAILED: return "HA_ERR_AUTOINC_READ_FAILED";
+ case HA_ERR_AUTOINC_ERANGE: return "HA_ERR_AUTOINC_ERANGE";
+ case HA_ERR_GENERIC: return "HA_ERR_GENERIC";
+ case HA_ERR_RECORD_IS_THE_SAME: return "HA_ERR_RECORD_IS_THE_SAME";
+ case HA_ERR_LOGGING_IMPOSSIBLE: return "HA_ERR_LOGGING_IMPOSSIBLE";
+ case HA_ERR_CORRUPT_EVENT: return "HA_ERR_CORRUPT_EVENT";
+ }
+ return "<unknown error>";
+}
+#endif
+
/*
Cache that will automatically be written to a dedicated file on
destruction.
@@ -114,6 +172,9 @@ private:
flag_set m_flags;
};
+#ifndef DBUG_OFF
+uint debug_not_change_ts_if_art_event= 1; // bug#29309 simulation
+#endif
/*
pretty_print_str()
@@ -555,8 +616,32 @@ int Log_event::do_update_pos(Relay_log_info *rli)
Matz: I don't think we will need this check with this refactoring.
*/
if (rli)
- rli->stmt_done(log_pos, when);
-
+ {
+ /*
+ bug#29309 simulation: resetting the flag to force
+ wrong behaviour of artificial event to update
+ rli->last_master_timestamp for only one time -
+ the first FLUSH LOGS in the test.
+ */
+ DBUG_EXECUTE_IF("let_first_flush_log_change_timestamp",
+ if (debug_not_change_ts_if_art_event == 1
+ && is_artificial_event())
+ {
+ debug_not_change_ts_if_art_event= 0;
+ });
+#ifndef DBUG_OFF
+ rli->stmt_done(log_pos,
+ is_artificial_event() &&
+ debug_not_change_ts_if_art_event > 0 ? 0 : when);
+#else
+ rli->stmt_done(log_pos, is_artificial_event()? 0 : when);
+#endif
+ DBUG_EXECUTE_IF("let_first_flush_log_change_timestamp",
+ if (debug_not_change_ts_if_art_event == 0)
+ {
+ debug_not_change_ts_if_art_event= 2;
+ });
+ }
return 0; // Cannot fail currently
}
@@ -570,7 +655,8 @@ Log_event::do_shall_skip(Relay_log_info *rli)
(ulong) server_id, (ulong) ::server_id,
rli->replicate_same_server_id,
rli->slave_skip_counter));
- if (server_id == ::server_id && !rli->replicate_same_server_id)
+ if (server_id == ::server_id && !rli->replicate_same_server_id ||
+ rli->slave_skip_counter == 1 && rli->is_in_group())
return EVENT_SKIP_IGNORE;
else if (rli->slave_skip_counter > 0)
return EVENT_SKIP_COUNT;
@@ -1227,6 +1313,16 @@ void Log_event::print_timestamp(IO_CACHE* file, time_t* ts)
#endif /* MYSQL_CLIENT */
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+inline Log_event::enum_skip_reason
+Log_event::continue_group(Relay_log_info *rli)
+{
+ if (rli->slave_skip_counter == 1)
+ return Log_event::EVENT_SKIP_IGNORE;
+ return Log_event::do_shall_skip(rli);
+}
+#endif
+
/**************************************************************************
Query_log_event methods
**************************************************************************/
@@ -1290,6 +1386,11 @@ static void write_str_with_code_and_len(char **dst, const char *src,
bool Query_log_event::write(IO_CACHE* file)
{
+ /**
+ @todo if catalog can be of length FN_REFLEN==512, then we are not
+ replicating it correctly, since the length is stored in a byte
+ /sven
+ */
uchar buf[QUERY_HEADER_LEN+
1+4+ // code of flags2 and flags2
1+8+ // code of sql_mode and sql_mode
@@ -1516,6 +1617,10 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
time(&end_time);
exec_time = (ulong) (end_time - thd_arg->start_time);
+ /**
+ @todo this means that if we have no catalog, then it is replicated
+ as an existing catalog of length zero. is that safe? /sven
+ */
catalog_len = (catalog) ? (uint32) strlen(catalog) : 0;
/* status_vars_len is set just before writing the event */
db_len = (db) ? (uint32) strlen(db) : 0;
@@ -1525,7 +1630,7 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
/*
If we don't use flags2 for anything else than options contained in
thd_arg->options, it would be more efficient to flags2=thd_arg->options
- (OPTIONS_WRITTEN_TO_BINLOG would be used only at reading time).
+ (OPTIONS_WRITTEN_TO_BIN_LOG would be used only at reading time).
But it's likely that we don't want to use 32 bits for 3 bits; in the future
we will probably want to reclaim the 29 bits. So we need the &.
*/
@@ -1556,18 +1661,48 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
/* 2 utility functions for the next method */
-/*
- Get the pointer for a string (src) that contains the length in
- the first byte. Set the output string (dst) to the string value
- and place the length of the string in the byte after the string.
+/**
+ Read a string with length from memory.
+
+ This function reads the string-with-length stored at
+ <code>src</code> and extract the length into <code>*len</code> and
+ a pointer to the start of the string into <code>*dst</code>. The
+ string can then be copied using <code>memcpy()</code> with the
+ number of bytes given in <code>*len</code>.
+
+ @param src Pointer to variable holding a pointer to the memory to
+ read the string from.
+ @param dst Pointer to variable holding a pointer where the actual
+ string starts. Starting from this position, the string
+ can be copied using @c memcpy().
+ @param len Pointer to variable where the length will be stored.
+ @param end One-past-the-end of the memory where the string is
+ stored.
+
+ @return Zero if the entire string can be copied successfully,
+ @c UINT_MAX if the length could not be read from memory
+ (that is, if <code>*src >= end</code>), otherwise the
+ number of bytes that are missing to read the full
+ string, which happens when <code>*dst + *len >= end</code>.
*/
-static void get_str_len_and_pointer(const Log_event::Byte **src,
- const char **dst,
- uint *len)
-{
- if ((*len= **src))
- *dst= (char *)*src + 1; // Will be copied later
- (*src)+= *len + 1;
+static int
+get_str_len_and_pointer(const Log_event::Byte **src,
+ const char **dst,
+ uint *len,
+ const Log_event::Byte *end)
+{
+ if (*src >= end)
+ return -1; // Will be UINT_MAX in two's-complement arithmetic
+ uint length= **src;
+ if (length > 0)
+ {
+ if (*src + length >= end)
+ return *src + length - end + 1; // Number of bytes missing
+ *dst= (char *)*src + 1; // Will be copied later
+ }
+ *len= length;
+ *src+= length + 1;
+ return 0;
}
static void copy_str_and_move(const char **src,
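Given the contract documented above (zero on success, non-zero when the event buffer is too short), the caller treats any non-zero result as a corrupt event. A self-contained restatement of that contract with stand-in types, plus a small check (not the server's actual types):

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    /* Read a 1-byte length followed by that many bytes, failing when the
       read would run past 'end' -- the same contract as the server helper. */
    static int get_str_len_and_pointer(const uint8_t **src, const char **dst,
                                       unsigned *len, const uint8_t *end) {
      if (*src >= end)
        return -1;                                   /* length byte missing */
      unsigned length = **src;
      if (length > 0) {
        if (*src + length >= end)
          return (int) (*src + length - end) + 1;    /* bytes missing       */
        *dst = (const char *) *src + 1;
      }
      *len = length;
      *src += length + 1;
      return 0;
    }

    int main() {
      const uint8_t buf[] = { 3, 'a', 'b', 'c', 0 };
      const uint8_t *src = buf;
      const char *dst = 0;
      unsigned len = 0;
      assert(get_str_len_and_pointer(&src, &dst, &len, buf + sizeof(buf)) == 0);
      assert(len == 3 && std::memcmp(dst, "abc", 3) == 0);
      return 0;
    }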
@@ -1580,6 +1715,46 @@ static void copy_str_and_move(const char **src,
*(*dst)++= 0;
}
+
+#ifndef DBUG_OFF
+static char const *
+code_name(int code)
+{
+ static char buf[255];
+ switch (code) {
+ case Q_FLAGS2_CODE: return "Q_FLAGS2_CODE";
+ case Q_SQL_MODE_CODE: return "Q_SQL_MODE_CODE";
+ case Q_CATALOG_CODE: return "Q_CATALOG_CODE";
+ case Q_AUTO_INCREMENT: return "Q_AUTO_INCREMENT";
+ case Q_CHARSET_CODE: return "Q_CHARSET_CODE";
+ case Q_TIME_ZONE_CODE: return "Q_TIME_ZONE_CODE";
+ case Q_CATALOG_NZ_CODE: return "Q_CATALOG_NZ_CODE";
+ case Q_LC_TIME_NAMES_CODE: return "Q_LC_TIME_NAMES_CODE";
+ case Q_CHARSET_DATABASE_CODE: return "Q_CHARSET_DATABASE_CODE";
+ }
+ sprintf(buf, "CODE#%d", code);
+ return buf;
+}
+#endif
+
+/**
+ Macro to check that there is enough space to read from memory.
+
+ @param PTR Pointer to memory
+ @param END End of memory
+ @param CNT Number of bytes that should be read.
+ */
+#define CHECK_SPACE(PTR,END,CNT) \
+ do { \
+ DBUG_PRINT("info", ("Read %s", code_name(pos[-1]))); \
+ DBUG_ASSERT((PTR) + (CNT) <= (END)); \
+ if ((PTR) + (CNT) > (END)) { \
+ DBUG_PRINT("info", ("query= 0")); \
+ query= 0; \
+ DBUG_VOID_RETURN; \
+ } \
+ } while (0)
+
/*
Query_log_event::Query_log_event()
This is used by the SQL slave thread to prepare the event before execution.
@@ -1632,6 +1807,19 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
if (tmp)
{
status_vars_len= uint2korr(buf + Q_STATUS_VARS_LEN_OFFSET);
+ /*
+ Check if status variable length is corrupt and will lead to very
+ wrong data. We could be even more strict and require data_len to
+ be even bigger, but this will suffice to catch most corruption
+ errors that can lead to a crash.
+ */
+ if (status_vars_len > min(data_len, MAX_SIZE_LOG_EVENT_STATUS))
+ {
+ DBUG_PRINT("info", ("status_vars_len (%u) > data_len (%lu); query= 0",
+ status_vars_len, data_len));
+ query= 0;
+ DBUG_VOID_RETURN;
+ }
data_len-= status_vars_len;
DBUG_PRINT("info", ("Query_log_event has status_vars_len: %u",
(uint) status_vars_len));
@@ -1651,6 +1839,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
{
switch (*pos++) {
case Q_FLAGS2_CODE:
+ CHECK_SPACE(pos, end, 4);
flags2_inited= 1;
flags2= uint4korr(pos);
DBUG_PRINT("info",("In Query_log_event, read flags2: %lu", (ulong) flags2));
@@ -1661,6 +1850,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
#ifndef DBUG_OFF
char buff[22];
#endif
+ CHECK_SPACE(pos, end, 8);
sql_mode_inited= 1;
sql_mode= (ulong) uint8korr(pos); // QQ: Fix when sql_mode is ulonglong
DBUG_PRINT("info",("In Query_log_event, read sql_mode: %s",
@@ -1669,15 +1859,24 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
break;
}
case Q_CATALOG_NZ_CODE:
- get_str_len_and_pointer(&pos, &catalog, &catalog_len);
+ DBUG_PRINT("info", ("case Q_CATALOG_NZ_CODE; pos: 0x%lx; end: 0x%lx",
+ (ulong) pos, (ulong) end));
+ if (get_str_len_and_pointer(&pos, &catalog, &catalog_len, end))
+ {
+ DBUG_PRINT("info", ("query= 0"));
+ query= 0;
+ DBUG_VOID_RETURN;
+ }
break;
case Q_AUTO_INCREMENT:
+ CHECK_SPACE(pos, end, 4);
auto_increment_increment= uint2korr(pos);
auto_increment_offset= uint2korr(pos+2);
pos+= 4;
break;
case Q_CHARSET_CODE:
{
+ CHECK_SPACE(pos, end, 6);
charset_inited= 1;
memcpy(charset, pos, 6);
pos+= 6;
@@ -1685,20 +1884,29 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
}
case Q_TIME_ZONE_CODE:
{
- get_str_len_and_pointer(&pos, &time_zone_str, &time_zone_len);
+ if (get_str_len_and_pointer(&pos, &time_zone_str, &time_zone_len, end))
+ {
+ DBUG_PRINT("info", ("Q_TIME_ZONE_CODE: query= 0"));
+ query= 0;
+ DBUG_VOID_RETURN;
+ }
break;
}
case Q_CATALOG_CODE: /* for 5.0.x where 0<=x<=3 masters */
+ CHECK_SPACE(pos, end, 1);
if ((catalog_len= *pos))
catalog= (char*) pos+1; // Will be copied later
+ CHECK_SPACE(pos, end, catalog_len + 2);
pos+= catalog_len+2; // leap over end 0
catalog_nz= 0; // catalog has end 0 in event
break;
case Q_LC_TIME_NAMES_CODE:
+ CHECK_SPACE(pos, end, 2);
lc_time_names_number= uint2korr(pos);
pos+= 2;
break;
case Q_CHARSET_DATABASE_CODE:
+ CHECK_SPACE(pos, end, 2);
charset_database_number= uint2korr(pos);
pos+= 2;
break;
@@ -1726,6 +1934,11 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
DBUG_VOID_RETURN;
if (catalog_len) // If catalog is given
{
+ /**
+ @todo we should clean up and do only copy_str_and_move; it
+ works for both cases. Then we can remove the catalog_nz
+ flag. /sven
+ */
if (likely(catalog_nz)) // true except if event comes from 5.0.0|1|2|3.
copy_str_and_move(&catalog, &start, catalog_len);
else
@@ -1738,6 +1951,13 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
if (time_zone_len)
copy_str_and_move(&time_zone_str, &start, time_zone_len);
+ /**
+ if time_zone_len or catalog_len are 0, then time_zone and catalog
+ are uninitialized at this point. shouldn't they point to the
+ zero-length null-terminated strings we allocated space for in the
+ my_alloc call above? /sven
+ */
+
/* A 2nd variable part; this is common to all versions */
memcpy((char*) start, end, data_len); // Copy db and query
start[data_len]= '\0'; // End query with \0 (For safetly)
@@ -2200,6 +2420,7 @@ end:
*/
thd->catalog= 0;
thd->set_db(NULL, 0); /* will free the current database */
+ DBUG_PRINT("info", ("end: query= 0"));
thd->query= 0; // just to be sure
thd->query_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
@@ -2235,6 +2456,30 @@ int Query_log_event::do_update_pos(Relay_log_info *rli)
}
+Log_event::enum_skip_reason
+Query_log_event::do_shall_skip(Relay_log_info *rli)
+{
+ DBUG_ENTER("Query_log_event::do_shall_skip");
+ DBUG_PRINT("debug", ("query: %s; q_len: %d", query, q_len));
+ DBUG_ASSERT(query && q_len > 0);
+
+ if (rli->slave_skip_counter > 0)
+ {
+ if (strcmp("BEGIN", query) == 0)
+ {
+ thd->options|= OPTION_BEGIN;
+ DBUG_RETURN(Log_event::continue_group(rli));
+ }
+
+ if (strcmp("COMMIT", query) == 0 || strcmp("ROLLBACK", query) == 0)
+ {
+ thd->options&= ~OPTION_BEGIN;
+ DBUG_RETURN(Log_event::EVENT_SKIP_COUNT);
+ }
+ }
+ DBUG_RETURN(Log_event::do_shall_skip(rli));
+}
+
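Editor's note: a small sketch of how the return values above combine when a whole transaction is skipped. The helper only mimics the decision logic; the assumption that the base class returns EVENT_SKIP_COUNT whenever the counter is non-zero is the editor's, inferred from the replaced code further down in this patch.

    #include <cstring>

    enum skip_reason { SKIP_NOT, SKIP_IGNORE, SKIP_COUNT };

    /* Mimic of the skip decision for a Query event (illustrative only).
       With counter == 1: BEGIN and the events inside the transaction are
       ignored without touching the counter; COMMIT/ROLLBACK consumes the
       counter, so the transaction is skipped as one unit. */
    static skip_reason shall_skip_query(const char *query, unsigned counter)
    {
      if (counter > 0)
      {
        if (strcmp(query, "BEGIN") == 0)
          return counter == 1 ? SKIP_IGNORE : SKIP_COUNT;
        if (strcmp(query, "COMMIT") == 0 || strcmp(query, "ROLLBACK") == 0)
          return SKIP_COUNT;
      }
      return counter > 0 ? SKIP_COUNT : SKIP_NOT;
    }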
#endif
@@ -2774,7 +3019,7 @@ uint Load_log_event::get_query_buffer_length()
21 + sql_ex.field_term_len*4 + 2 + // " FIELDS TERMINATED BY 'str'"
23 + sql_ex.enclosed_len*4 + 2 + // " OPTIONALLY ENCLOSED BY 'str'"
12 + sql_ex.escaped_len*4 + 2 + // " ESCAPED BY 'str'"
- 21 + sql_ex.line_term_len*4 + 2 + // " FIELDS TERMINATED BY 'str'"
+ 21 + sql_ex.line_term_len*4 + 2 + // " LINES TERMINATED BY 'str'"
19 + sql_ex.line_start_len*4 + 2 + // " LINES STARTING BY 'str'"
15 + 22 + // " IGNORE xxx LINES"
3 + (num_fields-1)*2 + field_block_len; // " (field1, field2, ...)"
@@ -3871,10 +4116,7 @@ Intvar_log_event::do_shall_skip(Relay_log_info *rli)
that we do not change the value of the slave skip counter since it
will be decreased by the following insert event.
*/
- if (rli->slave_skip_counter == 1)
- return Log_event::EVENT_SKIP_IGNORE;
- else
- return Log_event::do_shall_skip(rli);
+ return continue_group(rli);
}
#endif
@@ -3970,10 +4212,7 @@ Rand_log_event::do_shall_skip(Relay_log_info *rli)
that we do not change the value of the slave skip counter since it
will be decreased by the following insert event.
*/
- if (rli->slave_skip_counter == 1)
- return Log_event::EVENT_SKIP_IGNORE;
- else
- return Log_event::do_shall_skip(rli);
+ return continue_group(rli);
}
#endif /* !MYSQL_CLIENT */
@@ -4049,6 +4288,17 @@ int Xid_log_event::do_apply_event(Relay_log_info const *rli)
"COMMIT /* implicit, from Xid_log_event */");
return end_trans(thd, COMMIT);
}
+
+Log_event::enum_skip_reason
+Xid_log_event::do_shall_skip(Relay_log_info *rli)
+{
+ DBUG_ENTER("Xid_log_event::do_shall_skip");
+ if (rli->slave_skip_counter > 0) {
+ thd->options&= ~OPTION_BEGIN;
+ DBUG_RETURN(Log_event::EVENT_SKIP_COUNT);
+ }
+ DBUG_RETURN(Log_event::do_shall_skip(rli));
+}
#endif /* !MYSQL_CLIENT */
@@ -4427,10 +4677,7 @@ User_var_log_event::do_shall_skip(Relay_log_info *rli)
that we do not change the value of the slave skip counter since it
will be decreased by the following insert event.
*/
- if (rli->slave_skip_counter == 1)
- return Log_event::EVENT_SKIP_IGNORE;
- else
- return Log_event::do_shall_skip(rli);
+ return continue_group(rli);
}
#endif /* !MYSQL_CLIENT */
@@ -5366,6 +5613,19 @@ int Begin_load_query_log_event::get_create_or_append() const
#endif /* defined( HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+Log_event::enum_skip_reason
+Begin_load_query_log_event::do_shall_skip(Relay_log_info *rli)
+{
+ /*
+ If the slave skip counter is 1, then we should not start executing
+ on the next event.
+ */
+ return continue_group(rli);
+}
+#endif
+
+
/**************************************************************************
Execute_load_query_log_event methods
**************************************************************************/
@@ -5374,12 +5634,13 @@ int Begin_load_query_log_event::get_create_or_append() const
#ifndef MYSQL_CLIENT
Execute_load_query_log_event::
Execute_load_query_log_event(THD *thd_arg, const char* query_arg,
- ulong query_length_arg, uint fn_pos_start_arg,
- uint fn_pos_end_arg,
- enum_load_dup_handling dup_handling_arg,
- bool using_trans, bool suppress_use):
+ ulong query_length_arg, uint fn_pos_start_arg,
+ uint fn_pos_end_arg,
+ enum_load_dup_handling dup_handling_arg,
+ bool using_trans, bool suppress_use,
+ THD::killed_state killed_err_arg):
Query_log_event(thd_arg, query_arg, query_length_arg, using_trans,
- suppress_use),
+ suppress_use, killed_err_arg),
file_id(thd_arg->file_id), fn_pos_start(fn_pos_start_arg),
fn_pos_end(fn_pos_end_arg), dup_handling(dup_handling_arg)
{
@@ -5577,6 +5838,10 @@ bool sql_ex_info::write_data(IO_CACHE* file)
}
else
{
+ /**
+ @todo This is sensitive to field padding. We should write a
+ char[7], not an old_sql_ex. /sven
+ */
old_sql_ex old_ex;
old_ex.field_term= *field_term;
old_ex.enclosed= *enclosed;
@@ -6146,14 +6411,19 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
table->in_use = old_thd;
switch (error)
{
- /* Some recoverable errors */
- case HA_ERR_RECORD_CHANGED:
- case HA_ERR_KEY_NOT_FOUND: /* Idempotency support: OK if
- tuple does not exist */
- error= 0;
case 0:
break;
+ /* Some recoverable errors */
+ case HA_ERR_RECORD_CHANGED:
+ case HA_ERR_RECORD_DELETED:
+ case HA_ERR_KEY_NOT_FOUND:
+ case HA_ERR_END_OF_FILE:
+ /* Idempotency support: OK if tuple does not exist */
+ DBUG_PRINT("info", ("error: %s", HA_ERR(error)));
+ error= 0;
+ break;
+
default:
rli->report(ERROR_LEVEL, thd->net.last_errno,
"Error in %s event: row application failed. %s",
@@ -6170,6 +6440,10 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
m_curr_row_end.
*/
+ DBUG_PRINT("info", ("error: %d", error));
+ DBUG_PRINT("info", ("curr_row: 0x%lu; curr_row_end: 0x%lu; rows_end: 0x%lu",
+ (ulong) m_curr_row, (ulong) m_curr_row_end, (ulong) m_rows_end));
+
if (!m_curr_row_end && !error)
unpack_current_row(rli);
@@ -6469,6 +6743,16 @@ void Rows_log_event::print_helper(FILE *file,
data) in the table map are initialized as zero (0). The array size is the
same as the columns for the table on the slave.
+ Additionally, values saved for field metadata on the master are saved as a
+ string of bytes (uchar) in the binlog. A field may require 1 or more bytes
+ to store the information. In cases where values require multiple bytes
+ (e.g. values > 255), the endian-safe methods are used to properly encode
+ the values on the master and decode them on the slave. When the field
+ metadata values are captured on the slave, they are stored in an array of
+ type uint16. This allows the least number of casts to prevent casting bugs
+ when the field metadata is used in comparisons of field attributes. When
+ the field metadata is used for calculating addresses in pointer math, the
+ type used is uint32.
*/
/**
@@ -6866,10 +7150,7 @@ Table_map_log_event::do_shall_skip(Relay_log_info *rli)
If the slave skip counter is 1, then we should not start executing
on the next event.
*/
- if (rli->slave_skip_counter == 1)
- return Log_event::EVENT_SKIP_IGNORE;
- else
- return Log_event::do_shall_skip(rli);
+ return continue_group(rli);
}
int Table_map_log_event::do_update_pos(Relay_log_info *rli)
@@ -7383,6 +7664,9 @@ static bool record_compare(TABLE *table)
records. Check that the other engines also return correct records.
*/
+ DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
+ DBUG_DUMP("record[1]", table->record[1], table->s->reclength);
+
bool result= FALSE;
uchar saved_x[2], saved_filler[2];
@@ -7471,7 +7755,7 @@ record_compare_exit:
int Rows_log_event::find_row(const Relay_log_info *rli)
{
- DBUG_ENTER("find_row");
+ DBUG_ENTER("Rows_log_event::find_row");
DBUG_ASSERT(m_table && m_table->in_use != NULL);
@@ -7700,7 +7984,7 @@ int Rows_log_event::find_row(const Relay_log_info *rli)
DBUG_DUMP("record found", table->record[0], table->s->reclength);
table->file->ha_rnd_end();
- DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0);
+ DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == HA_ERR_RECORD_DELETED || error == 0);
DBUG_RETURN(error);
}
@@ -7900,7 +8184,15 @@ Update_rows_log_event::do_exec_row(const Relay_log_info *const rli)
int error= find_row(rli);
if (error)
+ {
+ /*
+ We need to read the second image in the event of error to be
+ able to skip to the next pair of updates
+ */
+ m_curr_row= m_curr_row_end;
+ unpack_current_row(rli);
return error;
+ }
/*
This is the situation after locating BI:
diff --git a/sql/log_event.h b/sql/log_event.h
index 0c66d1b190f..4bd496af2a4 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -18,8 +18,10 @@
@{
@file
-
- Binary log event definitions.
+
+ @brief Binary log event definitions. This includes generic code
+ common to all types of log events, as well as specific code for each
+ type of log event.
*/
@@ -37,6 +39,23 @@
#include "rpl_reporting.h"
#endif
+/**
+ Either assert or return an error.
+
+ In debug builds the condition is checked with an assertion; in
+ non-debug builds the given error code is returned when it is false.
+
+ @param COND Condition to check
+ @param ERRNO Error number to return in non-debug builds
+*/
+#ifdef DBUG_OFF
+#define ASSERT_OR_RETURN_ERROR(COND, ERRNO) \
+ do { if (!(COND)) return ERRNO; } while (0)
+#else
+#define ASSERT_OR_RETURN_ERROR(COND, ERRNO) \
+ DBUG_ASSERT(COND)
+#endif
+
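Editor's note: a short usage sketch for the macro above. The function name and buffer arguments are hypothetical; HA_ERR_CORRUPT_EVENT is the error code used with the macro later in this patch.

    /* In debug builds a failed check aborts via DBUG_ASSERT; in release
       builds the error code is returned to the caller instead. */
    static int decode_row(const unsigned char *row, const unsigned char *end)
    {
      ASSERT_OR_RETURN_ERROR(row < end, HA_ERR_CORRUPT_EVENT);
      /* ... decode fields here ... */
      return 0;
    }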
#define LOG_READ_EOF -1
#define LOG_READ_BOGUS -2
#define LOG_READ_IO -3
@@ -394,15 +413,19 @@ struct sql_ex_info
#define LOG_EVENT_BINLOG_IN_USE_F 0x1
-/*
- If the query depends on the thread (for example: TEMPORARY TABLE).
- Currently this is used by mysqlbinlog to know it must print
- SET @@PSEUDO_THREAD_ID=xx; before the query (it would not hurt to print it
- for every query but this would be slow).
+/**
+ @def LOG_EVENT_THREAD_SPECIFIC_F
+
+ If the query depends on the thread (for example: TEMPORARY TABLE).
+ Currently this is used by mysqlbinlog to know it must print
+ SET @@PSEUDO_THREAD_ID=xx; before the query (it would not hurt to print it
+ for every query but this would be slow).
*/
#define LOG_EVENT_THREAD_SPECIFIC_F 0x4
-/*
+/**
+ @def LOG_EVENT_SUPPRESS_USE_F
+
Suppress the generation of 'USE' statements before the actual
   statement. This flag should be set for any event that does not need
the current database set to function correctly. Most notable cases
@@ -421,23 +444,26 @@ struct sql_ex_info
*/
#define LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F 0x10
-/*
- OPTIONS_WRITTEN_TO_BIN_LOG are the bits of thd->options which must be
- written to the binlog. OPTIONS_WRITTEN_TO_BINLOG could be written
- into the Format_description_log_event, so that if later we don't want
- to replicate a variable we did replicate, or the contrary, it's
- doable. But it should not be too hard to decide once for all of what
- we replicate and what we don't, among the fixed 32 bits of
- thd->options.
- I (Guilhem) have read through every option's usage, and it looks like
- OPTION_AUTO_IS_NULL and OPTION_NO_FOREIGN_KEYS are the only ones
- which alter how the query modifies the table. It's good to replicate
- OPTION_RELAXED_UNIQUE_CHECKS too because otherwise, the slave may
- insert data slower than the master, in InnoDB.
- OPTION_BIG_SELECTS is not needed (the slave thread runs with
- max_join_size=HA_POS_ERROR) and OPTION_BIG_TABLES is not needed
- either, as the manual says (because a too big in-memory temp table is
- automatically written to disk).
+/**
+ @def OPTIONS_WRITTEN_TO_BIN_LOG
+
+ OPTIONS_WRITTEN_TO_BIN_LOG are the bits of thd->options which must
+ be written to the binlog. OPTIONS_WRITTEN_TO_BIN_LOG could be
+ written into the Format_description_log_event, so that if later we
+ don't want to replicate a variable we did replicate, or the
+ contrary, it's doable. But it should not be too hard to decide once
+ for all of what we replicate and what we don't, among the fixed 32
+ bits of thd->options.
+
+ I (Guilhem) have read through every option's usage, and it looks
+ like OPTION_AUTO_IS_NULL and OPTION_NO_FOREIGN_KEYS are the only
+ ones which alter how the query modifies the table. It's good to
+ replicate OPTION_RELAXED_UNIQUE_CHECKS too because otherwise, the
+ slave may insert data slower than the master, in InnoDB.
+ OPTION_BIG_SELECTS is not needed (the slave thread runs with
+ max_join_size=HA_POS_ERROR) and OPTION_BIG_TABLES is not needed
+ either, as the manual says (because a too big in-memory temp table
+ is automatically written to disk).
*/
#define OPTIONS_WRITTEN_TO_BIN_LOG \
(OPTION_AUTO_IS_NULL | OPTION_NO_FOREIGN_KEY_CHECKS | \
@@ -452,6 +478,11 @@ struct sql_ex_info
#endif
#undef EXPECTED_OPTIONS /* You shouldn't use this one */
+/**
+ @enum Log_event_type
+
+ Enumeration type for the different types of log events.
+*/
enum Log_event_type
{
/*
@@ -612,13 +643,90 @@ typedef struct st_print_event_info
#endif
-/*****************************************************************************
-
- Log_event class
+/**
+ @class Log_event
This is the abstract base class for binary log events.
-
- ****************************************************************************/
+
+ @section Log_event_binary_format Binary Format
+
+ Any Log_event saved on disk consists of the following three
+ components.
+
+ @li Common-Header
+ @li Post-Header
+ @li Body
+
+ The Common-Header, documented below, always has the same form and
+ length within one version of MySQL. Each event type specifies a
+ form and length of the Post-Header common to all events of the type.
+ The Body may be of different form and length even for different
+ events of the same type. The binary formats of Post-Header and Body
+ are documented separately in each subclass. The binary format of
+ Common-Header is as follows.
+
+ <table>
+ <caption>Common-Header</caption>
+
+ <tr>
+ <th>Name</th>
+ <th>Format<br/></th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td>timestamp</td>
+ <td>4 byte unsigned integer</td>
+ <td>The number of seconds since 1970.
+ </td>
+ </tr>
+
+ <tr>
+ <td>type</td>
+ <td>1 byte enumeration</td>
+ <td>See enum #Log_event_type.</td>
+ </tr>
+
+ <tr>
+ <td>master_id</td>
+ <td>4 byte integer</td>
+ <td>Server ID of the server that created the event.</td>
+ </tr>
+
+ <tr>
+ <td>total_size</td>
+ <td>4 byte integer</td>
+ <td>The total size of this event, in bytes. In other words, this
+ is the sum of the sizes of Common-Header, Post-Header, and Body.
+ </td>
+ </tr>
+
+ <tr>
+ <td>master_position</td>
+ <td>4 byte integer</td>
+ <td>The position of the next event in the master binary log, in
+ bytes from the beginning of the file.
+ </td>
+ </tr>
+
+ <tr>
+ <td>flags</td>
+ <td>2 byte bitfield</td>
+ <td>See Log_event::flags.</td>
+ </tr>
+ </table>
+
+ Summing up the numbers above, we see that the total size of the
+ common header is 19 bytes.
+
+ @subsection Log_event_endianness_and_string_formats Endianness and String Formats
+
+ All numbers, whether they are 16-, 32-, or 64-bit, are stored in
+ little endian, i.e., the least significant byte first.
+
+ Strings are stored in various formats. The format of each string is
+ documented separately.
+*/
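Editor's note: a minimal sketch of decoding the 19-byte Common-Header laid out in the table above. All integers are little-endian; the struct and helper names are the editor's, not MySQL API.

    #include <cstdint>

    struct Common_header
    {
      uint32_t timestamp;    /* seconds since 1970 */
      uint8_t  type;         /* one of Log_event_type */
      uint32_t server_id;    /* server that created the event */
      uint32_t total_size;   /* common header + post-header + body */
      uint32_t next_pos;     /* offset of the next event in the binlog */
      uint16_t flags;
    };

    static uint32_t le32(const unsigned char *p)
    {
      return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
             ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
    }

    static Common_header read_common_header(const unsigned char *buf)
    {
      Common_header h;
      h.timestamp=  le32(buf);
      h.type=       buf[4];
      h.server_id=  le32(buf + 5);
      h.total_size= le32(buf + 9);
      h.next_pos=   le32(buf + 13);
      h.flags=      (uint16_t) (buf[17] | (buf[18] << 8));
      return h;                          /* 19 bytes consumed in total */
    }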
class Log_event
{
public:
@@ -692,8 +800,8 @@ public:
*/
uint32 server_id;
- /*
- Some 16 flags. Look above for LOG_EVENT_TIME_F,
+ /**
+ Some 16 flags. See the definitions above for LOG_EVENT_TIME_F,
LOG_EVENT_FORCED_ROTATE_F, LOG_EVENT_THREAD_SPECIFIC_F, and
LOG_EVENT_SUPPRESS_USE_F for notes.
*/
@@ -871,6 +979,25 @@ public:
protected:
/**
+ Helper function to ignore an event w.r.t. the slave skip counter.
+
+ This function can be used inside do_shall_skip() for functions
+ that cannot end a group. If the slave skip counter is 1 when
+ seeing such an event, the event shall be ignored, the counter
+ left intact, and processing continue with the next event.
+
+ A typical usage is:
+ @code
+ enum_skip_reason do_shall_skip(Relay_log_info *rli) {
+ return continue_group(rli);
+ }
+ @endcode
+
+ @return Skip reason
+ */
+ enum_skip_reason continue_group(Relay_log_info *rli);
+
+ /**
Primitive to apply an event to the database.
This is where the change to the database is made.
@@ -950,6 +1077,7 @@ protected:
#endif
};
+
/*
One class for each type of event.
Two constructors for each class:
@@ -963,13 +1091,332 @@ protected:
mysqlbinlog. This constructor must be format-tolerant.
*/
-/*****************************************************************************
-
- Query Log Event class
-
- Logs SQL queries
+/**
+ @class Query_log_event
+
+ Logs SQL queries.
+
+ @section Query_log_event_binary_format Binary format
+
+ The Post-Header has five components:
+
+ <table>
+ <caption>Post-Header for Query_log_event</caption>
+
+ <tr>
+ <th>Name</th>
+ <th>Size<br/></th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td>slave_proxy_id</td>
+ <td>4 byte unsigned integer</td>
+ <td>An integer identifying the client thread, which is unique on
+ the server. (Note, however, that two threads on different servers
+ may have the same slave_proxy_id.) This is used when a client
+ thread creates a temporary table. Temporary tables are local to
+ the client, and the slave_proxy_id is used to distinguish
+ temporary tables belonging to different clients.
+ </td>
+ </tr>
+
+ <tr>
+ <td>exec_time</td>
+ <td>4 byte integer</td>
+ <td>???TODO</td>
+ </tr>
+
+ <tr>
+ <td>db_len</td>
+ <td>1 byte integer</td>
+ <td>The length of the name of the currently selected
+ database.
+ </td>
+ </tr>
+
+ <tr>
+ <td>error_code</td>
+ <td>2 byte integer</td>
+ <td>Error code generated by the master. If the master fails, the
+ slave will fail with the same error code, except for the error
+ codes ER_DB_CREATE_EXISTS==1007 and ER_DB_DROP_EXISTS==1008.
+ </td>
+ </tr>
+
+ <tr>
+ <td>status_vars_len</td>
+ <td>2 byte integer</td>
+ <td>The length of the status_vars block of the Body, in bytes. See
+ <a href="#query_log_event_status_vars">below</a>.
+ </td>
+ </tr>
+
+ <tr>
+ <td>Post-Header-For-Derived</td>
+ <td>0 bytes</td>
+ <td>This field is only written by the subclass
+ Execute_load_query_log_event. In this base class, it takes 0
+ bytes. See separate documentation for
+ Execute_load_query_log_event.
+ </td>
+ </tr>
+ </table>
+
+ The Body has the following components:
+
+ <table>
+ <caption>Body for Query_log_event</caption>
+
+ <tr>
+ <th>Name</th>
+ <th>Size<br/></th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td><a name="query_log_event_status_vars" /> status_vars</td>
+ <td>variable length</td>
+ <td>Zero or more status variables. Each status variable consists
+ of one byte identifying the variable stored, followed by the value
+ of the variable. The possible variables are listed separately in
+ the table below. MySQL always writes events in the order defined
+ below; however, it is capable of reading them in any order.
+ </td>
+ </tr>
+
+ <tr>
+ <td>db</td>
+ <td>db_len+1</td>
+ <td>The currently selected database, as a null-terminated string.
+
+ (The trailing zero is redundant since the length is already known;
+ it is db_len from Post-Header.)
+ </td>
+ </tr>
+
+ <tr>
+ <td>query</td>
+ <td>variable length string without trailing zero, extending to the
+ end of the event (determined by the length field of the
+ Common-Header)
+ </td>
+ <td>The SQL query.</td>
+ </tr>
+ </table>
+
+ The following table lists the status variables that may appear in
+ the status_vars field.
+
+ <table>
+ <caption>Status variables for Query_log_event</caption>
+
+ <tr>
+ <th>Status variable</th>
+ <th>1-byte identifier</th>
+ <th>Size<br/></th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td>flags2</td>
+ <td>Q_FLAGS2_CODE == 0</td>
+ <td>4 byte bitfield</td>
+ <td>The flags in thd->options, binary AND-ed with
+ OPTIONS_WRITTEN_TO_BIN_LOG. The thd->options bitfield contains
+ options for SELECT. OPTIONS_WRITTEN_TO_BIN_LOG identifies those options that
+ need to be written to the binlog (not all do). Specifically,
+ OPTIONS_WRITTEN_TO_BIN_LOG equals (OPTION_AUTO_IS_NULL |
+ OPTION_NO_FOREIGN_KEY_CHECKS | OPTION_RELAXED_UNIQUE_CHECKS |
+ OPTION_NOT_AUTOCOMMIT), or 0x0c084000 in hex.
+
+ These flags correspond to the SQL variables SQL_AUTO_IS_NULL,
+ FOREIGN_KEY_CHECKS, UNIQUE_CHECKS, and AUTOCOMMIT, documented in
+ the "SET Syntax" section of the MySQL Manual.
+
+ This field is always written to the binlog in version >= 5.0, and
+ never written in version < 5.0.
+ </td>
+ </tr>
+
+ <tr>
+ <td>sql_mode</td>
+ <td>Q_SQL_MODE_CODE == 1</td>
+ <td>8 byte integer</td>
+ <td>The sql_mode variable. See the section "SQL Modes" in the
+ MySQL manual, and see mysql_priv.h for a list of the possible
+ flags. Currently (2007-10-04), the following flags are available:
+ <pre>
+ MODE_REAL_AS_FLOAT==0x1
+ MODE_PIPES_AS_CONCAT==0x2
+ MODE_ANSI_QUOTES==0x4
+ MODE_IGNORE_SPACE==0x8
+ MODE_NOT_USED==0x10
+ MODE_ONLY_FULL_GROUP_BY==0x20
+ MODE_NO_UNSIGNED_SUBTRACTION==0x40
+ MODE_NO_DIR_IN_CREATE==0x80
+ MODE_POSTGRESQL==0x100
+ MODE_ORACLE==0x200
+ MODE_MSSQL==0x400
+ MODE_DB2==0x800
+ MODE_MAXDB==0x1000
+ MODE_NO_KEY_OPTIONS==0x2000
+ MODE_NO_TABLE_OPTIONS==0x4000
+ MODE_NO_FIELD_OPTIONS==0x8000
+ MODE_MYSQL323==0x10000
+ MODE_MYSQL40==0x20000
+ MODE_ANSI==0x40000
+ MODE_NO_AUTO_VALUE_ON_ZERO==0x80000
+ MODE_NO_BACKSLASH_ESCAPES==0x100000
+ MODE_STRICT_TRANS_TABLES==0x200000
+ MODE_STRICT_ALL_TABLES==0x400000
+ MODE_NO_ZERO_IN_DATE==0x800000
+ MODE_NO_ZERO_DATE==0x1000000
+ MODE_INVALID_DATES==0x2000000
+ MODE_ERROR_FOR_DIVISION_BY_ZERO==0x4000000
+ MODE_TRADITIONAL==0x8000000
+ MODE_NO_AUTO_CREATE_USER==0x10000000
+ MODE_HIGH_NOT_PRECEDENCE==0x20000000
+ MODE_NO_ENGINE_SUBSTITUTION==0x40000000
+ MODE_PAD_CHAR_TO_FULL_LENGTH==0x80000000
+ </pre>
+ All these flags are replicated from the server. However, all
+ flags except MODE_NO_DIR_IN_CREATE are honored by the slave; the
+ slave always preserves its old value of MODE_NO_DIR_IN_CREATE.
+ For a rationale, see comment in Query_log_event::do_apply_event in
+ log_event.cc.
+
+ This field is always written to the binlog.
+ </td>
+ </tr>
+
+ <tr>
+ <td>catalog</td>
+ <td>Q_CATALOG_NZ_CODE == 6</td>
+ <td>Variable-length string: the length in bytes (1 byte) followed
+ by the characters (at most 255 bytes)
+ </td>
+ <td>Stores the client's current catalog. Every database belongs
+ to a catalog, the same way that every table belongs to a
+ database. Currently, there is only one catalog, 'std'.
+
+ This field is written if the length of the catalog is > 0;
+ otherwise it is not written.
+ </td>
+ </tr>
+
+ <tr>
+ <td>auto_increment</td>
+ <td>Q_AUTO_INCREMENT == 3</td>
+ <td>two 2-byte unsigned integers, 4 bytes in total</td>
+
+ <td>The two variables auto_increment_increment and
+ auto_increment_offset, in that order. For more information, see
+ "System variables" in the MySQL manual.
+
+ This field is written if auto_increment>1; otherwise it is not
+ written.
+ </td>
+ </tr>
+
+ <tr>
+ <td>charset</td>
+ <td>Q_CHARSET_CODE == 4</td>
+ <td>three 2-byte unsigned integers (i.e., 6 bytes)</td>
+ <td>The three variables character_set_client,
+ collation_connection, and collation_server, in that order.
+ `character_set_client' is a code identifying the character set and
+ collation used by the client to encode the query.
+ `collation_connection' identifies the character set and collation
+ that the master converts the query to when it receives it; this is
+ useful when comparing literal strings. `collation_server' is the
+ default character set and collation used when a new database is
+ created.
+
+ See also "Connection Character Sets and Collations" in the MySQL
+ 5.1 manual.
+
+ All three variables are codes identifying a (character set,
+ collation) pair. To see which codes map to which pairs, run the
+ query "SELECT id, character_set_name, collation_name FROM
+ COLLATIONS".
+
+ Cf. Q_CHARSET_DATABASE_CODE below.
+
+ This field is always written.
+ </td>
+ </tr>
+
+ <tr>
+ <td>time_zone</td>
+ <td>Q_TIME_ZONE_CODE == 5</td>
+ <td>Variable-length string: the length in bytes (1 byte) followed
+ by the characters (at most 255 bytes).</td>
+ <td>The time_zone of the master.
+
+ See also "System Variables" and "MySQL Server Time Zone Support"
+ in the MySQL manual.
+
+ This field is written if the length of the time zone string is >
+ 0; otherwise, it is not written.
+ </td>
+ </tr>
+
+ <tr>
+ <td>lc_time_names_number</td>
+ <td>Q_LC_TIME_NAMES_CODE == 7</td>
+ <td>2 byte integer</td>
+ <td>A code identifying a table of month and day names. The
+ mapping from codes to languages is defined in sql_locale.cc.
+
+ This field is written if it is != 0, i.e., if the locale is not
+ en_US.
+ </td>
+ </tr>
+
+ <tr>
+ <td>charset_database_number</td>
+ <td>Q_CHARSET_DATABASE_CODE == 8</td>
+ <td>2 byte integer</td>
+
+ <td>The value of the collation_database system variable (in the
+ source code stored in thd->variables.collation_database), which
+ holds the code for a (character set, collation) pair as described
+ above (see Q_CHARSET_CODE).
+
+ `collation_database' was used in old versions (???WHEN). Its
+ value was loaded when issuing a "use db" command and could be
+ changed by issuing a "SET collation_database=xxx" command. It
+ used to affect the "LOAD DATA INFILE" and "CREATE TABLE" commands.
+
+ In newer versions, "CREATE TABLE" has been changed to take the
+ character set from the database of the created table, rather than
+ the current database. This makes a difference when creating a
+ table in a database other than the current one.
+ "LOAD DATA INFILE" has not yet changed to do this, but there are
+ plans to eventually do it, and to make collation_database
+ read-only.
+
+ This field is written if it is not 0.
+ </td>
+ </tr>
+ </table>
+
+ @subsection Query_log_event_notes_on_previous_versions Notes on Previous Versions
+
+ @li Status vars were introduced in version 5.0. To read earlier
+ versions correctly, check the length of the Post-Header.
+
+ @li The status variable Q_CATALOG_CODE == 2 existed in MySQL 5.0.x,
+ where 0<=x<=3. It was identical to Q_CATALOG_NZ_CODE, except that the
+ string had a trailing '\0'. The '\0' was removed in 5.0.4 since it
+ was redundant (the string length is stored before the string). The
+ Q_CATALOG_CODE will never be written by a new master, but can still
+ be understood by a new slave.
+
+ @li See Q_CHARSET_DATABASE_CODE in the table above.
- ****************************************************************************/
+*/
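Editor's note: a sketch of walking the status_vars block according to the table above; only the sizes matter here, and the function name is illustrative.

    /* Skip over the status_vars block: a sequence of <1-byte code,
       value> pairs whose value sizes are fixed by the code. */
    static const unsigned char *
    skip_status_vars(const unsigned char *pos, const unsigned char *end)
    {
      while (pos < end)
      {
        switch (*pos++)
        {
        case 0: /* Q_FLAGS2_CODE */            pos+= 4;         break;
        case 1: /* Q_SQL_MODE_CODE */          pos+= 8;         break;
        case 2: /* Q_CATALOG_CODE (5.0.0-3) */ pos+= 2 + *pos;  break;
        case 3: /* Q_AUTO_INCREMENT */         pos+= 4;         break;
        case 4: /* Q_CHARSET_CODE */           pos+= 6;         break;
        case 5: /* Q_TIME_ZONE_CODE */         pos+= 1 + *pos;  break;
        case 6: /* Q_CATALOG_NZ_CODE */        pos+= 1 + *pos;  break;
        case 7: /* Q_LC_TIME_NAMES_CODE */     pos+= 2;         break;
        case 8: /* Q_CHARSET_DATABASE_CODE */  pos+= 2;         break;
        default: return end;  /* unknown code: give up on the rest */
        }
      }
      return pos;
    }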
class Query_log_event: public Log_event
{
protected:
@@ -1027,7 +1474,7 @@ public:
/*
'flags2' is a second set of flags (on top of those in Log_event), for
session variables. These are thd->options which is & against a mask
- (OPTIONS_WRITTEN_TO_BINLOG).
+ (OPTIONS_WRITTEN_TO_BIN_LOG).
flags2_inited helps make a difference between flags2==0 (3.23 or 4.x
master, we don't know flags2, so use the slave server's global options) and
flags2==0 (5.0 master, we know this has a meaning of flags all down which
@@ -1086,6 +1533,7 @@ public:
public: /* !!! Public in this patch to allow old usage */
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+ virtual enum_skip_reason do_shall_skip(Relay_log_info *rli);
virtual int do_apply_event(Relay_log_info const *rli);
virtual int do_update_pos(Relay_log_info *rli);
@@ -1096,13 +1544,16 @@ public: /* !!! Public in this patch to allow old usage */
};
-/*****************************************************************************
+/**
+ @class Muted_query_log_event
- Muted Query Log Event class
+ Pretends to log SQL queries, but doesn't actually do so.
- Pretends to Log SQL queries, but doesn't actually do so.
+ @section Muted_query_log_event_binary_format Binary Format
- ****************************************************************************/
+ This log event is not stored, and thus the binary format is 0 bytes
+ long. Note that not even the Common-Header is stored.
+*/
class Muted_query_log_event: public Query_log_event
{
public:
@@ -1119,14 +1570,54 @@ public:
#ifdef HAVE_REPLICATION
-/*****************************************************************************
+/**
+ @class Slave_log_event
- Slave Log Event class
Note that this class is currently not used at all; no code writes a
- Slave_log_event (though some code in repl_failsafe.cc reads Slave_log_event).
- So it's not a problem if this code is not maintained.
-
- ****************************************************************************/
+ Slave_log_event (though some code in repl_failsafe.cc reads
+ Slave_log_event). So it's not a problem if this code is not
+ maintained.
+
+ @section Slave_log_event_binary_format Binary Format
+
+ This event type has no Post-Header. The Body has the following
+ four components.
+
+ <table>
+ <caption>Body for Slave_log_event</caption>
+
+ <tr>
+ <th>Name</th>
+ <th>Size<br/></th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td>master_pos</td>
+ <td>8 byte integer</td>
+ <td>???TODO
+ </td>
+ </tr>
+
+ <tr>
+ <td>master_port</td>
+ <td>2 byte integer</td>
+ <td>???TODO</td>
+ </tr>
+
+ <tr>
+ <td>master_host</td>
+ <td>null-terminated string</td>
+ <td>???TODO</td>
+ </tr>
+
+ <tr>
+ <td>master_log</td>
+ <td>null-terminated string</td>
+ <td>???TODO</td>
+ </tr>
+ </table>
+*/
class Slave_log_event: public Log_event
{
protected:
@@ -1165,11 +1656,202 @@ private:
#endif /* HAVE_REPLICATION */
-/*****************************************************************************
-
- Load Log Event class
+/**
+ @class Load_log_event
+
+ This log event corresponds to a "LOAD DATA INFILE" SQL query on the
+ following form:
+
+ @verbatim
+ (1) USE db;
+ (2) LOAD DATA [LOCAL] INFILE 'file_name'
+ (3) [REPLACE | IGNORE]
+ (4) INTO TABLE 'table_name'
+ (5) [FIELDS
+ (6) [TERMINATED BY 'field_term']
+ (7) [[OPTIONALLY] ENCLOSED BY 'enclosed']
+ (8) [ESCAPED BY 'escaped']
+ (9) ]
+ (10) [LINES
+ (11) [TERMINATED BY 'line_term']
+ (12) [STARTING BY 'line_start']
+ (13) ]
+ (14) [IGNORE skip_lines LINES]
+ (15) (field_1, field_2, ..., field_n)@endverbatim
+
+ @section Load_log_event_binary_format Binary Format
+
+ The Post-Header consists of the following six components.
+
+ <table>
+ <caption>Post-Header for Load_log_event</caption>
+
+ <tr>
+ <th>Name</th>
+ <th>Size<br/></th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td>slave_proxy_id</td>
+ <td>4 byte unsigned integer</td>
+ <td>An integer identifying the client thread, which is unique on
+ the server. (Note, however, that the same slave_proxy_id may
+ appear on different servers.) This is used when a client thread
+ creates a temporary table. Temporary tables are local to the
+ client, and the slave_proxy_id is used to distinguish temporary
+ tables belonging to different clients.
+ </td>
+ </tr>
+
+ <tr>
+ <td>exec_time</td>
+ <td>4 byte unsigned integer</td>
+ <td>???TODO</td>
+ </tr>
+
+ <tr>
+ <td>skip_lines</td>
+ <td>4 byte unsigned integer</td>
+ <td>The number on line (14) above, if present, or 0 if line (14)
+ is left out.
+ </td>
+ </tr>
+
+ <tr>
+ <td>table_name_len</td>
+ <td>1 byte unsigned integer</td>
+ <td>The length of 'table_name' on line (4) above.</td>
+ </tr>
+
+ <tr>
+ <td>db_len</td>
+ <td>1 byte unsigned integer</td>
+ <td>The length of 'db' on line (1) above.</td>
+ </tr>
+
+ <tr>
+ <td>num_fields</td>
+ <td>4 byte unsigned integer</td>
+ <td>The number n of fields on line (15) above.</td>
+ </tr>
+ </table>
+
+ The Body contains the following components.
+
+ <table>
+ <caption>Body of Load_log_event</caption>
+
+ <tr>
+ <th>Name</th>
+ <th>Size<br/></th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td>sql_ex</td>
+ <td>variable length</td>
+
+ <td>Describes the part of the query on lines (3) and
+ (5)&ndash;(13) above. More precisely, it stores the five strings
+ (on lines) field_term (6), enclosed (7), escaped (8), line_term
+ (11), and line_start (12); as well as a bitfield indicating the
+ presence of the keywords REPLACE (3), IGNORE (3), and OPTIONALLY
+ (7).
+
+ The data is stored in one of two formats, called "old" and "new".
+ The type field of Common-Header determines which of these two
+ formats is used: type LOAD_EVENT means that the old format is
+ used, and type NEW_LOAD_EVENT means that the new format is used.
+ When MySQL writes a Load_log_event, it uses the new format if at
+ least one of the five strings is two or more bytes long.
+ Otherwise (i.e., if all strings are 0 or 1 bytes long), the old
+ format is used.
+
+ The new and old format differ in the way the five strings are
+ stored.
+
+ <ul>
+ <li> In the new format, the strings are stored in the order
+ field_term, enclosed, escaped, line_term, line_start. Each string
+ consists of a length (1 byte), followed by a sequence of
+ characters (0-255 bytes). Finally, a boolean combination of the
+ following flags is stored in 1 byte: REPLACE_FLAG==0x4,
+ IGNORE_FLAG==0x8, and OPT_ENCLOSED_FLAG==0x2. If a flag is set,
+ it indicates the presence of the corresponding keyword in the SQL
+ query.
+
+ <li> In the old format, we know that each string has length 0 or
+ 1. Therefore, only the first byte of each string is stored. The
+ order of the strings is the same as in the new format. These five
+ bytes are followed by the same 1-byte bitfield as in the new
+ format. Finally, a 1 byte bitfield called empty_flags is stored.
+ The low 5 bits of empty_flags indicate which of the five strings
+ have length 0. For each of the following flags that is set, the
+ corresponding string has length 0; for the flags that are not set,
+ the string has length 1: FIELD_TERM_EMPTY==0x1,
+ ENCLOSED_EMPTY==0x2, LINE_TERM_EMPTY==0x4, LINE_START_EMPTY==0x8,
+ ESCAPED_EMPTY==0x10.
+ </ul>
+
+ Thus, the size of the new format is 6 bytes + the sum of the sizes
+ of the five strings. The size of the old format is always 7
+ bytes.
+ </td>
+ </tr>
+
+ <tr>
+ <td>field_lens</td>
+ <td>num_fields 1-byte unsigned integers</td>
+ <td>An array of num_fields integers representing the length of
+ each field in the query. (num_fields is from the Post-Header).
+ </td>
+ </tr>
+
+ <tr>
+ <td>fields</td>
+ <td>num_fields null-terminated strings</td>
+ <td>An array of num_fields null-terminated strings, each
+ representing a field in the query. (The trailing zero is
+ redundant, since the length are stored in the num_fields array.)
+ The total length of all strings equals to the sum of all
+ field_lens, plus num_fields bytes for all the trailing zeros.
+ </td>
+ </tr>
+
+ <tr>
+ <td>table_name</td>
+ <td>null-terminated string of length table_name_len+1 bytes</td>
+ <td>The 'table_name' from the query, as a null-terminated string.
+ (The trailing zero is actually redundant since table_name_len is
+ known from Post-Header.)
+ </td>
+ </tr>
+
+ <tr>
+ <td>db</td>
+ <td>null-terminated string of length db_len+1 bytes</td>
+ <td>The 'db' from the query, as a null-terminated string.
+ (The trailing zero is actually redundant since the db_len is known
+ from Post-Header.)
+ </td>
+ </tr>
+
+ <tr>
+ <td>file_name</td>
+ <td>variable length string without trailing zero, extending to the
+ end of the event (determined by the length field of the
+ Common-Header)
+ </td>
+ <td>The 'file_name' from the query.
+ </td>
+ </tr>
+
+ </table>
+
+ @subsection Load_log_event_notes_on_previous_versions Notes on Previous Versions
- ****************************************************************************/
+*/
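Editor's note: a sketch of the size rule described for sql_ex above (function name illustrative): the new format costs 6 bytes plus the string bytes, while the old format is always 7 bytes.

    static unsigned sql_ex_packed_size(unsigned field_term_len,
                                       unsigned enclosed_len,
                                       unsigned escaped_len,
                                       unsigned line_term_len,
                                       unsigned line_start_len)
    {
      const bool new_format=
        field_term_len > 1 || enclosed_len > 1 || escaped_len > 1 ||
        line_term_len > 1 || line_start_len > 1;
      if (new_format)        /* five <len,bytes> strings + 1 flag byte */
        return 5 + field_term_len + enclosed_len + escaped_len +
               line_term_len + line_start_len + 1;
      return 7;              /* five 1-byte strings + flags + empty_flags */
    }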
class Load_log_event: public Log_event
{
private:
@@ -1276,9 +1958,8 @@ public: /* !!! Public in this patch to allow old usage */
extern char server_version[SERVER_VERSION_LENGTH];
-/*****************************************************************************
-
- Start Log Event_v3 class
+/**
+ @class Start_log_event_v3
Start_log_event_v3 is the Start_log_event of binlog format 3 (MySQL 3.23 and
4.x).
@@ -1288,8 +1969,8 @@ extern char server_version[SERVER_VERSION_LENGTH];
MySQL 5.0 whenever it starts sending a new binlog if the requested position
is >4 (otherwise if ==4 the event will be sent naturally).
- ****************************************************************************/
-
+ @section Start_log_event_v3_binary_format Binary Format
+*/
class Start_log_event_v3: public Log_event
{
public:
@@ -1372,10 +2053,14 @@ protected:
};
-/*
- For binlog version 4.
- This event is saved by threads which read it, as they need it for future
- use (to decode the ordinary events).
+/**
+ @class Format_description_log_event
+
+ For binlog version 4.
+ This event is saved by threads which read it, as they need it for future
+ use (to decode the ordinary events).
+
+ @section Format_description_log_event_binary_format Binary Format
*/
class Format_description_log_event: public Start_log_event_v3
@@ -1429,13 +2114,41 @@ protected:
};
-/*****************************************************************************
+/**
+ @class Intvar_log_event
- Intvar Log Event class
+ Logs special variables related to auto_increment values.
- Logs special variables such as auto_increment values
+ @section Intvar_log_event_binary_format Binary Format
- ****************************************************************************/
+ The Post-Header has two components:
+
+ <table>
+ <caption>Post-Header for Intvar_log_event</caption>
+
+ <tr>
+ <th>Name</th>
+ <th>Size<br/></th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td>Type</td>
+ <td>1 byte enumeration</td>
+ <td>One byte identifying the type of variable stored. Currently,
+ two identifiers are supported: LAST_INSERT_ID_EVENT==1 and
+ INSERT_ID_EVENT==2.
+ </td>
+ </tr>
+
+ <tr>
+ <td>value</td>
+ <td>8 byte unsigned integer</td>
+ <td>The value of the variable.</td>
+ </tr>
+
+ </table>
+*/
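Editor's note: a minimal sketch of reading the nine bytes documented above. The helper name is illustrative; the value is little-endian like every other binlog integer.

    static void read_intvar(const unsigned char *buf,
                            unsigned char *type, unsigned long long *value)
    {
      *type= buf[0];      /* LAST_INSERT_ID_EVENT==1 or INSERT_ID_EVENT==2 */
      unsigned long long v= 0;
      for (int i= 7; i >= 0; i--)       /* least significant byte first */
        v= (v << 8) | buf[1 + i];
      *value= v;
    }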
class Intvar_log_event: public Log_event
{
@@ -1474,16 +2187,24 @@ private:
};
-/*****************************************************************************
-
- Rand Log Event class
+/**
+ @class Rand_log_event
Logs random seed used by the next RAND(), and by PASSWORD() in 4.1.0.
4.1.1 does not need it (it's repeatable again) so this event needn't be
written in 4.1.1 for PASSWORD() (but the fact that it is written is just a
waste, it does not cause bugs).
- ****************************************************************************/
+ @section Rand_log_event_binary_format Binary Format
+ This event type has no Post-Header. The Body of this event type has
+ two components:
+
+ @li seed1 (8 bytes): 64 bit random seed1.
+ @li seed2 (8 bytes): 64 bit random seed2.
+
+ The state of the random number generation consists of 128 bits,
+ which are stored internally as two 64-bit numbers.
+*/
class Rand_log_event: public Log_event
{
@@ -1520,14 +2241,14 @@ private:
#endif
};
-/*****************************************************************************
-
- Xid Log Event class
+/**
+ @class Xid_log_event
Logs xid of the transaction-to-be-committed in the 2pc protocol.
Has no meaning in replication, slaves ignore it.
- ****************************************************************************/
+ @section Xid_log_event_binary_format Binary Format
+*/
#ifdef MYSQL_CLIENT
typedef ulonglong my_xid; // this line is the same as in handler.h
#endif
@@ -1559,17 +2280,18 @@ class Xid_log_event: public Log_event
private:
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
virtual int do_apply_event(Relay_log_info const *rli);
+ enum_skip_reason do_shall_skip(Relay_log_info *rli);
#endif
};
-/*****************************************************************************
-
- User var Log Event class
+/**
+ @class User_var_log_event
Every time a query uses the value of a user variable, a User_var_log_event is
written before the Query_log_event, to set the user variable.
- ****************************************************************************/
+ @section User_var_log_event_binary_format Binary Format
+*/
class User_var_log_event: public Log_event
{
@@ -1611,11 +2333,14 @@ private:
};
-/*****************************************************************************
+/**
+ @class Stop_log_event
- Stop Log Event class
+ @section Stop_log_event_binary_format Binary Format
- ****************************************************************************/
+ The Post-Header and Body for this event type are empty; it only has
+ the Common-Header.
+*/
class Stop_log_event: public Log_event
{
public:
@@ -1651,13 +2376,54 @@ private:
#endif
};
-/*****************************************************************************
-
- Rotate Log Event class
+/**
+ @class Rotate_log_event
This will be deprecated when we move to using sequence ids.
- ****************************************************************************/
+ @section Rotate_log_event_binary_format Binary Format
+
+ The Post-Header has one component:
+
+ <table>
+ <caption>Post-Header for Rotate_log_event</caption>
+
+ <tr>
+ <th>Name</th>
+ <th>Size<br/></th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td>pos</td>
+ <td>8 byte integer</td>
+ <td>???TODO</td>
+ </tr>
+
+ </table>
+
+ The Body has one component:
+
+ <table>
+ <caption>Body for Rotate_log_event</caption>
+
+ <tr>
+ <th>Name</th>
+ <th>Size<br/></th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td>new_log_ident</td>
+ <td>variable length string without trailing zero, extending to the
+ end of the event (determined by the length field of the
+ Common-Header)
+ </td>
+ <td>???TODO</td>
+ </tr>
+
+ </table>
+*/
class Rotate_log_event: public Log_event
{
@@ -1704,9 +2470,11 @@ private:
/* the classes below are for the new LOAD DATA INFILE logging */
-/*****************************************************************************
- Create File Log Event class
- ****************************************************************************/
+/**
+ @class Create_file_log_event
+
+ @section Create_file_log_event_binary_format Binary Format
+*/
class Create_file_log_event: public Load_log_event
{
@@ -1775,11 +2543,11 @@ private:
};
-/*****************************************************************************
-
- Append Block Log Event class
+/**
+ @class Append_block_log_event
- ****************************************************************************/
+ @section Append_block_log_event_binary_format Binary Format
+*/
class Append_block_log_event: public Log_event
{
@@ -1830,11 +2598,11 @@ private:
};
-/*****************************************************************************
-
- Delete File Log Event class
+/**
+ @class Delete_file_log_event
- ****************************************************************************/
+ @section Delete_file_log_event_binary_format Binary Format
+*/
class Delete_file_log_event: public Log_event
{
@@ -1871,11 +2639,11 @@ private:
};
-/*****************************************************************************
-
- Execute Load Log Event class
+/**
+ @class Execute_load_log_event
- ****************************************************************************/
+ @section Execute_load_log_event_binary_format Binary Format
+*/
class Execute_load_log_event: public Log_event
{
@@ -1911,15 +2679,15 @@ private:
};
-/***************************************************************************
-
- Begin load query Log Event class
+/**
+ @class Begin_load_query_log_event
Event for the first block of file to be loaded, its only difference from
Append_block event is that this event creates or truncates existing file
before writing data.
-****************************************************************************/
+ @section Begin_load_query_log_event_binary_format Binary Format
+*/
class Begin_load_query_log_event: public Append_block_log_event
{
public:
@@ -1937,6 +2705,10 @@ public:
*description_event);
~Begin_load_query_log_event() {}
Log_event_type get_type_code() { return BEGIN_LOAD_QUERY_EVENT; }
+private:
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+ virtual enum_skip_reason do_shall_skip(Relay_log_info *rli);
+#endif
};
@@ -1946,15 +2718,15 @@ public:
enum enum_load_dup_handling { LOAD_DUP_ERROR= 0, LOAD_DUP_IGNORE,
LOAD_DUP_REPLACE };
-/****************************************************************************
-
- Execute load query Log Event class
+/**
+ @class Execute_load_query_log_event
   Event responsible for LOAD DATA execution; it is similar to Query_log_event
but before executing the query it substitutes original filename in LOAD DATA
query with name of temporary file.
-****************************************************************************/
+ @section Execute_load_query_log_event_binary_format Binary Format
+*/
class Execute_load_query_log_event: public Query_log_event
{
public:
@@ -1972,10 +2744,12 @@ public:
#ifndef MYSQL_CLIENT
Execute_load_query_log_event(THD* thd, const char* query_arg,
- ulong query_length, uint fn_pos_start_arg,
- uint fn_pos_end_arg,
- enum_load_dup_handling dup_handling_arg,
- bool using_trans, bool suppress_use);
+ ulong query_length, uint fn_pos_start_arg,
+ uint fn_pos_end_arg,
+ enum_load_dup_handling dup_handling_arg,
+ bool using_trans, bool suppress_use,
+ THD::killed_state
+ killed_err_arg= THD::KILLED_NO_VALUE);
#ifdef HAVE_REPLICATION
void pack_info(Protocol* protocol);
#endif /* HAVE_REPLICATION */
@@ -2006,6 +2780,11 @@ private:
#ifdef MYSQL_CLIENT
+/**
+ @class Unknown_log_event
+
+ @section Unknown_log_event_binary_format Binary Format
+*/
class Unknown_log_event: public Log_event
{
public:
@@ -2026,14 +2805,14 @@ public:
#endif
char *str_to_hex(char *to, const char *from, uint len);
-/*****************************************************************************
-
- Table map log event class
+/**
+ @class Table_map_log_event
Create a mapping from a (database name, table name) couple to a table
identifier (an integer number).
- ****************************************************************************/
+ @section Table_map_log_event_binary_format Binary Format
+*/
class Table_map_log_event : public Log_event
{
public:
@@ -2143,9 +2922,8 @@ private:
};
-/*****************************************************************************
-
- Row level log event class.
+/**
+ @class Rows_log_event
Common base class for all row-containing log events.
@@ -2155,7 +2933,8 @@ private:
- Write data header and data body to an IO_CACHE.
- Provide an interface for adding an individual row to the event.
- ****************************************************************************/
+ @section Rows_log_event_binary_format Binary Format
+*/
class Rows_log_event : public Log_event
@@ -2300,7 +3079,7 @@ protected:
uchar *m_rows_cur; /* One-after the end of the data */
uchar *m_rows_end; /* One-after the end of the allocated space */
- flag_set m_flags; /* Flags for row-level events */
+ flag_set m_flags; /* Flags for row-level events */
/* helper functions */
@@ -2316,8 +3095,11 @@ protected:
int unpack_current_row(const Relay_log_info *const rli)
{
DBUG_ASSERT(m_table);
- return ::unpack_row(rli, m_table, m_width, m_curr_row, &m_cols,
- &m_curr_row_end, &m_master_reclength);
+ ASSERT_OR_RETURN_ERROR(m_curr_row < m_rows_end, HA_ERR_CORRUPT_EVENT);
+ int const result= ::unpack_row(rli, m_table, m_width, m_curr_row, &m_cols,
+ &m_curr_row_end, &m_master_reclength);
+ ASSERT_OR_RETURN_ERROR(m_curr_row_end <= m_rows_end, HA_ERR_CORRUPT_EVENT);
+ return result;
}
#endif
@@ -2383,15 +3165,15 @@ private:
friend class Old_rows_log_event;
};
-/*****************************************************************************
-
- Write row log event class
+/**
+ @class Write_rows_log_event
Log row insertions and updates. The event contain several
insert/update rows for a table. Note that each event contains only
rows for one table.
- ****************************************************************************/
+ @section Write_rows_log_event_binary_format Binary Format
+*/
class Write_rows_log_event : public Rows_log_event
{
public:
@@ -2438,9 +3220,8 @@ private:
};
-/*****************************************************************************
-
- Update rows log event class
+/**
+ @class Update_rows_log_event
Log row updates with a before image. The event contain several
update rows for a table. Note that each event contains only rows for
@@ -2449,7 +3230,8 @@ private:
Also note that the row data consists of pairs of row data: one row
for the old data and one row for the new data.
- ****************************************************************************/
+ @section Update_rows_log_event_binary_format Binary Format
+*/
class Update_rows_log_event : public Rows_log_event
{
public:
@@ -2511,9 +3293,8 @@ protected:
#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
};
-/*****************************************************************************
-
- Delete rows log event class.
+/**
+ @class Delete_rows_log_event
Log row deletions. The event contain several delete rows for a
table. Note that each event contains only rows for one table.
@@ -2530,7 +3311,8 @@ protected:
Row_reader
Extract the rows from the event.
- ****************************************************************************/
+ @section Delete_rows_log_event_binary_format Binary Format
+*/
class Delete_rows_log_event : public Rows_log_event
{
public:
@@ -2580,6 +3362,8 @@ protected:
#include "log_event_old.h"
/**
+ @class Incident_log_event
+
   Class representing an incident, an occurrence out of the ordinary,
that happened on the master.
@@ -2591,7 +3375,7 @@ protected:
<caption>Incident event format</caption>
<tr>
<th>Symbol</th>
- <th>Size<br>(bytes)</th>
+ <th>Size<br/>(bytes)</th>
<th>Description</th>
</tr>
<tr>
@@ -2610,7 +3394,9 @@ protected:
<td>The message, if present. Not null terminated.</td>
</tr>
</table>
- */
+
+ @section Incident_log_event_binary_format Binary Format
+*/
class Incident_log_event : public Log_event {
public:
#ifndef MYSQL_CLIENT
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 4cf6e05751f..9e947470ca1 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -2936,7 +2936,6 @@ static int init_common_variables(const char *conf_file_name, int argc,
global_system_variables.collation_connection= default_charset_info;
global_system_variables.character_set_results= default_charset_info;
global_system_variables.character_set_client= default_charset_info;
- global_system_variables.collation_connection= default_charset_info;
if (!(character_set_filesystem=
get_charset_by_csname(character_set_filesystem_name,
diff --git a/sql/records.cc b/sql/records.cc
index 81c26da4b4d..0bf815e4073 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -55,6 +55,7 @@ static int rr_index(READ_RECORD *info);
void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
bool print_error, uint idx)
{
+ empty_record(table);
bzero((char*) info,sizeof(*info));
info->table= table;
info->file= table->file;
@@ -161,6 +162,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
}
else
{
+ empty_record(table);
info->record= table->record[0];
info->ref_length= table->file->ref_length;
}
diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc
index 65c8e106112..ed0dc82cf01 100644
--- a/sql/rpl_record.cc
+++ b/sql/rpl_record.cc
@@ -65,6 +65,8 @@ pack_row(TABLE *table, MY_BITMAP const* cols,
my_ptrdiff_t const rec_offset= record - table->record[0];
my_ptrdiff_t const def_offset= table->s->default_values - table->record[0];
+ DBUG_ENTER("pack_row");
+
/*
We write the null bits and the packed records using one pass
through all the fields. The null bytes are written little-endian,
@@ -96,26 +98,17 @@ pack_row(TABLE *table, MY_BITMAP const* cols,
For big-endian machines, we have to make sure that the
length is stored in little-endian format, since this is the
format used for the binlog.
-
- We do this by setting the db_low_byte_first, which is used
- inside some store_length() to decide what order to write the
- bytes in.
-
- In reality, db_log_byte_first is only set for legacy table
- type Isam, but in the event of a bug, we need to guarantee
- the endianess when writing to the binlog.
-
- This is currently broken for NDB due to BUG#29549, so we
- will fix it when NDB has fixed their way of handling BLOBs.
*/
-#if 0
- bool save= table->s->db_low_byte_first;
- table->s->db_low_byte_first= TRUE;
-#endif
- pack_ptr= field->pack(pack_ptr, field->ptr + offset);
-#if 0
- table->s->db_low_byte_first= save;
+#ifndef DBUG_OFF
+ const uchar *old_pack_ptr= pack_ptr;
#endif
+ pack_ptr= field->pack(pack_ptr, field->ptr + offset,
+ field->max_data_length(), TRUE);
+ DBUG_PRINT("debug", ("field: %s; pack_ptr: 0x%lx;"
+ " pack_ptr':0x%lx; bytes: %d",
+ field->field_name, (ulong) old_pack_ptr,
+ (ulong) pack_ptr,
+ (int) (pack_ptr - old_pack_ptr)));
}
null_mask <<= 1;
@@ -143,8 +136,8 @@ pack_row(TABLE *table, MY_BITMAP const* cols,
packed data. If it doesn't, something is very wrong.
*/
DBUG_ASSERT(null_ptr == row_data + null_byte_count);
-
- return static_cast<size_t>(pack_ptr - row_data);
+ DBUG_DUMP("row_data", row_data, pack_ptr - row_data);
+ DBUG_RETURN(static_cast<size_t>(pack_ptr - row_data));
}
#endif
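Editor's note: a hedged sketch of the row image that pack_row()/unpack_row() move over the wire: a null bitmap (one bit per column in the column set, least significant bit first) followed by the packed values of the non-null columns, with variable lengths stored little-endian when the image is written to the binlog. Helper names are illustrative.

    #include <cstddef>

    /* Number of bytes needed for the leading null bitmap. */
    static size_t null_byte_count(size_t n_columns)
    {
      return (n_columns + 7) / 8;
    }

    /* Mark column `col` (0-based, counted within the column set) as NULL. */
    static void set_null_bit(unsigned char *null_bytes, size_t col)
    {
      null_bytes[col / 8] |= (unsigned char) (1U << (col % 8));
    }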
@@ -242,18 +235,16 @@ unpack_row(Relay_log_info const *rli,
Use the master's size information if available else call
normal unpack operation.
*/
-#if 0
- bool save= table->s->db_low_byte_first;
- table->s->db_low_byte_first= TRUE;
-#endif
uint16 const metadata= tabledef->field_metadata(i);
- if (tabledef && metadata)
- pack_ptr= f->unpack(f->ptr, pack_ptr, metadata);
- else
- pack_ptr= f->unpack(f->ptr, pack_ptr);
-#if 0
- table->s->db_low_byte_first= save;
+#ifndef DBUG_OFF
+ uchar const *const old_pack_ptr= pack_ptr;
#endif
+ pack_ptr= f->unpack(f->ptr, pack_ptr, metadata, TRUE);
+ DBUG_PRINT("debug", ("field: %s; metadata: 0x%x;"
+ " pack_ptr: 0x%lx; pack_ptr': 0x%lx; bytes: %d",
+ f->field_name, metadata,
+ (ulong) old_pack_ptr, (ulong) pack_ptr,
+ (int) (pack_ptr - old_pack_ptr)));
}
null_mask <<= 1;
@@ -289,6 +280,8 @@ unpack_row(Relay_log_info const *rli,
*/
DBUG_ASSERT(null_ptr == row_data + master_null_byte_count);
+ DBUG_DUMP("row_data", row_data, pack_ptr - row_data);
+
*row_end = pack_ptr;
if (master_reclength)
{
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index 867d55a60a3..15d7d97affd 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -1082,6 +1082,9 @@ bool Relay_log_info::cached_charset_compare(char *charset) const
void Relay_log_info::stmt_done(my_off_t event_master_log_pos,
time_t event_creation_time)
{
+#ifndef DBUG_OFF
+ extern uint debug_not_change_ts_if_art_event;
+#endif
clear_flag(IN_STMT);
/*
@@ -1121,7 +1124,12 @@ void Relay_log_info::stmt_done(my_off_t event_master_log_pos,
is that value may take some time to display in
Seconds_Behind_Master - not critical).
*/
- last_master_timestamp= event_creation_time;
+#ifndef DBUG_OFF
+ if (!(event_creation_time == 0 && debug_not_change_ts_if_art_event > 0))
+#else
+ if (event_creation_time != 0)
+#endif
+ last_master_timestamp= event_creation_time;
}
}
diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h
index 10ecf1a43d4..a3a57ad4ce9 100644
--- a/sql/rpl_rli.h
+++ b/sql/rpl_rli.h
@@ -366,6 +366,18 @@ public:
}
/**
+ Get the value of a replication state flag.
+
+ @param flag Flag to get value of
+
+ @return @c true if the flag was set, @c false otherwise.
+ */
+ bool get_flag(enum_state_flag flag)
+ {
+ return m_flags & (1UL << flag);
+ }
+
+ /**
Clear the value of a replication state flag.
@param flag Flag to clear
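
get_flag() completes the usual trio of one-bit flag accessors on an unsigned word. A self-contained sketch of the same pattern, not the Relay_log_info class itself:

    #include <cstdio>

    enum enum_state_flag { IN_STMT= 0 };

    struct Flags
    {
      unsigned long m_flags;
      Flags() : m_flags(0) {}
      void set_flag(enum_state_flag flag)   { m_flags|= (1UL << flag); }
      bool get_flag(enum_state_flag flag)   { return m_flags & (1UL << flag); }
      void clear_flag(enum_state_flag flag) { m_flags&= ~(1UL << flag); }
    };

    int main()
    {
      Flags rli;
      rli.set_flag(IN_STMT);
      printf("%d\n", (int) rli.get_flag(IN_STMT));   // 1
      rli.clear_flag(IN_STMT);
      printf("%d\n", (int) rli.get_flag(IN_STMT));   // 0
      return 0;
    }
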
diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc
index d1ce5bf3b7b..b3ca26d4c2c 100644
--- a/sql/rpl_utility.cc
+++ b/sql/rpl_utility.cc
@@ -31,31 +31,34 @@ uint32 table_def::calc_field_size(uint col, uchar *master_data) const
switch (type(col)) {
case MYSQL_TYPE_NEWDECIMAL:
length= my_decimal_get_binary_size(m_field_metadata[col] >> 8,
- m_field_metadata[col] - ((m_field_metadata[col] >> 8) << 8));
+ m_field_metadata[col] & 0xff);
break;
case MYSQL_TYPE_DECIMAL:
case MYSQL_TYPE_FLOAT:
case MYSQL_TYPE_DOUBLE:
length= m_field_metadata[col];
break;
+ /*
+ The cases for SET and ENUM are included for completeness; however,
+ both are mapped to type MYSQL_TYPE_STRING and their real types
+ are encoded in the field metadata.
+ */
case MYSQL_TYPE_SET:
case MYSQL_TYPE_ENUM:
case MYSQL_TYPE_STRING:
{
- if (((m_field_metadata[col] & 0xff00) == (MYSQL_TYPE_SET << 8)) ||
- ((m_field_metadata[col] & 0xff00) == (MYSQL_TYPE_ENUM << 8)))
+ uchar type= m_field_metadata[col] >> 8U;
+ if ((type == MYSQL_TYPE_SET) || (type == MYSQL_TYPE_ENUM))
length= m_field_metadata[col] & 0x00ff;
else
{
- length= m_field_metadata[col] & 0x00ff;
- DBUG_ASSERT(length > 0);
- if (length > 255)
- {
- DBUG_ASSERT(uint2korr(master_data) > 0);
- length= uint2korr(master_data) + 2;
- }
- else
- length= (uint) *master_data + 1;
+ /*
+ We are reading the actual size from the master_data record
+ because this field has the actual length stored in the first
+ byte.
+ */
+ length= (uint) *master_data + 1;
+ DBUG_ASSERT(length != 0);
}
break;
}
@@ -95,6 +98,13 @@ uint32 table_def::calc_field_size(uint col, uchar *master_data) const
break;
case MYSQL_TYPE_BIT:
{
+ /*
+ Decode the size of the bit field from the master.
+ from_len is the length in bytes from the master.
+ from_bit_len is the number of extra bits stored in the master record.
+ If from_bit_len is not 0, add 1 to the length to account for the extra
+ byte those bits occupy.
+ */
uint from_len= (m_field_metadata[col] >> 8U) & 0x00ff;
uint from_bit_len= m_field_metadata[col] & 0x00ff;
DBUG_ASSERT(from_bit_len <= 7);
@@ -136,7 +146,7 @@ uint32 table_def::calc_field_size(uint col, uchar *master_data) const
length= *master_data;
break;
case 2:
- length= sint2korr(master_data);
+ length= uint2korr(master_data);
break;
case 3:
length= uint3korr(master_data);
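
calc_field_size() now splits the 16-bit metadata word consistently: the high byte carries the real type, the precision or the byte count, and the low byte carries the length, the decimals or the leftover bit count. A standalone sketch of that decoding under the same assumptions (the helper names are invented):

    #include <cstdint>
    #include <cstdio>

    static unsigned decimal_precision(uint16_t meta) { return meta >> 8; }
    static unsigned decimal_scale(uint16_t meta)     { return meta & 0xff; }

    // BIT columns: whole bytes in the high byte, leftover bits (0..7) in the
    // low byte; any leftover bits need one extra byte.
    static unsigned bit_field_bytes(uint16_t meta)
    {
      unsigned from_len=     (meta >> 8) & 0xff;
      unsigned from_bit_len= meta & 0xff;
      return from_len + (from_bit_len > 0 ? 1 : 0);
    }

    int main()
    {
      uint16_t dec_meta= (10 << 8) | 2;   // e.g. DECIMAL(10,2)
      printf("precision=%u scale=%u\n",
             decimal_precision(dec_meta), decimal_scale(dec_meta));
      uint16_t bit_meta= (1 << 8) | 3;    // 1 full byte + 3 extra bits
      printf("bit bytes=%u\n", bit_field_bytes(bit_meta));
      return 0;
    }
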
diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h
index 26edbdd1405..375715c7858 100644
--- a/sql/rpl_utility.h
+++ b/sql/rpl_utility.h
@@ -99,7 +99,7 @@ public:
/*
These types store a single byte.
*/
- m_field_metadata[i]= (uchar)field_metadata[index];
+ m_field_metadata[i]= field_metadata[index];
index++;
break;
}
@@ -107,14 +107,14 @@ public:
case MYSQL_TYPE_ENUM:
case MYSQL_TYPE_STRING:
{
- short int x= field_metadata[index++] << 8U; // real_type
- x = x + field_metadata[index++]; // pack or field length
+ uint16 x= field_metadata[index++] << 8U; // real_type
+ x+= field_metadata[index++]; // pack or field length
m_field_metadata[i]= x;
break;
}
case MYSQL_TYPE_BIT:
{
- short int x= field_metadata[index++];
+ uint16 x= field_metadata[index++];
x = x + (field_metadata[index++] << 8U);
m_field_metadata[i]= x;
break;
@@ -125,14 +125,14 @@ public:
These types store two bytes.
*/
char *ptr= (char *)&field_metadata[index];
- m_field_metadata[i]= sint2korr(ptr);
+ m_field_metadata[i]= uint2korr(ptr);
index= index + 2;
break;
}
case MYSQL_TYPE_NEWDECIMAL:
{
- short int x= field_metadata[index++] << 8U; // precision
- x = x + field_metadata[index++]; // decimals
+ uint16 x= field_metadata[index++] << 8U; // precision
+ x+= field_metadata[index++]; // decimals
m_field_metadata[i]= x;
break;
}
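
The switch from short int/sint2korr to uint16/uint2korr matters because the metadata is unsigned: reading it through a signed 16-bit path sign-extends values at or above 0x8000. A small illustration with hand-rolled little-endian readers standing in for the *korr macros:

    #include <cstdint>
    #include <cstdio>

    static int16_t  read2_signed(const unsigned char *p)
    { return (int16_t)(p[0] | (p[1] << 8)); }

    static uint16_t read2_unsigned(const unsigned char *p)
    { return (uint16_t)(p[0] | (p[1] << 8)); }

    int main()
    {
      unsigned char buf[2]= { 0x34, 0xFF };   // little-endian 0xFF34
      printf("signed:   %d\n", (int) read2_signed(buf));        // -204
      printf("unsigned: %u\n", (unsigned) read2_unsigned(buf)); // 65332
      return 0;
    }
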
diff --git a/sql/slave.cc b/sql/slave.cc
index 2512954f805..b6611d44723 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -1715,7 +1715,14 @@ static int has_temporary_error(THD *thd)
DBUG_ENTER("has_temporary_error");
if (thd->is_fatal_error)
+ {
+ DBUG_PRINT("info", ("thd->net.last_errno: %s", ER(thd->net.last_errno)));
DBUG_RETURN(0);
+ }
+
+ DBUG_EXECUTE_IF("all_errors_are_temporary_errors",
+ if (thd->net.last_errno)
+ thd->net.last_errno= ER_LOCK_DEADLOCK;);
/*
Temporary error codes:
@@ -1724,7 +1731,10 @@ static int has_temporary_error(THD *thd)
*/
if (thd->net.last_errno == ER_LOCK_DEADLOCK ||
thd->net.last_errno == ER_LOCK_WAIT_TIMEOUT)
+ {
+ DBUG_PRINT("info", ("thd->net.last_errno: %s", ER(thd->net.last_errno)));
DBUG_RETURN(1);
+ }
#ifdef HAVE_NDB_BINLOG
/*
@@ -1795,9 +1805,6 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli)
int const type_code= ev->get_type_code();
int exec_res= 0;
- /*
- */
-
DBUG_PRINT("exec_event",("%s(type_code: %d; server_id: %d)",
ev->get_type_str(), type_code, ev->server_id));
DBUG_PRINT("info", ("thd->options: %s%s; rli->last_event_start_time: %lu",
@@ -1806,7 +1813,6 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli)
rli->last_event_start_time));
-
/*
Execute the event to change the database and update the binary
log coordinates, but first we set some data that is needed for
@@ -1854,10 +1860,13 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli)
// EVENT_SKIP_NOT,
"not skipped",
// EVENT_SKIP_IGNORE,
- "skipped because event originated from this server",
+ "skipped because event should be ignored",
// EVENT_SKIP_COUNT
"skipped because event skip counter was non-zero"
};
+ DBUG_PRINT("info", ("OPTION_BEGIN: %d; IN_STMT: %d",
+ thd->options & OPTION_BEGIN ? 1 : 0,
+ rli->get_flag(Relay_log_info::IN_STMT)));
DBUG_PRINT("skip_event", ("%s event was %s",
ev->get_type_str(), explain[reason]));
#endif
@@ -1906,7 +1915,8 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli)
}
if (slave_trans_retries)
{
- if (exec_res && has_temporary_error(thd))
+ int temp_err;
+ if (exec_res && (temp_err= has_temporary_error(thd)))
{
const char *errmsg;
/*
@@ -1954,15 +1964,19 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli)
"the slave_transaction_retries variable.",
slave_trans_retries);
}
- else if (!((thd->options & OPTION_BEGIN) && opt_using_transactions))
+ else if (exec_res && !temp_err ||
+ (opt_using_transactions &&
+ rli->group_relay_log_pos == rli->event_relay_log_pos))
{
/*
- Only reset the retry counter if the event succeeded or
- failed with a non-transient error. On a successful event,
- the execution will proceed as usual; in the case of a
+ Only reset the retry counter if the entire group succeeded
+ or failed with a non-transient error. On a successful
+ event, the execution will proceed as usual; in the case of a
non-transient error, the slave will stop with an error.
*/
rli->trans_retries= 0; // restart from fresh
+ DBUG_PRINT("info", ("Resetting retry counter, rli->trans_retries: %lu",
+ rli->trans_retries));
}
}
DBUG_RETURN(exec_res);
@@ -2451,6 +2465,7 @@ pthread_handler_t handle_slave_sql(void *arg)
rli->ignore_log_space_limit= 0;
pthread_mutex_unlock(&rli->log_space_lock);
rli->trans_retries= 0; // start from "no error"
+ DBUG_PRINT("info", ("rli->trans_retries: %lu", rli->trans_retries));
if (init_relay_log_pos(rli,
rli->group_relay_log_name,
@@ -3582,7 +3597,16 @@ static Log_event* next_event(Relay_log_info* rli)
a new event and is queuing it; the false "0" will exist until SQL
finishes executing the new event; it will be look abnormal only if
the events have old timestamps (then you get "many", 0, "many").
- Transient phases like this can't really be fixed.
+
+ Transient phases like this can be fixed by implementing a
+ Heartbeat event, which gives the slave the status of the
+ master when the master has no new updates to send.
+ Seconds_Behind_Master would be zero only when the master has no
+ more updates in the binlog for the slave. The heartbeat can be sent
+ in a (small) fraction of slave_net_timeout. Until that is done,
+ rli->last_master_timestamp is temporarily reset (for the time
+ spent waiting for the following event) whenever EOF is
+ reached.
*/
time_t save_timestamp= rli->last_master_timestamp;
rli->last_master_timestamp= 0;
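
The slave.cc changes above tighten when the transaction retry counter is reset: only when a whole group succeeds or fails with a non-transient error. A compact sketch of that retry policy, with an invented error code and a stub in place of event execution:

    #include <cstdio>

    enum { OK= 0, ER_LOCK_DEADLOCK= 1213 };

    static bool is_temporary(int err) { return err == ER_LOCK_DEADLOCK; }

    static int execute_group()          // pretend the first attempt deadlocks
    {
      static int calls= 0;
      return (calls++ == 0) ? ER_LOCK_DEADLOCK : OK;
    }

    int main()
    {
      unsigned long trans_retries= 0, slave_trans_retries= 10;
      int err;
      while ((err= execute_group()) != OK && is_temporary(err) &&
             trans_retries < slave_trans_retries)
        ++trans_retries;                // roll back and retry the whole group
      if (err == OK || !is_temporary(err))
        trans_retries= 0;               // restart from fresh, as in the hunk
      printf("err=%d trans_retries=%lu\n", err, trans_retries);
      return 0;
    }
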
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index c0ea73a6c00..9babbcd49d8 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -102,8 +102,9 @@ sp_get_item_value(THD *thd, Item *item, String *str)
case REAL_RESULT:
case INT_RESULT:
case DECIMAL_RESULT:
- return item->val_str(str);
-
+ if (item->field_type() != MYSQL_TYPE_BIT)
+ return item->val_str(str);
+ else {/* Bit type is handled as binary string */}
case STRING_RESULT:
{
String *result= item->val_str(str);
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 9dd3856dfa8..c5b70cfa687 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1264,14 +1264,16 @@ public:
We follow this logic:
- when stmt starts, first_successful_insert_id_in_prev_stmt contains the
first insert id successfully inserted by the previous stmt.
- - as stmt makes progress, handler::insert_id_for_cur_row changes; every
- time get_auto_increment() is called, auto_inc_intervals_for_binlog is
- augmented with the reserved interval (if statement-based binlogging).
+ - as stmt makes progress, handler::insert_id_for_cur_row changes;
+ every time get_auto_increment() is called,
+ auto_inc_intervals_in_cur_stmt_for_binlog is augmented with the
+ reserved interval (if statement-based binlogging).
- at first successful insertion of an autogenerated value,
first_successful_insert_id_in_cur_stmt is set to
handler::insert_id_for_cur_row.
- - when stmt goes to binlog, auto_inc_intervals_for_binlog is
- binlogged if non-empty.
+ - when stmt goes to binlog,
+ auto_inc_intervals_in_cur_stmt_for_binlog is binlogged if
+ non-empty.
- when stmt ends, first_successful_insert_id_in_prev_stmt is set to
first_successful_insert_id_in_cur_stmt.
*/
@@ -2491,6 +2493,11 @@ class multi_delete :public select_result_interceptor
/* True if at least one table we delete from is not transactional */
bool normal_tables;
bool delete_while_scanning;
+ /*
+ Error handling (rollback and binlogging) can happen in send_eof(),
+ so send_error() needs to find out afterwards whether that happened.
+ */
+ bool error_handled;
public:
multi_delete(TABLE_LIST *dt, uint num_of_tables);
@@ -2526,6 +2533,11 @@ class multi_update :public select_result_interceptor
/* True if the update operation has made a change in a transactional table */
bool transactional_tables;
bool ignore;
+ /*
+ Error handling (rollback and binlogging) can happen in send_eof(),
+ so send_error() needs to find out afterwards whether that happened.
+ */
+ bool error_handled;
public:
multi_update(TABLE_LIST *ut, TABLE_LIST *leaves_list,
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index abbf2131957..ad4e0d803eb 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -883,6 +883,13 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
VOID(pthread_mutex_lock(&LOCK_mysql_create_db));
+ /*
+ This statement will be replicated as a statement, even when using
+ row-based replication. The flag will be reset at the end of the
+ statement.
+ */
+ thd->clear_current_stmt_binlog_row_based();
+
length= build_table_filename(path, sizeof(path), db, "", "", 0);
strmov(path+length, MY_DB_OPT_FILE); // Append db option file name
del_dbopt(path); // Remove dboption hash entry
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index f183cb3142f..509e736f6e7 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -39,6 +39,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
ha_rows deleted= 0;
uint usable_index= MAX_KEY;
SELECT_LEX *select_lex= &thd->lex->select_lex;
+ THD::killed_state killed_status= THD::NOT_KILLED;
DBUG_ENTER("mysql_delete");
if (open_and_lock_tables(thd, table_list))
@@ -307,7 +308,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
else
table->file->unlock_row(); // Row failed selection, release lock on it
}
- if (thd->killed || thd->is_error())
+ killed_status= thd->killed;
+ if (killed_status != THD::NOT_KILLED || thd->is_error())
error= 1; // Aborted
if (will_batch && (loc_error= table->file->end_bulk_delete()))
{
@@ -352,13 +354,12 @@ cleanup:
thd->transaction.stmt.modified_non_trans_table= TRUE;
/* See similar binlogging code in sql_update.cc, for comments */
- if ((error < 0) || (deleted && !transactional_table))
+ if ((error < 0) || thd->transaction.stmt.modified_non_trans_table)
{
if (mysql_bin_log.is_open())
{
if (error < 0)
thd->clear_error();
-
/*
[binlog]: If 'handler::delete_all_rows()' was called and the
storage engine does not inject the rows itself, we replicate
@@ -367,7 +368,7 @@ cleanup:
*/
int log_result= thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query, thd->query_length,
- transactional_table, FALSE);
+ transactional_table, FALSE, killed_status);
if (log_result && transactional_table)
{
@@ -548,7 +549,7 @@ bool mysql_multi_delete_prepare(THD *thd)
multi_delete::multi_delete(TABLE_LIST *dt, uint num_of_tables_arg)
: delete_tables(dt), deleted(0), found(0),
num_of_tables(num_of_tables_arg), error(0),
- do_delete(0), transactional_tables(0), normal_tables(0)
+ do_delete(0), transactional_tables(0), normal_tables(0), error_handled(0)
{
tempfiles= (Unique **) sql_calloc(sizeof(Unique *) * num_of_tables);
}
@@ -727,12 +728,14 @@ void multi_delete::send_error(uint errcode,const char *err)
/* First send error what ever it is ... */
my_message(errcode, err, MYF(0));
- /* If nothing deleted return */
- if (!deleted)
+ /* Return if the error was handled, or nothing was deleted and there were no side effects */
+ if (error_handled ||
+ !thd->transaction.stmt.modified_non_trans_table && !deleted)
DBUG_VOID_RETURN;
/* Something already deleted so we have to invalidate cache */
- query_cache_invalidate3(thd, delete_tables, 1);
+ if (deleted)
+ query_cache_invalidate3(thd, delete_tables, 1);
/*
If rows from the first table only has been deleted and it is
@@ -752,12 +755,30 @@ void multi_delete::send_error(uint errcode,const char *err)
*/
error= 1;
send_eof();
+ DBUG_ASSERT(error_handled);
+ DBUG_VOID_RETURN;
}
- DBUG_ASSERT(!normal_tables || !deleted || thd->transaction.stmt.modified_non_trans_table);
+
+ if (thd->transaction.stmt.modified_non_trans_table)
+ {
+ /*
+ There were only side effects; binlog the statement together with the error.
+ */
+ if (mysql_bin_log.is_open())
+ {
+ thd->binlog_query(THD::ROW_QUERY_TYPE,
+ thd->query, thd->query_length,
+ transactional_tables, FALSE);
+ }
+ thd->transaction.all.modified_non_trans_table= true;
+ }
+ DBUG_ASSERT(!normal_tables || !deleted ||
+ thd->transaction.stmt.modified_non_trans_table);
DBUG_VOID_RETURN;
}
+
/*
Do delete from other tables.
Returns values:
@@ -850,6 +871,7 @@ int multi_delete::do_deletes()
bool multi_delete::send_eof()
{
+ THD::killed_state killed_status= THD::NOT_KILLED;
thd->proc_info="deleting from reference tables";
/* Does deletes for the last n - 1 tables, returns 0 if ok */
@@ -857,7 +879,7 @@ bool multi_delete::send_eof()
/* compute a total error to know if something failed */
local_error= local_error || error;
-
+ killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
/* reset used flags */
thd->proc_info="end";
@@ -869,7 +891,9 @@ bool multi_delete::send_eof()
{
query_cache_invalidate3(thd, delete_tables, 1);
}
- if ((local_error == 0) || (deleted && normal_tables))
+ DBUG_ASSERT(!normal_tables || !deleted ||
+ thd->transaction.stmt.modified_non_trans_table);
+ if ((local_error == 0) || thd->transaction.stmt.modified_non_trans_table)
{
if (mysql_bin_log.is_open())
{
@@ -877,7 +901,7 @@ bool multi_delete::send_eof()
thd->clear_error();
if (thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query, thd->query_length,
- transactional_tables, FALSE) &&
+ transactional_tables, FALSE, killed_status) &&
!normal_tables)
{
local_error=1; // Log write failed: roll back the SQL statement
@@ -886,7 +910,8 @@ bool multi_delete::send_eof()
if (thd->transaction.stmt.modified_non_trans_table)
thd->transaction.all.modified_non_trans_table= TRUE;
}
- DBUG_ASSERT(!normal_tables || !deleted || thd->transaction.stmt.modified_non_trans_table);
+ if (local_error != 0)
+ error_handled= TRUE; // to force early leave from ::send_error()
/* Commit or rollback the current SQL statement */
if (transactional_tables)
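
The error_handled member added to multi_delete (and to multi_update below) lets send_eof() record that it already binlogged and rolled back, so a later send_error() call returns early. A sketch of that handshake with illustrative class and method names:

    #include <cstdio>

    struct multi_dml_result
    {
      bool error_handled;
      multi_dml_result() : error_handled(false) {}

      void send_eof(bool local_error)
      {
        // ... binlog the statement, commit or roll back ...
        if (local_error)
          error_handled= true;     // force early leave from send_error()
      }

      void send_error()
      {
        if (error_handled)
          return;                  // everything was already done in send_eof()
        // ... binlog together with the error, invalidate caches ...
      }
    };

    int main()
    {
      multi_dml_result r;
      r.send_eof(/* local_error= */ true);
      r.send_error();              // returns immediately
      printf("error_handled=%d\n", (int) r.error_handled);
      return 0;
    }
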
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 11e70a2e5da..96894fac5ec 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -837,59 +837,58 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
}
transactional_table= table->file->has_transactions();
- if ((changed= (info.copied || info.deleted || info.updated)) ||
- was_insert_delayed)
+ if ((changed= (info.copied || info.deleted || info.updated)))
{
/*
Invalidate the table in the query cache if something changed.
For the transactional algorithm to work the invalidation must be
before binlog writing and ha_autocommit_or_rollback
*/
- if (changed)
- query_cache_invalidate3(thd, table_list, 1);
- if (error <= 0 || !transactional_table)
+ query_cache_invalidate3(thd, table_list, 1);
+ }
+ if (changed && error <= 0 || thd->transaction.stmt.modified_non_trans_table
+ || was_insert_delayed)
+ {
+ if (mysql_bin_log.is_open())
{
- if (mysql_bin_log.is_open())
+ if (error <= 0)
{
- if (error <= 0)
- {
- /*
- [Guilhem wrote] Temporary errors may have filled
- thd->net.last_error/errno. For example if there has
- been a disk full error when writing the row, and it was
- MyISAM, then thd->net.last_error/errno will be set to
- "disk full"... and the my_pwrite() will wait until free
- space appears, and so when it finishes then the
- write_row() was entirely successful
- */
- /* todo: consider removing */
- thd->clear_error();
- }
- /* bug#22725:
-
- A query which per-row-loop can not be interrupted with
- KILLED, like INSERT, and that does not invoke stored
- routines can be binlogged with neglecting the KILLED error.
-
- If there was no error (error == zero) until after the end of
- inserting loop the KILLED flag that appeared later can be
- disregarded since previously possible invocation of stored
- routines did not result in any error due to the KILLED. In
- such case the flag is ignored for constructing binlog event.
- */
- DBUG_ASSERT(thd->killed != THD::KILL_BAD_DATA || error > 0);
- if (thd->binlog_query(THD::ROW_QUERY_TYPE,
- thd->query, thd->query_length,
- transactional_table, FALSE,
- (error>0) ? thd->killed : THD::NOT_KILLED) &&
- transactional_table)
- {
- error=1;
- }
- }
- if (thd->transaction.stmt.modified_non_trans_table)
- thd->transaction.all.modified_non_trans_table= TRUE;
+ /*
+ [Guilhem wrote] Temporary errors may have filled
+ thd->net.last_error/errno. For example if there has
+ been a disk full error when writing the row, and it was
+ MyISAM, then thd->net.last_error/errno will be set to
+ "disk full"... and the my_pwrite() will wait until free
+ space appears, and so when it finishes then the
+ write_row() was entirely successful
+ */
+ /* todo: consider removing */
+ thd->clear_error();
+ }
+ /* bug#22725:
+
+ A query which per-row-loop can not be interrupted with
+ KILLED, like INSERT, and that does not invoke stored
+ routines can be binlogged with neglecting the KILLED error.
+
+ If there was no error (error == zero) until after the end of
+ inserting loop the KILLED flag that appeared later can be
+ disregarded since previously possible invocation of stored
+ routines did not result in any error due to the KILLED. In
+ such case the flag is ignored for constructing binlog event.
+ */
+ DBUG_ASSERT(thd->killed != THD::KILL_BAD_DATA || error > 0);
+ if (thd->binlog_query(THD::ROW_QUERY_TYPE,
+ thd->query, thd->query_length,
+ transactional_table, FALSE,
+ (error>0) ? thd->killed : THD::NOT_KILLED) &&
+ transactional_table)
+ {
+ error=1;
+ }
}
+ if (thd->transaction.stmt.modified_non_trans_table)
+ thd->transaction.all.modified_non_trans_table= TRUE;
}
DBUG_ASSERT(transactional_table || !changed ||
thd->transaction.stmt.modified_non_trans_table);
@@ -3094,6 +3093,7 @@ bool select_insert::send_eof()
bool const trans_table= table->file->has_transactions();
ulonglong id;
bool changed;
+ THD::killed_state killed_status= thd->killed;
DBUG_ENTER("select_insert::send_eof");
DBUG_PRINT("enter", ("trans_table=%d, table_type='%s'",
trans_table, table->file->table_type()));
@@ -3128,7 +3128,7 @@ bool select_insert::send_eof()
thd->clear_error();
thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query, thd->query_length,
- trans_table, FALSE);
+ trans_table, FALSE, killed_status);
}
/*
We will call ha_autocommit_or_rollback() also for
@@ -3180,6 +3180,7 @@ void select_insert::abort() {
*/
if (table)
{
+ bool changed, transactional_table;
/*
If we are not in prelocked mode, we end the bulk insert started
before.
@@ -3201,20 +3202,20 @@ void select_insert::abort() {
If table creation failed, the number of rows modified will also be
zero, so no check for that is made.
*/
- if (info.copied || info.deleted || info.updated)
+ changed= (info.copied || info.deleted || info.updated);
+ transactional_table= table->file->has_transactions();
+ if (thd->transaction.stmt.modified_non_trans_table)
{
- DBUG_ASSERT(table != NULL);
- if (!table->file->has_transactions())
- {
if (mysql_bin_log.is_open())
thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query, thd->query_length,
- table->file->has_transactions(), FALSE);
- if (!thd->current_stmt_binlog_row_based && !table->s->tmp_table &&
- !can_rollback_data())
+ transactional_table, FALSE);
+ if (!thd->current_stmt_binlog_row_based && !can_rollback_data())
thd->transaction.all.modified_non_trans_table= TRUE;
- query_cache_invalidate3(thd, table, 1);
- }
+ if (changed)
+ query_cache_invalidate3(thd, table, 1);
}
+ DBUG_ASSERT(transactional_table || !changed ||
+ thd->transaction.stmt.modified_non_trans_table);
table->file->ha_release_auto_increment();
}
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 8bbe1e413b3..c96fbb80b0c 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -85,7 +85,8 @@ static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
#ifndef EMBEDDED_LIBRARY
static bool write_execute_load_query_log_event(THD *thd,
bool duplicates, bool ignore,
- bool transactional_table);
+ bool transactional_table,
+ THD::killed_state killed_status);
#endif /* EMBEDDED_LIBRARY */
/*
@@ -134,6 +135,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
char *tdb= thd->db ? thd->db : db; // Result is never null
ulong skip_lines= ex->skip_lines;
bool transactional_table;
+ THD::killed_state killed_status= THD::NOT_KILLED;
DBUG_ENTER("mysql_load");
#ifdef EMBEDDED_LIBRARY
@@ -403,7 +405,16 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
free_blobs(table); /* if pack_blob was used */
table->copy_blobs=0;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
-
+ /*
+ A simulated kill in the middle of the per-row loop
+ must be effective for binlogging.
+ */
+ DBUG_EXECUTE_IF("simulate_kill_bug27571",
+ {
+ error=1;
+ thd->killed= THD::KILL_QUERY;
+ };);
+ killed_status= (error == 0)? THD::NOT_KILLED : thd->killed;
/*
We must invalidate the table in query cache before binlog writing and
ha_autocommit_...
@@ -445,9 +456,10 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
/* If the file was not empty, wrote_create_file is true */
if (lf_info.wrote_create_file)
{
- if ((info.copied || info.deleted) && !transactional_table)
+ if (thd->transaction.stmt.modified_non_trans_table)
write_execute_load_query_log_event(thd, handle_duplicates,
- ignore, transactional_table);
+ ignore, transactional_table,
+ killed_status);
else
{
Delete_file_log_event d(thd, db, transactional_table);
@@ -492,8 +504,8 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
read_info.end_io_cache();
if (lf_info.wrote_create_file)
{
- write_execute_load_query_log_event(thd, handle_duplicates,
- ignore, transactional_table);
+ write_execute_load_query_log_event(thd, handle_duplicates, ignore,
+ transactional_table,killed_status);
}
}
}
@@ -523,7 +535,8 @@ err:
/* Not a very useful function; just to avoid duplication of code */
static bool write_execute_load_query_log_event(THD *thd,
bool duplicates, bool ignore,
- bool transactional_table)
+ bool transactional_table,
+ THD::killed_state killed_err_arg)
{
Execute_load_query_log_event
e(thd, thd->query, thd->query_length,
@@ -531,7 +544,7 @@ static bool write_execute_load_query_log_event(THD *thd,
(char*)thd->lex->fname_end - (char*)thd->query,
(duplicates == DUP_REPLACE) ? LOAD_DUP_REPLACE :
(ignore ? LOAD_DUP_IGNORE : LOAD_DUP_ERROR),
- transactional_table, FALSE);
+ transactional_table, FALSE, killed_err_arg);
e.flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F;
return mysql_bin_log.write(&e);
}
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 7e194ac76dc..c15370411f0 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -2080,7 +2080,16 @@ mysql_execute_command(THD *thd)
if (check_global_access(thd, SUPER_ACL | REPL_CLIENT_ACL))
goto error;
pthread_mutex_lock(&LOCK_active_mi);
- res = show_master_info(thd,active_mi);
+ if (active_mi != NULL)
+ {
+ res = show_master_info(thd, active_mi);
+ }
+ else
+ {
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0,
+ "the master info structure does not exist");
+ send_ok(thd);
+ }
pthread_mutex_unlock(&LOCK_active_mi);
break;
}
@@ -2935,6 +2944,13 @@ end_with_restore_list:
SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
OPTION_SETUP_TABLES_DONE,
del_result, unit, select_lex);
+ res|= thd->net.report_error;
+ if (unlikely(res))
+ {
+ /* If another error was reported earlier, then this one will be ignored */
+ del_result->send_error(ER_UNKNOWN_ERROR, "Execution of the query failed");
+ del_result->abort();
+ }
delete del_result;
}
else
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 0249af147b0..88040e2933c 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -369,7 +369,6 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
name=0; // Find first log
linfo.index_file_offset = 0;
- thd->current_linfo = &linfo;
if (mysql_bin_log.find_log_pos(&linfo, name, 1))
{
@@ -378,6 +377,10 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
goto err;
}
+ pthread_mutex_lock(&LOCK_thread_count);
+ thd->current_linfo = &linfo;
+ pthread_mutex_unlock(&LOCK_thread_count);
+
if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0)
{
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
@@ -1359,7 +1362,6 @@ bool mysql_show_binlog_events(THD* thd)
name=0; // Find first log
linfo.index_file_offset = 0;
- thd->current_linfo = &linfo;
if (mysql_bin_log.find_log_pos(&linfo, name, 1))
{
@@ -1367,6 +1369,10 @@ bool mysql_show_binlog_events(THD* thd)
goto err;
}
+ pthread_mutex_lock(&LOCK_thread_count);
+ thd->current_linfo = &linfo;
+ pthread_mutex_unlock(&LOCK_thread_count);
+
if ((file=open_binlog(&log, linfo.log_file_name, &errmsg)) < 0)
goto err;
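
Both hunks above delay publishing thd->current_linfo until find_log_pos() has succeeded and take LOCK_thread_count around the assignment, since other threads may read the pointer while scanning the thread list. The same pattern in isolation (the types and names are stand-ins):

    #include <pthread.h>
    #include <cstdio>

    struct LOG_INFO_SKETCH { const char *log_file_name; };

    static pthread_mutex_t LOCK_thread_count= PTHREAD_MUTEX_INITIALIZER;
    static LOG_INFO_SKETCH *current_linfo= 0;

    // Publish the pointer only under the mutex that readers also take.
    static void publish_linfo(LOG_INFO_SKETCH *linfo)
    {
      pthread_mutex_lock(&LOCK_thread_count);
      current_linfo= linfo;
      pthread_mutex_unlock(&LOCK_thread_count);
    }

    int main()
    {
      LOG_INFO_SKETCH linfo= { "binlog.000001" };
      publish_linfo(&linfo);
      printf("%s\n", current_linfo->log_file_name);
      return 0;
    }
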
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index a347482859f..9a7d7c59af3 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -29,6 +29,8 @@
#include "event_data_objects.h"
#include <my_dir.h>
+#define STR_OR_NIL(S) ((S) ? (S) : "<nil>")
+
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
#endif
@@ -3135,8 +3137,8 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
goto err;
}
DBUG_PRINT("INDEX VALUES",("db_name='%s', table_name='%s'",
- lookup_field_vals.db_value.str,
- lookup_field_vals.table_value.str));
+ STR_OR_NIL(lookup_field_vals.db_value.str),
+ STR_OR_NIL(lookup_field_vals.table_value.str)));
if (!lookup_field_vals.wild_db_value && !lookup_field_vals.wild_table_value)
{
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 46022c9f743..ecb7acda61b 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -203,6 +203,7 @@ int mysql_update(THD *thd,
bool need_reopen;
ulonglong id;
List<Item> all_fields;
+ THD::killed_state killed_status= THD::NOT_KILLED;
DBUG_ENTER("mysql_update");
for ( ; ; )
@@ -714,45 +715,25 @@ int mysql_update(THD *thd,
thd->row_count++;
}
dup_key_found= 0;
-
- if (!transactional_table && updated > 0)
- thd->transaction.stmt.modified_non_trans_table= TRUE;
-
-
/*
- todo bug#27571: to avoid asynchronization of `error' and
- `error_code' of binlog event constructor
-
- The concept, which is a bit different for insert(!), is to
- replace `error' assignment with the following lines
-
- killed_status= thd->killed; // get the status of the volatile
-
- Notice: thd->killed is type of "state" whereas the lhs has
- "status" the suffix which translates according to WordNet: a state
- at a particular time - at the time of the end of per-row loop in
- our case. Binlogging ops are conducted with the status.
-
- error= (killed_status == THD::NOT_KILLED)? error : 1;
-
- which applies to most mysql_$query functions.
- Event's constructor will accept `killed_status' as an argument:
-
- Query_log_event qinfo(..., killed_status);
-
- thd->killed might be changed after killed_status had got cached and this
- won't affect binlogging event but other effects remain.
-
- Open issue: In a case the error happened not because of KILLED -
- and then KILLED was caught later still within the loop - we shall
- do something to avoid binlogging of incorrect ER_SERVER_SHUTDOWN
- error_code.
+ Cache the killed status to pass as the argument to the query event
+ constructor. The cached value cannot change, whereas the killed
+ status can change (externally) after this point; a later change of
+ the latter won't affect binlogging.
+ It's assumed that if an error was set in combination with an
+ effective killed status, then the error is due to the killing.
*/
-
- if (thd->killed && !error)
- error= 1; // Aborted
- else if (will_batch &&
- (loc_error= table->file->exec_bulk_update(&dup_key_found)))
+ killed_status= thd->killed; // get the status of the volatile
+ // simulated killing after the loop must be ineffective for binlogging
+ DBUG_EXECUTE_IF("simulate_kill_bug27571",
+ {
+ thd->killed= THD::KILL_QUERY;
+ };);
+ error= (killed_status == THD::NOT_KILLED)? error : 1;
+
+ if (error &&
+ will_batch &&
+ (loc_error= table->file->exec_bulk_update(&dup_key_found)))
/*
An error has occurred when a batched update was performed and returned
an error indication. It cannot be an allowed duplicate key error since
@@ -774,6 +755,10 @@ int mysql_update(THD *thd,
if (will_batch)
table->file->end_bulk_update();
table->file->try_semi_consistent_read(0);
+
+ if (!transactional_table && updated > 0)
+ thd->transaction.stmt.modified_non_trans_table= TRUE;
+
end_read_record(&info);
delete select;
thd->proc_info= "end";
@@ -797,7 +782,7 @@ int mysql_update(THD *thd,
Sometimes we want to binlog even if we updated no rows, in case user used
it to be sure master and slave are in same state.
*/
- if ((error < 0) || (updated && !transactional_table))
+ if ((error < 0) || thd->transaction.stmt.modified_non_trans_table)
{
if (mysql_bin_log.is_open())
{
@@ -805,7 +790,7 @@ int mysql_update(THD *thd,
thd->clear_error();
if (thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query, thd->query_length,
- transactional_table, FALSE) &&
+ transactional_table, FALSE, killed_status) &&
transactional_table)
{
error=1; // Rollback update
@@ -1215,8 +1200,8 @@ multi_update::multi_update(TABLE_LIST *table_list,
:all_tables(table_list), leaves(leaves_list), update_tables(0),
tmp_tables(0), updated(0), found(0), fields(field_list),
values(value_list), table_count(0), copy_field(0),
- handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(0),
- transactional_tables(1), ignore(ignore_arg)
+ handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
+ transactional_tables(1), ignore(ignore_arg), error_handled(0)
{}
@@ -1418,7 +1403,6 @@ multi_update::initialize_tables(JOIN *join)
if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
DBUG_RETURN(1);
main_table=join->join_tab->table;
- trans_safe= transactional_tables= main_table->file->has_transactions();
table_to_update= 0;
/* Any update has at least one pair (field, value) */
@@ -1713,12 +1697,14 @@ void multi_update::send_error(uint errcode,const char *err)
/* First send error what ever it is ... */
my_error(errcode, MYF(0), err);
- /* If nothing updated return */
- if (updated == 0) /* the counter might be reset in send_eof */
- return; /* and then the query has been binlogged */
+ /* Return if the error was handled, or nothing was updated and there were no side effects */
+ if (error_handled ||
+ !thd->transaction.stmt.modified_non_trans_table && !updated)
+ return;
/* Something already updated so we have to invalidate cache */
- query_cache_invalidate3(thd, update_tables, 1);
+ if (updated)
+ query_cache_invalidate3(thd, update_tables, 1);
/*
If all tables that has been updated are trans safe then just do rollback.
If not attempt to do remaining updates.
@@ -1750,12 +1736,16 @@ void multi_update::send_error(uint errcode,const char *err)
*/
if (mysql_bin_log.is_open())
{
+ /*
+ THD::killed status might not have been set ON at the time the error
+ was caught; if the kill happens later, the killed error is written
+ into the replication event.
+ */
thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query, thd->query_length,
transactional_tables, FALSE);
}
- if (!trans_safe)
- thd->transaction.all.modified_non_trans_table= TRUE;
+ thd->transaction.all.modified_non_trans_table= TRUE;
}
DBUG_ASSERT(trans_safe || !updated || thd->transaction.stmt.modified_non_trans_table);
@@ -1947,11 +1937,20 @@ bool multi_update::send_eof()
{
char buff[STRING_BUFFER_USUAL_SIZE];
ulonglong id;
+ THD::killed_state killed_status= THD::NOT_KILLED;
DBUG_ENTER("multi_update::send_eof");
thd->proc_info="updating reference tables";
- /* Does updates for the last n - 1 tables, returns 0 if ok */
+ /*
+ Does updates for the last n - 1 tables, returns 0 if ok;
+ error takes into account killed status gained in do_updates()
+ */
int local_error = (table_count) ? do_updates(0) : 0;
+ /*
+ If local_error is still not set after do_updates(), then a kill
+ carried out later should not affect binlogging.
+ */
+ killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
thd->proc_info= "end";
/* We must invalidate the query cache before binlog writing and
@@ -1978,11 +1977,9 @@ bool multi_update::send_eof()
{
if (local_error == 0)
thd->clear_error();
- else
- updated= 0; /* if there's an error binlog it here not in ::send_error */
if (thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query, thd->query_length,
- transactional_tables, FALSE) &&
+ transactional_tables, FALSE, killed_status) &&
trans_safe)
{
local_error= 1; // Rollback update
@@ -1991,6 +1988,8 @@ bool multi_update::send_eof()
if (thd->transaction.stmt.modified_non_trans_table)
thd->transaction.all.modified_non_trans_table= TRUE;
}
+ if (local_error != 0)
+ error_handled= TRUE; // to force early leave from ::send_error()
if (transactional_tables)
{
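
The UPDATE, DELETE, INSERT and LOAD DATA paths in this merge converge on the same idiom: snapshot the volatile killed state once at the end of the row loop and hand that snapshot to the binlog call, so a KILL arriving afterwards cannot change the logged error code. A sketch of the idiom with stand-ins for THD::killed and binlog_query():

    #include <cstdio>

    enum killed_state { NOT_KILLED= 0, KILL_QUERY= 1 };

    static volatile killed_state thd_killed= NOT_KILLED;

    static void binlog_query(const char *query, killed_state killed_status)
    {
      printf("binlogged '%s' with error_code=%d\n", query, (int) killed_status);
    }

    int main()
    {
      int error= 0;                                 // outcome of the row loop
      killed_state killed_status=
          (error == 0) ? NOT_KILLED : thd_killed;   // cache the volatile once
      thd_killed= KILL_QUERY;                       // a later KILL is ignored
      binlog_query("UPDATE t1 SET a=a+1", killed_status);
      return 0;
    }
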
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 66316d1ab5c..04323568519 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -6751,6 +6751,7 @@ function_call_keyword:
| CURRENT_USER optional_braces
{
$$= new (YYTHD->mem_root) Item_func_current_user(Lex->current_context());
+ Lex->set_stmt_unsafe();
Lex->safe_to_cache_query= 0;
}
| DATE_SYM '(' expr ')'
@@ -6796,6 +6797,7 @@ function_call_keyword:
| USER '(' ')'
{
$$= new (YYTHD->mem_root) Item_func_user();
+ Lex->set_stmt_unsafe();
Lex->safe_to_cache_query=0;
}
| YEAR_SYM '(' expr ')'