summaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
Diffstat (limited to 'sql')
-rw-r--r--sql/Makefile.am8
-rw-r--r--sql/field.cc206
-rw-r--r--sql/field.h2
-rw-r--r--sql/field_conv.cc18
-rw-r--r--sql/filesort.cc1
-rw-r--r--sql/ha_ndbcluster.cc110
-rw-r--r--sql/ha_ndbcluster.h2
-rw-r--r--sql/ha_ndbcluster_binlog.cc6
-rw-r--r--sql/ha_ndbcluster_binlog.h3
-rw-r--r--sql/ha_partition.cc21
-rw-r--r--sql/ha_partition.h4
-rw-r--r--sql/handler.cc97
-rw-r--r--sql/handler.h75
-rw-r--r--sql/item.cc7
-rw-r--r--sql/item_cmpfunc.cc296
-rw-r--r--sql/item_cmpfunc.h165
-rw-r--r--sql/item_strfunc.cc3
-rw-r--r--sql/item_sum.cc22
-rw-r--r--sql/item_timefunc.cc4
-rw-r--r--sql/item_xmlfunc.cc64
-rw-r--r--sql/key.cc44
-rw-r--r--sql/log.cc576
-rw-r--r--sql/log.h2
-rw-r--r--sql/log_event.cc601
-rw-r--r--sql/log_event.h30
-rw-r--r--sql/mysql_priv.h42
-rw-r--r--sql/mysqld.cc25
-rw-r--r--sql/opt_range.cc33
-rw-r--r--sql/opt_range.h11
-rw-r--r--sql/partition_info.cc6
-rw-r--r--sql/set_var.cc16
-rw-r--r--sql/share/errmsg.txt30
-rw-r--r--sql/slave.cc29
-rw-r--r--sql/slave.h5
-rw-r--r--sql/sp.cc22
-rw-r--r--sql/sp_head.cc6
-rw-r--r--sql/sql_acl.cc12
-rw-r--r--sql/sql_analyse.cc8
-rw-r--r--sql/sql_base.cc5
-rw-r--r--sql/sql_binlog.cc133
-rw-r--r--sql/sql_cache.cc4
-rw-r--r--sql/sql_class.cc17
-rw-r--r--sql/sql_class.h1
-rw-r--r--sql/sql_cursor.cc12
-rw-r--r--sql/sql_insert.cc203
-rw-r--r--sql/sql_lex.cc3
-rw-r--r--sql/sql_parse.cc17
-rw-r--r--sql/sql_partition.cc12
-rw-r--r--sql/sql_partition.h2
-rw-r--r--sql/sql_plugin.cc18
-rw-r--r--sql/sql_repl.h2
-rw-r--r--sql/sql_select.cc176
-rw-r--r--sql/sql_select.h14
-rw-r--r--sql/sql_show.cc21
-rw-r--r--sql/sql_table.cc5
-rw-r--r--sql/sql_tablespace.cc4
-rw-r--r--sql/sql_trigger.h5
-rw-r--r--sql/sql_update.cc42
-rw-r--r--sql/sql_view.cc2
-rw-r--r--sql/sql_yacc.yy18
-rw-r--r--sql/stacktrace.c2
-rw-r--r--sql/table.cc6
-rw-r--r--sql/unireg.cc39
63 files changed, 2252 insertions, 1123 deletions
diff --git a/sql/Makefile.am b/sql/Makefile.am
index 502d2881eb0..38a99aaef88 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -27,7 +27,7 @@ INCLUDES = @ZLIB_INCLUDES@ \
WRAPLIBS= @WRAPLIBS@
SUBDIRS = share
libexec_PROGRAMS = mysqld
-noinst_PROGRAMS = gen_lex_hash
+EXTRA_PROGRAMS = gen_lex_hash
bin_PROGRAMS = mysql_tzinfo_to_sql
gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@
LDADD = $(top_builddir)/vio/libvio.a \
@@ -157,7 +157,11 @@ sql_yacc.o: sql_yacc.cc sql_yacc.h $(HEADERS)
@echo "If it fails, re-run configure with --with-low-memory"
$(CXXCOMPILE) $(LM_CFLAGS) -c $<
-lex_hash.h: gen_lex_hash$(EXEEXT)
+# This generates lex_hash.h
+# NOTE: Built sources should depend on their sources, not on the tool;
+# this avoids rebuilding the built files in a source dist
+lex_hash.h: gen_lex_hash.cc lex.h
+ $(MAKE) $(AM_MAKEFLAGS) gen_lex_hash$(EXEEXT)
./gen_lex_hash$(EXEEXT) > $@
# the following three should eventually be moved out of this directory
diff --git a/sql/field.cc b/sql/field.cc
index 2e478ead8b9..09e919d872a 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -2365,11 +2365,16 @@ int Field_new_decimal::store(const char *from, uint length,
from, length, charset, &decimal_value)) &&
table->in_use->abort_on_warning)
{
+ /* Because "from" is not NUL-terminated and we use %s in the ER() */
+ String from_as_str;
+ from_as_str.copy(from, length, &my_charset_bin);
+
push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
- "decimal", from, field_name,
+ "decimal", from_as_str.c_ptr(), field_name,
(ulong) table->in_use->row_count);
+
DBUG_RETURN(err);
}
@@ -2382,13 +2387,20 @@ int Field_new_decimal::store(const char *from, uint length,
set_value_on_overflow(&decimal_value, decimal_value.sign());
break;
case E_DEC_BAD_NUM:
+ {
+ /* Because "from" is not NUL-terminated and we use %s in the ER() */
+ String from_as_str;
+ from_as_str.copy(from, length, &my_charset_bin);
+
push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
- "decimal", from, field_name,
+ "decimal", from_as_str.c_ptr(), field_name,
(ulong) table->in_use->row_count);
my_decimal_set_zero(&decimal_value);
+
break;
+ }
}
#ifndef DBUG_OFF
@@ -2553,28 +2565,26 @@ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs)
ASSERT_COLUMN_MARKED_FOR_WRITE;
int not_used; // We can ignore result from str2int
char *end;
- long tmp= my_strntol(cs, from, len, 10, &end, &not_used);
- int error= 0;
+ int error;
if (unsigned_flag)
{
- if (tmp < 0)
- {
- tmp=0; /* purecov: inspected */
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
- error= 1;
- }
- else if (tmp > 255)
+ ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error);
+ if (error == MY_ERRNO_ERANGE || tmp > 255)
{
- tmp= 255;
+ set_if_smaller(tmp, 255);
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs))
error= 1;
+ else
+ error= 0;
+ ptr[0]= (char) tmp;
}
else
{
+ longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error);
if (tmp < -128)
{
tmp= -128;
@@ -2589,8 +2599,10 @@ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs)
}
else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs))
error= 1;
+ else
+ error= 0;
+ ptr[0]= (char) tmp;
}
- ptr[0]= (char) tmp;
return error;
}
@@ -2763,28 +2775,33 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs)
ASSERT_COLUMN_MARKED_FOR_WRITE;
int not_used; // We can ignore result from str2int
char *end;
- long tmp= my_strntol(cs, from, len, 10, &end, &not_used);
- int error= 0;
+ int error;
if (unsigned_flag)
{
- if (tmp < 0)
- {
- tmp=0;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
- error= 1;
- }
- else if (tmp > UINT_MAX16)
+ ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error);
+ if (error == MY_ERRNO_ERANGE || tmp > UINT_MAX16)
{
- tmp=UINT_MAX16;
+ set_if_smaller(tmp, UINT_MAX16);
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs))
error= 1;
+ else
+ error= 0;
+#ifdef WORDS_BIGENDIAN
+ if (table->s->db_low_byte_first)
+ {
+ int2store(ptr,tmp);
+ }
+ else
+#endif
+ shortstore(ptr,(short) tmp);
}
else
{
+ longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error);
if (tmp < INT_MIN16)
{
tmp= INT_MIN16;
@@ -2799,15 +2816,17 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs)
}
else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs))
error= 1;
- }
+ else
+ error= 0;
#ifdef WORDS_BIGENDIAN
- if (table->s->db_low_byte_first)
- {
- int2store(ptr,tmp);
- }
- else
+ if (table->s->db_low_byte_first)
+ {
+ int2store(ptr,tmp);
+ }
+ else
#endif
- shortstore(ptr,(short) tmp);
+ shortstore(ptr,(short) tmp);
+ }
return error;
}
@@ -3043,28 +3062,26 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs)
ASSERT_COLUMN_MARKED_FOR_WRITE;
int not_used; // We can ignore result from str2int
char *end;
- long tmp= my_strntol(cs, from, len, 10, &end, &not_used);
- int error= 0;
+ int error;
if (unsigned_flag)
{
- if (tmp < 0)
- {
- tmp=0;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
- error= 1;
- }
- else if (tmp >= (long) (1L << 24))
+ ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error);
+ if (error == MY_ERRNO_ERANGE || tmp > UINT_MAX24)
{
- tmp=(long) (1L << 24)-1L;
+ set_if_smaller(tmp, UINT_MAX24);
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs))
error= 1;
+ else
+ error= 0;
+ int3store(ptr,tmp);
}
else
{
+ longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error);
if (tmp < INT_MIN24)
{
tmp= INT_MIN24;
@@ -3079,9 +3096,10 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs)
}
else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs))
error= 1;
+ else
+ error= 0;
+ int3store(ptr,tmp);
}
-
- int3store(ptr,tmp);
return error;
}
@@ -3290,58 +3308,43 @@ int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
int error;
char *end;
- tmp_scan= cs->cset->scan(cs, from, from+len, MY_SEQ_SPACES);
- len-= tmp_scan;
- from+= tmp_scan;
-
- end= (char*) from+len;
- tmp= cs->cset->strtoll10(cs, from, &end, &error);
-
- if (error != MY_ERRNO_EDOM)
+ if (unsigned_flag)
{
- if (unsigned_flag)
+ ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error);
+ if (error == MY_ERRNO_ERANGE || tmp > (ulonglong) UINT_MAX32)
{
- if (error < 0)
- {
- error= 1;
- tmp= 0;
- }
- else if ((ulonglong) tmp > (ulonglong) UINT_MAX32)
- {
- tmp= UINT_MAX32;
- error= 1;
- }
- else
- error= 0;
+ set_if_smaller(tmp, (ulonglong) UINT_MAX32);
+ set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ error= 1;
}
+ else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs))
+ error= 1;
else
- {
- if (error < 0)
- {
- error= 0;
- if (tmp < INT_MIN32)
- {
- tmp= INT_MIN32;
- error= 1;
- }
- }
- else if (tmp > INT_MAX32)
- {
- tmp= INT_MAX32;
- error= 1;
- }
- }
+ error= 0;
+ store_tmp= (long) tmp;
}
- if (error)
+ else
{
- error= error != MY_ERRNO_EDOM ? 1 : 2;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error);
+ if (tmp < INT_MIN32)
+ {
+ tmp= INT_MIN32;
+ set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ error= 1;
+ }
+ else if (tmp > INT_MAX32)
+ {
+ tmp=INT_MAX32;
+ set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ error= 1;
+ }
+ else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs))
+ error= 1;
+ else
+ error= 0;
+ store_tmp= (long) tmp;
}
- else if (from+len != end && table->in_use->count_cuted_fields &&
- check_int(from,len,end,cs))
- error= 2;
- store_tmp= (long) tmp;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
{
@@ -3583,33 +3586,20 @@ void Field_long::sql_type(String &res) const
int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
{
ASSERT_COLUMN_MARKED_FOR_WRITE;
- longlong tmp;
int error= 0;
char *end;
+ ulonglong tmp;
- tmp= cs->cset->scan(cs, from, from+len, MY_SEQ_SPACES);
- len-= (uint)tmp;
- from+= tmp;
- if (unsigned_flag)
- {
- if (!len || test_if_minus(cs, from, from + len))
- {
- tmp=0; // Set negative to 0
- error= 1;
- }
- else
- tmp=(longlong) my_strntoull(cs,from,len,10,&end,&error);
- }
- else
- tmp=my_strntoll(cs,from,len,10,&end,&error);
- if (error)
+ tmp= cs->cset->strntoull10rnd(cs,from,len,unsigned_flag,&end,&error);
+ if (error == MY_ERRNO_ERANGE)
{
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
- else if (from+len != end && table->in_use->count_cuted_fields &&
- check_int(from,len,end,cs))
- error= 2;
+ else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs))
+ error= 1;
+ else
+ error= 0;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
{
@@ -8163,7 +8153,7 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
(delta == -1 && (uchar) *from > ((1 << bit_len) - 1)) ||
(!bit_len && delta < 0))
{
- set_rec_bits(0xff, bit_ptr, bit_ofs, bit_len);
+ set_rec_bits((1 << bit_len) - 1, bit_ptr, bit_ofs, bit_len);
memset(ptr, 0xff, bytes_in_rec);
if (table->in_use->really_abort_on_warning())
set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1);
@@ -9160,7 +9150,7 @@ create_field::create_field(Field *old_field,Field *orig_field)
case 3: sql_type= FIELD_TYPE_MEDIUM_BLOB; break;
default: sql_type= FIELD_TYPE_LONG_BLOB; break;
}
- length=(length+charset->mbmaxlen-1) / charset->mbmaxlen;
+ length/= charset->mbmaxlen;
key_length/= charset->mbmaxlen;
break;
case MYSQL_TYPE_STRING:
diff --git a/sql/field.h b/sql/field.h
index 8a6bda500d3..9b81931d416 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1522,6 +1522,8 @@ public:
uint decimals, flags, pack_length, key_length;
Field::utype unireg_check;
TYPELIB *interval; // Which interval to use
+ TYPELIB *save_interval; // Temporary copy for the above
+ // Used only for UCS2 intervals
List<String> interval_list;
CHARSET_INFO *charset;
Field::geometry_type geom_type;
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index 20d1e372a2c..7bc6c432d1c 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -427,6 +427,21 @@ static void do_varstring2(Copy_field *copy)
length);
}
+
+static void do_varstring2_mb(Copy_field *copy)
+{
+ int well_formed_error;
+ CHARSET_INFO *cs= copy->from_field->charset();
+ uint char_length= (copy->to_length - HA_KEY_BLOB_LENGTH) / cs->mbmaxlen;
+ uint from_length= uint2korr(copy->from_ptr);
+ const char *from_beg= copy->from_ptr + HA_KEY_BLOB_LENGTH;
+ uint length= cs->cset->well_formed_len(cs, from_beg, from_beg + from_length,
+ char_length, &well_formed_error);
+ int2store(copy->to_ptr, length);
+ memcpy(copy->to_ptr+HA_KEY_BLOB_LENGTH, from_beg, length);
+}
+
+
/***************************************************************************
** The different functions that fills in a Copy_field class
***************************************************************************/
@@ -586,7 +601,8 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*)
return do_field_string;
if (to_length != from_length)
return (((Field_varstring*) to)->length_bytes == 1 ?
- do_varstring1 : do_varstring2);
+ do_varstring1 : (from->charset()->mbmaxlen == 1 ?
+ do_varstring2 : do_varstring2_mb));
}
else if (to_length < from_length)
return (from->charset()->mbmaxlen == 1 ?
diff --git a/sql/filesort.cc b/sql/filesort.cc
index eb2960a0458..01f3bb97557 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -1345,6 +1345,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
switch ((sortorder->result_type=sortorder->item->result_type())) {
case STRING_RESULT:
sortorder->length=sortorder->item->max_length;
+ set_if_smaller(sortorder->length, thd->variables.max_sort_length);
if (use_strnxfrm((cs=sortorder->item->collation.collation)))
{
sortorder->length= cs->coll->strnxfrmlen(cs, sortorder->length);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index ccfe8e717fb..13ea471511c 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -75,17 +75,25 @@ static const int max_transactions= 3; // should really be 2 but there is a trans
static uint ndbcluster_partition_flags();
static uint ndbcluster_alter_table_flags(uint flags);
static int ndbcluster_init(void *);
-static int ndbcluster_end(ha_panic_function flag);
-static bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type);
-static int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info);
-static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond);
+static int ndbcluster_end(handlerton *hton, ha_panic_function flag);
+static bool ndbcluster_show_status(handlerton *hton, THD*,
+ stat_print_fn *,
+ enum ha_stat_type);
+static int ndbcluster_alter_tablespace(handlerton *hton,
+ THD* thd,
+ st_alter_tablespace *info);
+static int ndbcluster_fill_files_table(handlerton *hton,
+ THD *thd,
+ TABLE_LIST *tables,
+ COND *cond);
handlerton *ndbcluster_hton;
-static handler *ndbcluster_create_handler(TABLE_SHARE *table,
+static handler *ndbcluster_create_handler(handlerton *hton,
+ TABLE_SHARE *table,
MEM_ROOT *mem_root)
{
- return new (mem_root) ha_ndbcluster(table);
+ return new (mem_root) ha_ndbcluster(hton, table);
}
static uint ndbcluster_partition_flags()
@@ -179,8 +187,8 @@ static long ndb_cluster_node_id= 0;
static const char * ndb_connected_host= 0;
static long ndb_connected_port= 0;
static long ndb_number_of_replicas= 0;
-long ndb_number_of_storage_nodes= 0;
-long ndb_number_of_ready_storage_nodes= 0;
+long ndb_number_of_data_nodes= 0;
+long ndb_number_of_ready_data_nodes= 0;
long ndb_connect_count= 0;
static int update_status_variables(Ndb_cluster_connection *c)
@@ -189,8 +197,8 @@ static int update_status_variables(Ndb_cluster_connection *c)
ndb_connected_port= c->get_connected_port();
ndb_connected_host= c->get_connected_host();
ndb_number_of_replicas= 0;
- ndb_number_of_storage_nodes= c->no_db_nodes();
- ndb_number_of_ready_storage_nodes= c->get_no_ready();
+ ndb_number_of_ready_data_nodes= c->get_no_ready();
+ ndb_number_of_data_nodes= c->no_db_nodes();
ndb_connect_count= c->get_connect_count();
return 0;
}
@@ -200,7 +208,7 @@ SHOW_VAR ndb_status_variables[]= {
{"config_from_host", (char*) &ndb_connected_host, SHOW_CHAR_PTR},
{"config_from_port", (char*) &ndb_connected_port, SHOW_LONG},
// {"number_of_replicas", (char*) &ndb_number_of_replicas, SHOW_LONG},
- {"number_of_storage_nodes",(char*) &ndb_number_of_storage_nodes, SHOW_LONG},
+ {"number_of_data_nodes",(char*) &ndb_number_of_data_nodes, SHOW_LONG},
{NullS, NullS, SHOW_LONG}
};
@@ -2395,6 +2403,30 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
DBUG_RETURN(next_result(buf));
}
+static
+int
+guess_scan_flags(NdbOperation::LockMode lm,
+ const NDBTAB* tab, const MY_BITMAP* readset)
+{
+ int flags= 0;
+ flags|= (lm == NdbOperation::LM_Read) ? NdbScanOperation::SF_KeyInfo : 0;
+ if (tab->checkColumns(0, 0) & 2)
+ {
+ int ret = tab->checkColumns(readset->bitmap, no_bytes_in_map(readset));
+
+ if (ret & 2)
+ { // If disk columns...use disk scan
+ flags |= NdbScanOperation::SF_DiskScan;
+ }
+ else if ((ret & 4) == 0 && (lm == NdbOperation::LM_Exclusive))
+ {
+ // If no mem column is set and exclusive...guess disk scan
+ flags |= NdbScanOperation::SF_DiskScan;
+ }
+ }
+ return flags;
+}
+
/*
Start full table scan in NDB
*/
@@ -2412,11 +2444,9 @@ int ha_ndbcluster::full_table_scan(byte *buf)
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
- bool need_pk = (lm == NdbOperation::LM_Read);
+ int flags= guess_scan_flags(lm, m_table, table->read_set);
if (!(op=trans->getNdbScanOperation(m_table)) ||
- op->readTuples(lm,
- (need_pk)?NdbScanOperation::SF_KeyInfo:0,
- parallelism))
+ op->readTuples(lm, flags, parallelism))
ERR_RETURN(trans->getNdbError());
m_active_cursor= op;
@@ -4208,7 +4238,7 @@ int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type)
Commit a transaction started in NDB
*/
-static int ndbcluster_commit(THD *thd, bool all)
+static int ndbcluster_commit(handlerton *hton, THD *thd, bool all)
{
int res= 0;
Thd_ndb *thd_ndb= get_thd_ndb(thd);
@@ -4259,7 +4289,7 @@ static int ndbcluster_commit(THD *thd, bool all)
Rollback a transaction started in NDB
*/
-static int ndbcluster_rollback(THD *thd, bool all)
+static int ndbcluster_rollback(handlerton *hton, THD *thd, bool all)
{
int res= 0;
Thd_ndb *thd_ndb= get_thd_ndb(thd);
@@ -5559,8 +5589,8 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
HA_HAS_OWN_BINLOGGING | \
HA_HAS_RECORDS
-ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg):
- handler(ndbcluster_hton, table_arg),
+ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg):
+ handler(hton, table_arg),
m_active_trans(NULL),
m_active_cursor(NULL),
m_table(NULL),
@@ -5823,7 +5853,7 @@ int ha_ndbcluster::check_ndb_connection(THD* thd)
}
-static int ndbcluster_close_connection(THD *thd)
+static int ndbcluster_close_connection(handlerton *hton, THD *thd)
{
Thd_ndb *thd_ndb= get_thd_ndb(thd);
DBUG_ENTER("ndbcluster_close_connection");
@@ -5840,8 +5870,10 @@ static int ndbcluster_close_connection(THD *thd)
Try to discover one table from NDB
*/
-int ndbcluster_discover(THD* thd, const char *db, const char *name,
- const void** frmblob, uint* frmlen)
+int ndbcluster_discover(handlerton *hton, THD* thd, const char *db,
+ const char *name,
+ const void** frmblob,
+ uint* frmlen)
{
int error= 0;
NdbError ndb_error;
@@ -5921,7 +5953,8 @@ err:
*/
-int ndbcluster_table_exists_in_engine(THD* thd, const char *db,
+int ndbcluster_table_exists_in_engine(handlerton *hton, THD* thd,
+ const char *db,
const char *name)
{
Ndb* ndb;
@@ -6021,7 +6054,7 @@ int ndbcluster_drop_database_impl(const char *path)
DBUG_RETURN(ret);
}
-static void ndbcluster_drop_database(char *path)
+static void ndbcluster_drop_database(handlerton *hton, char *path)
{
THD *thd= current_thd;
DBUG_ENTER("ndbcluster_drop_database");
@@ -6182,7 +6215,9 @@ int ndbcluster_find_all_files(THD *thd)
DBUG_RETURN(-(skipped + unhandled));
}
-int ndbcluster_find_files(THD *thd,const char *db,const char *path,
+int ndbcluster_find_files(handlerton *hton, THD *thd,
+ const char *db,
+ const char *path,
const char *wild, bool dir, List<char> *files)
{
DBUG_ENTER("ndbcluster_find_files");
@@ -6292,7 +6327,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
DBUG_PRINT("info", ("%s existed on disk", name));
// The .ndb file exists on disk, but it's not in list of tables in ndb
// Verify that handler agrees table is gone.
- if (ndbcluster_table_exists_in_engine(thd, db, file_name) == 0)
+ if (ndbcluster_table_exists_in_engine(hton, thd, db, file_name) == 0)
{
DBUG_PRINT("info", ("NDB says %s does not exists", file_name));
it.remove();
@@ -6546,7 +6581,7 @@ ndbcluster_init_error:
DBUG_RETURN(TRUE);
}
-static int ndbcluster_end(ha_panic_function type)
+static int ndbcluster_end(handlerton *hton, ha_panic_function type)
{
DBUG_ENTER("ndbcluster_end");
@@ -6630,7 +6665,7 @@ void ndbcluster_print_error(int error, const NdbOperation *error_op)
share.db.length= 0;
share.table_name.str= (char *) tab_name;
share.table_name.length= strlen(tab_name);
- ha_ndbcluster error_handler(&share);
+ ha_ndbcluster error_handler(ndbcluster_hton, &share);
error_handler.print_error(error, MYF(0));
DBUG_VOID_RETURN;
}
@@ -9710,7 +9745,7 @@ err:
Implements the SHOW NDB STATUS command.
*/
bool
-ndbcluster_show_status(THD* thd, stat_print_fn *stat_print,
+ndbcluster_show_status(handlerton *hton, THD* thd, stat_print_fn *stat_print,
enum ha_stat_type stat_type)
{
char buf[IO_SIZE];
@@ -9732,14 +9767,14 @@ ndbcluster_show_status(THD* thd, stat_print_fn *stat_print,
"cluster_node_id=%u, "
"connected_host=%s, "
"connected_port=%u, "
- "number_of_storage_nodes=%u, "
- "number_of_ready_storage_nodes=%u, "
+ "number_of_data_nodes=%u, "
+ "number_of_ready_data_nodes=%u, "
"connect_count=%u",
ndb_cluster_node_id,
ndb_connected_host,
ndb_connected_port,
- ndb_number_of_storage_nodes,
- ndb_number_of_ready_storage_nodes,
+ ndb_number_of_data_nodes,
+ ndb_number_of_ready_data_nodes,
ndb_connect_count);
if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
STRING_WITH_LEN("connection"), buf, buflen))
@@ -10174,7 +10209,7 @@ bool set_up_undofile(st_alter_tablespace *info,
return false;
}
-int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
+int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace *info)
{
DBUG_ENTER("ha_ndbcluster::alter_tablespace");
@@ -10435,7 +10470,9 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
DBUG_RETURN(TRUE);
}
-static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables,
+static int ndbcluster_fill_files_table(handlerton *hton,
+ THD *thd,
+ TABLE_LIST *tables,
COND *cond)
{
TABLE* table= tables->table;
@@ -10756,7 +10793,7 @@ SHOW_VAR ndb_status_variables_export[]= {
};
struct st_mysql_storage_engine ndbcluster_storage_engine=
-{ MYSQL_HANDLERTON_INTERFACE_VERSION, ndbcluster_hton };
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
mysql_declare_plugin(ndbcluster)
{
@@ -10765,6 +10802,7 @@ mysql_declare_plugin(ndbcluster)
ndbcluster_hton_name,
"MySQL AB",
"Clustered, fault-tolerant tables",
+ PLUGIN_LICENSE_GPL,
ndbcluster_init, /* Plugin Init */
NULL, /* Plugin Deinit */
0x0100 /* 1.0 */,
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 5cd5bb2c716..19ff513f9b1 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -627,7 +627,7 @@ class Thd_ndb
class ha_ndbcluster: public handler
{
public:
- ha_ndbcluster(TABLE_SHARE *table);
+ ha_ndbcluster(handlerton *hton, TABLE_SHARE *table);
~ha_ndbcluster();
int ha_initialise();
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 3fc84ad1b66..5f5c8bcb221 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -506,7 +506,7 @@ ndbcluster_binlog_index_purge_file(THD *thd, const char *file)
}
static void
-ndbcluster_binlog_log_query(THD *thd, enum_binlog_command binlog_command,
+ndbcluster_binlog_log_query(handlerton *hton, THD *thd, enum_binlog_command binlog_command,
const char *query, uint query_length,
const char *db, const char *table_name)
{
@@ -637,7 +637,9 @@ static void ndbcluster_reset_slave(THD *thd)
/*
Initialize the binlog part of the ndb handlerton
*/
-static int ndbcluster_binlog_func(THD *thd, enum_binlog_func fn, void *arg)
+static int ndbcluster_binlog_func(handlerton *hton, THD *thd,
+ enum_binlog_func fn,
+ void *arg)
{
switch(fn)
{
diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h
index 822ebf3e358..233d1a58aaa 100644
--- a/sql/ha_ndbcluster_binlog.h
+++ b/sql/ha_ndbcluster_binlog.h
@@ -31,6 +31,8 @@ extern ulong ndb_extra_logging;
#define NDB_INVALID_SCHEMA_OBJECT 241
+extern handlerton *ndbcluster_hton;
+
/*
The numbers below must not change as they
are passed between mysql servers, and if changed
@@ -103,7 +105,6 @@ extern pthread_mutex_t injector_mutex;
extern pthread_cond_t injector_cond;
extern unsigned char g_node_id_map[max_ndb_nodes];
-extern handlerton *ndbcluster_hton;
extern pthread_t ndb_util_thread;
extern pthread_mutex_t LOCK_ndb_util_thread;
extern pthread_cond_t COND_ndb_util_thread;
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 1e75dd08f38..713be1b7b5d 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -69,16 +69,17 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE * table);
MODULE create/delete handler object
****************************************************************************/
-static handler *partition_create_handler(TABLE_SHARE *share,
+static handler *partition_create_handler(handlerton *hton,
+ TABLE_SHARE *share,
MEM_ROOT *mem_root);
static uint partition_flags();
static uint alter_table_flags(uint flags);
-handlerton *partition_hton;
static int partition_initialize(void *p)
{
+ handlerton *partition_hton;
partition_hton= (handlerton *)p;
partition_hton->state= SHOW_OPTION_YES;
@@ -102,10 +103,11 @@ static int partition_initialize(void *p)
New partition object
*/
-static handler *partition_create_handler(TABLE_SHARE *share,
+static handler *partition_create_handler(handlerton *hton,
+ TABLE_SHARE *share,
MEM_ROOT *mem_root)
{
- ha_partition *file= new (mem_root) ha_partition(share);
+ ha_partition *file= new (mem_root) ha_partition(hton, share);
if (file && file->initialise_partition(mem_root))
{
delete file;
@@ -155,8 +157,8 @@ static uint alter_table_flags(uint flags __attribute__((unused)))
NONE
*/
-ha_partition::ha_partition(TABLE_SHARE *share)
- :handler(partition_hton, share), m_part_info(NULL), m_create_handler(FALSE),
+ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
+ :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE),
m_is_sub_partitioned(0)
{
DBUG_ENTER("ha_partition::ha_partition(table)");
@@ -176,8 +178,8 @@ ha_partition::ha_partition(TABLE_SHARE *share)
NONE
*/
-ha_partition::ha_partition(partition_info *part_info)
- :handler(partition_hton, NULL), m_part_info(part_info),
+ha_partition::ha_partition(handlerton *hton, partition_info *part_info)
+ :handler(hton, NULL), m_part_info(part_info),
m_create_handler(TRUE),
m_is_sub_partitioned(m_part_info->is_sub_partitioned())
@@ -5659,7 +5661,7 @@ static int free_share(PARTITION_SHARE *share)
#endif /* NOT_USED */
struct st_mysql_storage_engine partition_storage_engine=
-{ MYSQL_HANDLERTON_INTERFACE_VERSION, partition_hton };
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
mysql_declare_plugin(partition)
{
@@ -5668,6 +5670,7 @@ mysql_declare_plugin(partition)
"partition",
"Mikael Ronstrom, MySQL AB",
"Partition Storage Engine Helper",
+ PLUGIN_LICENSE_GPL,
partition_initialize, /* Plugin Init */
NULL, /* Plugin Deinit */
0x0100, /* 1.0 */
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index d76591b7514..008bda01bc9 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -150,8 +150,8 @@ public:
partition handler.
-------------------------------------------------------------------------
*/
- ha_partition(TABLE_SHARE * table);
- ha_partition(partition_info * part_info);
+ ha_partition(handlerton *hton, TABLE_SHARE * table);
+ ha_partition(handlerton *hton, partition_info * part_info);
~ha_partition();
/*
A partition handler has no characteristics in itself. It only inherits
diff --git a/sql/handler.cc b/sql/handler.cc
index 7848552f3de..ccf1a1ef8d9 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -170,7 +170,7 @@ const char *ha_get_storage_engine(enum legacy_db_type db_type)
static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
handlerton *hton= ha_default_handlerton(current_thd);
- return (hton && hton->create) ? hton->create(table, mem_root) : NULL;
+ return (hton && hton->create) ? hton->create(hton, table, mem_root) : NULL;
}
@@ -232,7 +232,7 @@ handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc,
if (db_type && db_type->state == SHOW_OPTION_YES && db_type->create)
{
- if ((file= db_type->create(share, alloc)))
+ if ((file= db_type->create(db_type, share, alloc)))
file->init();
DBUG_RETURN(file);
}
@@ -251,7 +251,7 @@ handler *get_ha_partition(partition_info *part_info)
{
ha_partition *partition;
DBUG_ENTER("get_ha_partition");
- if ((partition= new ha_partition(part_info)))
+ if ((partition= new ha_partition(partition_hton, part_info)))
{
if (partition->initialise_partition(current_thd->mem_root))
{
@@ -376,7 +376,7 @@ int ha_finalize_handlerton(st_plugin_int *plugin)
case SHOW_OPTION_YES:
if (installed_htons[hton->db_type] == hton)
installed_htons[hton->db_type]= NULL;
- if (hton->panic && hton->panic(HA_PANIC_CLOSE))
+ if (hton->panic && hton->panic(hton, HA_PANIC_CLOSE))
DBUG_RETURN(1);
break;
};
@@ -465,6 +465,26 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
hton->state= SHOW_OPTION_DISABLED;
break;
}
+
+ /*
+ This is entirely for legacy. We will create a new "disk based" hton and a
+ "memory" hton which will be configurable longterm. We should be able to
+ remove partition and myisammrg.
+ */
+ switch (hton->db_type) {
+ case DB_TYPE_HEAP:
+ heap_hton= hton;
+ break;
+ case DB_TYPE_MYISAM:
+ myisam_hton= hton;
+ break;
+ case DB_TYPE_PARTITION_DB:
+ partition_hton= hton;
+ break;
+ default:
+ break;
+ };
+
DBUG_RETURN(0);
err:
DBUG_RETURN(1);
@@ -498,7 +518,7 @@ static my_bool panic_handlerton(THD *unused1, st_plugin_int *plugin, void *arg)
{
handlerton *hton= (handlerton *)plugin->data;
if (hton->state == SHOW_OPTION_YES && hton->panic)
- ((int*)arg)[0]|= hton->panic((enum ha_panic_function)((int*)arg)[1]);
+ ((int*)arg)[0]|= hton->panic(hton, (enum ha_panic_function)((int*)arg)[1]);
return FALSE;
}
@@ -520,7 +540,7 @@ static my_bool dropdb_handlerton(THD *unused1, st_plugin_int *plugin,
{
handlerton *hton= (handlerton *)plugin->data;
if (hton->state == SHOW_OPTION_YES && hton->drop_database)
- hton->drop_database((char *)path);
+ hton->drop_database(hton, (char *)path);
return FALSE;
}
@@ -541,7 +561,7 @@ static my_bool closecon_handlerton(THD *thd, st_plugin_int *plugin,
*/
if (hton->state == SHOW_OPTION_YES && hton->close_connection &&
thd->ha_data[hton->slot])
- hton->close_connection(thd);
+ hton->close_connection(hton, thd);
return FALSE;
}
@@ -617,7 +637,7 @@ int ha_prepare(THD *thd)
statistic_increment(thd->status_var.ha_prepare_count,&LOCK_status);
if ((*ht)->prepare)
{
- if ((err= (*(*ht)->prepare)(thd, all)))
+ if ((err= (*(*ht)->prepare)(*ht, thd, all)))
{
my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
ha_rollback_trans(thd, all);
@@ -691,7 +711,7 @@ int ha_commit_trans(THD *thd, bool all)
for (; *ht && !error; ht++)
{
int err;
- if ((err= (*(*ht)->prepare)(thd, all)))
+ if ((err= (*(*ht)->prepare)(*ht, thd, all)))
{
my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
error= 1;
@@ -738,7 +758,7 @@ int ha_commit_one_phase(THD *thd, bool all)
for (ht=trans->ht; *ht; ht++)
{
int err;
- if ((err= (*(*ht)->commit)(thd, all)))
+ if ((err= (*(*ht)->commit)(*ht, thd, all)))
{
my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
error=1;
@@ -794,7 +814,7 @@ int ha_rollback_trans(THD *thd, bool all)
for (handlerton **ht=trans->ht; *ht; ht++)
{
int err;
- if ((err= (*(*ht)->rollback)(thd, all)))
+ if ((err= (*(*ht)->rollback)(*ht, thd, all)))
{ // cannot happen
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
@@ -871,7 +891,7 @@ static my_bool xacommit_handlerton(THD *unused1, st_plugin_int *plugin,
handlerton *hton= (handlerton *)plugin->data;
if (hton->state == SHOW_OPTION_YES && hton->recover)
{
- hton->commit_by_xid(((struct xahton_st *)arg)->xid);
+ hton->commit_by_xid(hton, ((struct xahton_st *)arg)->xid);
((struct xahton_st *)arg)->result= 0;
}
return FALSE;
@@ -883,7 +903,7 @@ static my_bool xarollback_handlerton(THD *unused1, st_plugin_int *plugin,
handlerton *hton= (handlerton *)plugin->data;
if (hton->state == SHOW_OPTION_YES && hton->recover)
{
- hton->rollback_by_xid(((struct xahton_st *)arg)->xid);
+ hton->rollback_by_xid(hton, ((struct xahton_st *)arg)->xid);
((struct xahton_st *)arg)->result= 0;
}
return FALSE;
@@ -993,7 +1013,7 @@ static my_bool xarecover_handlerton(THD *unused, st_plugin_int *plugin,
if (hton->state == SHOW_OPTION_YES && hton->recover)
{
- while ((got= hton->recover(info->list, info->len)) > 0 )
+ while ((got= hton->recover(hton, info->list, info->len)) > 0 )
{
sql_print_information("Found %d prepared transaction(s) in %s",
got, hton2plugin[hton->slot]->name.str);
@@ -1024,7 +1044,7 @@ static my_bool xarecover_handlerton(THD *unused, st_plugin_int *plugin,
char buf[XIDDATASIZE*4+6]; // see xid_to_str
sql_print_information("commit xid %s", xid_to_str(buf, info->list+i));
#endif
- hton->commit_by_xid(info->list+i);
+ hton->commit_by_xid(hton, info->list+i);
}
else
{
@@ -1033,7 +1053,7 @@ static my_bool xarecover_handlerton(THD *unused, st_plugin_int *plugin,
sql_print_information("rollback xid %s",
xid_to_str(buf, info->list+i));
#endif
- hton->rollback_by_xid(info->list+i);
+ hton->rollback_by_xid(hton, info->list+i);
}
}
if (got < info->len)
@@ -1179,7 +1199,7 @@ static my_bool release_temporary_latches(THD *thd, st_plugin_int *plugin,
handlerton *hton= (handlerton *)plugin->data;
if (hton->state == SHOW_OPTION_YES && hton->release_temporary_latches)
- hton->release_temporary_latches(thd);
+ hton->release_temporary_latches(hton, thd);
return FALSE;
}
@@ -1212,7 +1232,7 @@ int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv)
{
int err;
DBUG_ASSERT((*ht)->savepoint_set != 0);
- if ((err= (*(*ht)->savepoint_rollback)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
+ if ((err= (*(*ht)->savepoint_rollback)(*ht, thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
{ // cannot happen
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
@@ -1228,7 +1248,7 @@ int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv)
for (; *ht ; ht++)
{
int err;
- if ((err= (*(*ht)->rollback)(thd, !thd->in_sub_stmt)))
+ if ((err= (*(*ht)->rollback)(*ht, thd, !thd->in_sub_stmt)))
{ // cannot happen
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
@@ -1262,7 +1282,7 @@ int ha_savepoint(THD *thd, SAVEPOINT *sv)
error=1;
break;
}
- if ((err= (*(*ht)->savepoint_set)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
+ if ((err= (*(*ht)->savepoint_set)(*ht, thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
{ // cannot happen
my_error(ER_GET_ERRNO, MYF(0), err);
error=1;
@@ -1288,7 +1308,9 @@ int ha_release_savepoint(THD *thd, SAVEPOINT *sv)
int err;
if (!(*ht)->savepoint_release)
continue;
- if ((err= (*(*ht)->savepoint_release)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
+ if ((err= (*(*ht)->savepoint_release)(*ht, thd,
+ (byte *)(sv+1)+
+ (*ht)->savepoint_offset)))
{ // cannot happen
my_error(ER_GET_ERRNO, MYF(0), err);
error=1;
@@ -1305,7 +1327,7 @@ static my_bool snapshot_handlerton(THD *thd, st_plugin_int *plugin,
if (hton->state == SHOW_OPTION_YES &&
hton->start_consistent_snapshot)
{
- hton->start_consistent_snapshot(thd);
+ hton->start_consistent_snapshot(hton, thd);
*((bool *)arg)= false;
}
return FALSE;
@@ -1333,7 +1355,8 @@ static my_bool flush_handlerton(THD *thd, st_plugin_int *plugin,
void *arg)
{
handlerton *hton= (handlerton *)plugin->data;
- if (hton->state == SHOW_OPTION_YES && hton->flush_logs && hton->flush_logs())
+ if (hton->state == SHOW_OPTION_YES && hton->flush_logs &&
+ hton->flush_logs(hton))
return TRUE;
return FALSE;
}
@@ -1350,7 +1373,7 @@ bool ha_flush_logs(handlerton *db_type)
else
{
if (db_type->state != SHOW_OPTION_YES ||
- (db_type->flush_logs && db_type->flush_logs()))
+ (db_type->flush_logs && db_type->flush_logs(db_type)))
return TRUE;
}
return FALSE;
@@ -2765,7 +2788,9 @@ static my_bool discover_handlerton(THD *thd, st_plugin_int *plugin,
st_discover_args *vargs= (st_discover_args *)arg;
handlerton *hton= (handlerton *)plugin->data;
if (hton->state == SHOW_OPTION_YES && hton->discover &&
- (!(hton->discover(thd, vargs->db, vargs->name, vargs->frmblob, vargs->frmlen))))
+ (!(hton->discover(hton, thd, vargs->db, vargs->name,
+ vargs->frmblob,
+ vargs->frmlen))))
return TRUE;
return FALSE;
@@ -2814,7 +2839,7 @@ static my_bool find_files_handlerton(THD *thd, st_plugin_int *plugin,
if (hton->state == SHOW_OPTION_YES && hton->find_files)
- if (hton->find_files(thd, vargs->db, vargs->path, vargs->wild,
+ if (hton->find_files(hton, thd, vargs->db, vargs->path, vargs->wild,
vargs->dir, vargs->files))
return TRUE;
@@ -2861,7 +2886,7 @@ static my_bool table_exists_in_engine_handlerton(THD *thd, st_plugin_int *plugin
handlerton *hton= (handlerton *)plugin->data;
if (hton->state == SHOW_OPTION_YES && hton->table_exists_in_engine)
- if ((hton->table_exists_in_engine(thd, vargs->db, vargs->name)) == 1)
+ if ((hton->table_exists_in_engine(hton, thd, vargs->db, vargs->name)) == 1)
return TRUE;
return FALSE;
@@ -2930,7 +2955,7 @@ static my_bool binlog_func_foreach(THD *thd, binlog_func_st *bfn)
uint i= 0, sz= hton_list.sz;
while(i < sz)
- hton_list.hton[i++]->binlog_func(thd, bfn->fn, bfn->arg);
+ hton_list.hton[i++]->binlog_func(hton, thd, bfn->fn, bfn->arg);
return FALSE;
}
@@ -2977,12 +3002,12 @@ struct binlog_log_query_st
};
static my_bool binlog_log_query_handlerton2(THD *thd,
- const handlerton *hton,
+ handlerton *hton,
void *args)
{
struct binlog_log_query_st *b= (struct binlog_log_query_st*)args;
if (hton->state == SHOW_OPTION_YES && hton->binlog_log_query)
- hton->binlog_log_query(thd,
+ hton->binlog_log_query(hton, thd,
b->binlog_command,
b->query,
b->query_length,
@@ -2995,10 +3020,10 @@ static my_bool binlog_log_query_handlerton(THD *thd,
st_plugin_int *plugin,
void *args)
{
- return binlog_log_query_handlerton2(thd, (const handlerton *)plugin->data, args);
+ return binlog_log_query_handlerton2(thd, (handlerton *)plugin->data, args);
}
-void ha_binlog_log_query(THD *thd, const handlerton *hton,
+void ha_binlog_log_query(THD *thd, handlerton *hton,
enum_binlog_command binlog_command,
const char *query, uint query_length,
const char *db, const char *table_name)
@@ -3296,7 +3321,7 @@ static my_bool exts_handlerton(THD *unused, st_plugin_int *plugin,
handlerton *hton= (handlerton *)plugin->data;
handler *file;
if (hton->state == SHOW_OPTION_YES && hton->create &&
- (file= hton->create((TABLE_SHARE*) 0, current_thd->mem_root)))
+ (file= hton->create(hton, (TABLE_SHARE*) 0, current_thd->mem_root)))
{
List_iterator_fast<char> it(*found_exts);
const char **ext, *old_ext;
@@ -3371,7 +3396,7 @@ static my_bool showstat_handlerton(THD *thd, st_plugin_int *plugin,
enum ha_stat_type stat= *(enum ha_stat_type *) arg;
handlerton *hton= (handlerton *)plugin->data;
if (hton->state == SHOW_OPTION_YES && hton->show_status &&
- hton->show_status(thd, stat_print, stat))
+ hton->show_status(hton, thd, stat_print, stat))
return TRUE;
return FALSE;
}
@@ -3405,7 +3430,7 @@ bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat)
}
else
result= db_type->show_status &&
- db_type->show_status(thd, stat_print, stat) ? 1 : 0;
+ db_type->show_status(db_type, thd, stat_print, stat) ? 1 : 0;
}
if (!result)
@@ -3726,7 +3751,7 @@ int example_of_iterator_using_for_logs_cleanup(handlerton *hton)
if (!hton->create_iterator)
return 1; /* iterator creator is not supported */
- if ((*hton->create_iterator)(HA_TRANSACTLOG_ITERATOR, &iterator) !=
+ if ((*hton->create_iterator)(hton, HA_TRANSACTLOG_ITERATOR, &iterator) !=
HA_ITERATOR_OK)
{
/* error during creation of log iterator or iterator is not supported */
diff --git a/sql/handler.h b/sql/handler.h
index 2efe9f3ce6a..5e26d9c7b63 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -449,7 +449,7 @@ class st_alter_tablespace : public Sql_alloc
ulonglong autoextend_size;
ulonglong max_size;
uint nodegroup_id;
- const handlerton *storage_engine;
+ handlerton *storage_engine;
bool wait_until_completed;
const char *ts_comment;
enum tablespace_access_mode ts_access_mode;
@@ -605,18 +605,18 @@ struct handlerton
this storage area - set it to something, so that MySQL would know
this storage engine was accessed in this connection
*/
- int (*close_connection)(THD *thd);
+ int (*close_connection)(handlerton *hton, THD *thd);
/*
sv points to an uninitialized storage area of requested size
(see savepoint_offset description)
*/
- int (*savepoint_set)(THD *thd, void *sv);
+ int (*savepoint_set)(handlerton *hton, THD *thd, void *sv);
/*
sv points to a storage area, that was earlier passed
to the savepoint_set call
*/
- int (*savepoint_rollback)(THD *thd, void *sv);
- int (*savepoint_release)(THD *thd, void *sv);
+ int (*savepoint_rollback)(handlerton *hton, THD *thd, void *sv);
+ int (*savepoint_release)(handlerton *hton, THD *thd, void *sv);
/*
'all' is true if it's a real commit, that makes persistent changes
'all' is false if it's not in fact a commit but an end of the
@@ -624,25 +624,25 @@ struct handlerton
NOTE 'all' is also false in auto-commit mode where 'end of statement'
and 'real commit' mean the same event.
*/
- int (*commit)(THD *thd, bool all);
- int (*rollback)(THD *thd, bool all);
- int (*prepare)(THD *thd, bool all);
- int (*recover)(XID *xid_list, uint len);
- int (*commit_by_xid)(XID *xid);
- int (*rollback_by_xid)(XID *xid);
- void *(*create_cursor_read_view)();
- void (*set_cursor_read_view)(void *);
- void (*close_cursor_read_view)(void *);
- handler *(*create)(TABLE_SHARE *table, MEM_ROOT *mem_root);
- void (*drop_database)(char* path);
- int (*panic)(enum ha_panic_function flag);
- int (*start_consistent_snapshot)(THD *thd);
- bool (*flush_logs)();
- bool (*show_status)(THD *thd, stat_print_fn *print, enum ha_stat_type stat);
+ int (*commit)(handlerton *hton, THD *thd, bool all);
+ int (*rollback)(handlerton *hton, THD *thd, bool all);
+ int (*prepare)(handlerton *hton, THD *thd, bool all);
+ int (*recover)(handlerton *hton, XID *xid_list, uint len);
+ int (*commit_by_xid)(handlerton *hton, XID *xid);
+ int (*rollback_by_xid)(handlerton *hton, XID *xid);
+ void *(*create_cursor_read_view)(handlerton *hton, THD *thd);
+ void (*set_cursor_read_view)(handlerton *hton, THD *thd, void *read_view);
+ void (*close_cursor_read_view)(handlerton *hton, THD *thd, void *read_view);
+ handler *(*create)(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root);
+ void (*drop_database)(handlerton *hton, char* path);
+ int (*panic)(handlerton *hton, enum ha_panic_function flag);
+ int (*start_consistent_snapshot)(handlerton *hton, THD *thd);
+ bool (*flush_logs)(handlerton *hton);
+ bool (*show_status)(handlerton *hton, THD *thd, stat_print_fn *print, enum ha_stat_type stat);
uint (*partition_flags)();
uint (*alter_table_flags)(uint flags);
- int (*alter_tablespace)(THD *thd, st_alter_tablespace *ts_info);
- int (*fill_files_table)(THD *thd,
+ int (*alter_tablespace)(handlerton *hton, THD *thd, st_alter_tablespace *ts_info);
+ int (*fill_files_table)(handlerton *hton, THD *thd,
struct st_table_list *tables,
class Item *cond);
uint32 flags; /* global handler flags */
@@ -650,11 +650,12 @@ struct handlerton
Those handlerton functions below are properly initialized at handler
init.
*/
- int (*binlog_func)(THD *thd, enum_binlog_func fn, void *arg);
- void (*binlog_log_query)(THD *thd, enum_binlog_command binlog_command,
+ int (*binlog_func)(handlerton *hton, THD *thd, enum_binlog_func fn, void *arg);
+ void (*binlog_log_query)(handlerton *hton, THD *thd,
+ enum_binlog_command binlog_command,
const char *query, uint query_length,
const char *db, const char *table_name);
- int (*release_temporary_latches)(THD *thd);
+ int (*release_temporary_latches)(handlerton *hton, THD *thd);
/*
Get log status.
@@ -663,21 +664,26 @@ struct handlerton
(see example of implementation in handler.cc, TRANS_LOG_MGM_EXAMPLE_CODE)
*/
- enum log_status (*get_log_status)(char *log);
+ enum log_status (*get_log_status)(handlerton *hton, char *log);
/*
Iterators creator.
Presence of the pointer should be checked before using
*/
enum handler_create_iterator_result
- (*create_iterator)(enum handler_iterator_type type,
+ (*create_iterator)(handlerton *hton, enum handler_iterator_type type,
struct handler_iterator *fill_this_in);
- int (*discover)(THD* thd, const char *db, const char *name,
- const void** frmblob, uint* frmlen);
- int (*find_files)(THD *thd,const char *db,const char *path,
+ int (*discover)(handlerton *hton, THD* thd, const char *db,
+ const char *name,
+ const void** frmblob,
+ uint* frmlen);
+ int (*find_files)(handlerton *hton, THD *thd,
+ const char *db,
+ const char *path,
const char *wild, bool dir, List<char> *files);
- int (*table_exists_in_engine)(THD* thd, const char *db,
+ int (*table_exists_in_engine)(handlerton *hton, THD* thd, const char *db,
const char *name);
+ uint32 license; /* Flag for Engine License */
};
@@ -691,6 +697,7 @@ struct handlerton
#define HTON_NOT_USER_SELECTABLE (1 << 5)
#define HTON_TEMPORARY_NOT_SUPPORTED (1 << 6) //Having temporary tables not supported
#define HTON_SUPPORT_LOG_TABLES (1 << 7) //Engine supports log tables
+#define HTON_NO_PARTITION (1 << 8) //You can not partition these tables
typedef struct st_thd_trans
{
@@ -893,7 +900,7 @@ class handler :public Sql_alloc
virtual void start_bulk_insert(ha_rows rows) {}
virtual int end_bulk_insert() {return 0; }
public:
- const handlerton *ht; /* storage engine of this handler */
+ handlerton *ht; /* storage engine of this handler */
byte *ref; /* Pointer to current row */
byte *dup_ref; /* Pointer to duplicate row */
@@ -943,7 +950,7 @@ public:
*/
Discrete_interval auto_inc_interval_for_cur_row;
- handler(const handlerton *ht_arg, TABLE_SHARE *share_arg)
+ handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
:table_share(share_arg), estimation_rows_to_insert(0), ht(ht_arg),
ref(0), key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
ref_length(sizeof(my_off_t)),
@@ -1716,7 +1723,7 @@ void trans_register_ha(THD *thd, bool all, handlerton *ht);
int ha_reset_logs(THD *thd);
int ha_binlog_index_purge_file(THD *thd, const char *file);
void ha_reset_slave(THD *thd);
-void ha_binlog_log_query(THD *thd, const handlerton *db_type,
+void ha_binlog_log_query(THD *thd, handlerton *db_type,
enum_binlog_command binlog_command,
const char *query, uint query_length,
const char *db, const char *table_name);
diff --git a/sql/item.cc b/sql/item.cc
index 39f1ac3feea..f8a8b4a6272 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1210,6 +1210,7 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array,
split_sum_func(thd, ref_pointer_array, fields);
}
else if ((type() == SUM_FUNC_ITEM || (used_tables() & ~PARAM_TABLE_BIT)) &&
+ type() != SUBSELECT_ITEM &&
(type() != REF_ITEM ||
((Item_ref*)this)->ref_type() == Item_ref::VIEW_REF))
{
@@ -3795,6 +3796,7 @@ void Item_field::cleanup()
I.e. we can drop 'field'.
*/
field= result_field= 0;
+ null_value= FALSE;
DBUG_VOID_RETURN;
}
@@ -5710,11 +5712,6 @@ void Item_trigger_field::cleanup()
}
-/*
- If item is a const function, calculate it and return a const item
- The original item is freed if not returned
-*/
-
Item_result item_cmp_type(Item_result a,Item_result b)
{
if (a == STRING_RESULT && b == STRING_RESULT)
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 0aa6d432966..135e4596996 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -66,12 +66,10 @@ static void agg_result_type(Item_result *type, Item **items, uint nitems)
/*
Aggregates result types from the array of items.
- SYNOPSIS:
+ SYNOPSIS
agg_cmp_type()
- thd thread handle
- type [out] the aggregated type
- items array of items to aggregate the type from
- nitems number of items in the array
+ items array of items to aggregate the type from
+ nitems number of items in the array
DESCRIPTION
This function aggregates result types from the array of items. Found type
@@ -79,12 +77,43 @@ static void agg_result_type(Item_result *type, Item **items, uint nitems)
Aggregation itself is performed by the item_cmp_type() function.
*/
-static void agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems)
+static Item_result agg_cmp_type(Item **items, uint nitems)
{
uint i;
- type[0]= items[0]->result_type();
+ Item_result type= items[0]->result_type();
for (i= 1 ; i < nitems ; i++)
- type[0]= item_cmp_type(type[0], items[i]->result_type());
+ type= item_cmp_type(type, items[i]->result_type());
+ return type;
+}
+
+
+/*
+ Collects different types for comparison of first item with each other items
+
+ SYNOPSIS
+ collect_cmp_types()
+ items Array of items to collect types from
+ nitems Number of items in the array
+
+ DESCRIPTION
+ This function collects different result types for comparison of the first
+ item in the list with each of the remaining items in the 'items' array.
+
+ RETURN
+ Bitmap of collected types
+*/
+
+static uint collect_cmp_types(Item **items, uint nitems)
+{
+ uint i;
+ uint found_types;
+ Item_result left_result= items[0]->result_type();
+ DBUG_ASSERT(nitems > 1);
+ found_types= 0;
+ for (i= 1; i < nitems ; i++)
+ found_types|= 1<< (uint)item_cmp_type(left_result,
+ items[i]->result_type());
+ return found_types;
}
@@ -1117,7 +1146,7 @@ void Item_func_between::fix_length_and_dec()
*/
if (!args[0] || !args[1] || !args[2])
return;
- agg_cmp_type(thd, &cmp_type, args, 3);
+ cmp_type= agg_cmp_type(args, 3);
if (cmp_type == STRING_RESULT &&
agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV, 1))
return;
@@ -1597,94 +1626,65 @@ Item_func_nullif::is_null()
return (null_value= (!cmp.compare() ? 1 : args[0]->null_value));
}
+
/*
- CASE expression
Return the matching ITEM or NULL if all compares (including else) failed
+
+ SYNOPSIS
+ find_item()
+ str Buffer string
+
+ DESCRIPTION
+ Find and return matching items for CASE or ELSE item if all compares
+ are failed or NULL if ELSE item isn't defined.
+
+ IMPLEMENTATION
+ In order to do correct comparisons of the CASE expression (the expression
+ between CASE and the first WHEN) with each WHEN expression several
+ comparators are used. One for each result type. CASE expression can be
+ evaluated up to # of different result types are used. To check whether
+ the CASE expression already was evaluated for a particular result type
+ a bit mapped variable value_added_map is used. Result types are mapped
+ to it according to their int values i.e. STRING_RESULT is mapped to bit
+ 0, REAL_RESULT to bit 1, so on.
+
+ RETURN
+ NULL - Nothing found and there is no ELSE expression defined
+ item - Found item or ELSE item if defined and all comparisons are
+ failed
*/
Item *Item_func_case::find_item(String *str)
{
- String *first_expr_str, *tmp;
- my_decimal *first_expr_dec, first_expr_dec_val;
- longlong first_expr_int;
- double first_expr_real;
- char buff[MAX_FIELD_WIDTH];
- String buff_str(buff,sizeof(buff),default_charset());
-
- /* These will be initialized later */
- LINT_INIT(first_expr_str);
- LINT_INIT(first_expr_int);
- LINT_INIT(first_expr_real);
- LINT_INIT(first_expr_dec);
-
- if (first_expr_num != -1)
- {
- switch (cmp_type)
- {
- case STRING_RESULT:
- // We can't use 'str' here as this may be overwritten
- if (!(first_expr_str= args[first_expr_num]->val_str(&buff_str)))
- return else_expr_num != -1 ? args[else_expr_num] : 0; // Impossible
- break;
- case INT_RESULT:
- first_expr_int= args[first_expr_num]->val_int();
- if (args[first_expr_num]->null_value)
- return else_expr_num != -1 ? args[else_expr_num] : 0;
- break;
- case REAL_RESULT:
- first_expr_real= args[first_expr_num]->val_real();
- if (args[first_expr_num]->null_value)
- return else_expr_num != -1 ? args[else_expr_num] : 0;
- break;
- case DECIMAL_RESULT:
- first_expr_dec= args[first_expr_num]->val_decimal(&first_expr_dec_val);
- if (args[first_expr_num]->null_value)
- return else_expr_num != -1 ? args[else_expr_num] : 0;
- break;
- case ROW_RESULT:
- default:
- // This case should never be chosen
- DBUG_ASSERT(0);
- break;
- }
- }
+ uint value_added_map= 0;
- // Compare every WHEN argument with it and return the first match
- for (uint i=0 ; i < ncases ; i+=2)
+ if (first_expr_num == -1)
{
- if (first_expr_num == -1)
+ for (uint i=0 ; i < ncases ; i+=2)
{
// No expression between CASE and the first WHEN
if (args[i]->val_bool())
return args[i+1];
continue;
}
- switch (cmp_type) {
- case STRING_RESULT:
- if ((tmp=args[i]->val_str(str))) // If not null
- if (sortcmp(tmp,first_expr_str,cmp_collation.collation)==0)
- return args[i+1];
- break;
- case INT_RESULT:
- if (args[i]->val_int()==first_expr_int && !args[i]->null_value)
- return args[i+1];
- break;
- case REAL_RESULT:
- if (args[i]->val_real() == first_expr_real && !args[i]->null_value)
- return args[i+1];
- break;
- case DECIMAL_RESULT:
+ }
+ else
+ {
+ /* Compare every WHEN argument with it and return the first match */
+ for (uint i=0 ; i < ncases ; i+=2)
{
- my_decimal value;
- if (my_decimal_cmp(args[i]->val_decimal(&value), first_expr_dec) == 0)
- return args[i+1];
- break;
- }
- case ROW_RESULT:
- default:
- // This case should never be chosen
- DBUG_ASSERT(0);
- break;
+ cmp_type= item_cmp_type(left_result_type, args[i]->result_type());
+ DBUG_ASSERT(cmp_type != ROW_RESULT);
+ DBUG_ASSERT(cmp_items[(uint)cmp_type]);
+ if (!(value_added_map & (1<<(uint)cmp_type)))
+ {
+ cmp_items[(uint)cmp_type]->store_value(args[first_expr_num]);
+ if ((null_value=args[first_expr_num]->null_value))
+ return else_expr_num != -1 ? args[else_expr_num] : 0;
+ value_added_map|= 1<<(uint)cmp_type;
+ }
+ if (!cmp_items[(uint)cmp_type]->cmp(args[i]) && !args[i]->null_value)
+ return args[i + 1];
}
}
// No, WHEN clauses all missed, return ELSE expression
@@ -1791,7 +1791,7 @@ void Item_func_case::fix_length_and_dec()
Item **agg;
uint nagg;
THD *thd= current_thd;
-
+ uint found_types= 0;
if (!(agg= (Item**) sql_alloc(sizeof(Item*)*(ncases+1))))
return;
@@ -1818,16 +1818,31 @@ void Item_func_case::fix_length_and_dec()
*/
if (first_expr_num != -1)
{
+ uint i;
agg[0]= args[first_expr_num];
+ left_result_type= agg[0]->result_type();
+
for (nagg= 0; nagg < ncases/2 ; nagg++)
agg[nagg+1]= args[nagg*2];
nagg++;
- agg_cmp_type(thd, &cmp_type, agg, nagg);
- if ((cmp_type == STRING_RESULT) &&
- agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV, 1))
- return;
+ found_types= collect_cmp_types(agg, nagg);
+
+ for (i= 0; i <= (uint)DECIMAL_RESULT; i++)
+ {
+ if (found_types & (1 << i) && !cmp_items[i])
+ {
+ DBUG_ASSERT((Item_result)i != ROW_RESULT);
+ if ((Item_result)i == STRING_RESULT &&
+ agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV, 1))
+ return;
+ if (!(cmp_items[i]=
+ cmp_item::get_comparator((Item_result)i,
+ cmp_collation.collation)))
+ return;
+ }
+ }
}
-
+
if (else_expr_num == -1 || args[else_expr_num]->maybe_null)
maybe_null=1;
@@ -2412,16 +2427,14 @@ static int srtcmp_in(CHARSET_INFO *cs, const String *x,const String *y)
void Item_func_in::fix_length_and_dec()
{
Item **arg, **arg_end;
- uint const_itm= 1;
+ bool const_itm= 1;
THD *thd= current_thd;
+ uint found_types= 0;
+ uint type_cnt= 0, i;
+ left_result_type= args[0]->result_type();
+ found_types= collect_cmp_types(args, arg_count);
- agg_cmp_type(thd, &cmp_type, args, arg_count);
-
- if (cmp_type == STRING_RESULT &&
- agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1))
- return;
-
- for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++)
+ for (arg= args + 1, arg_end= args + arg_count; arg != arg_end ; arg++)
{
if (!arg[0]->const_item())
{
@@ -2429,26 +2442,39 @@ void Item_func_in::fix_length_and_dec()
break;
}
}
-
+ for (i= 0; i <= (uint)DECIMAL_RESULT; i++)
+ {
+ if (found_types & 1 << i)
+ (type_cnt)++;
+ }
/*
- Row item with NULLs inside can return NULL or FALSE =>
+ Row item with NULLs inside can return NULL or FALSE =>
they can't be processed as static
*/
- if (const_itm && !nulls_in_row())
+ if (type_cnt == 1 && const_itm && !nulls_in_row())
{
+ uint tmp_type;
+ Item_result cmp_type;
+ /* Only one cmp type was found. Extract it here */
+ for (tmp_type= 0; found_types - 1; found_types>>= 1)
+ tmp_type++;
+ cmp_type= (Item_result)tmp_type;
+
switch (cmp_type) {
case STRING_RESULT:
- array=new in_string(arg_count-1,(qsort2_cmp) srtcmp_in,
+ if (agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1))
+ return;
+ array=new in_string(arg_count - 1,(qsort2_cmp) srtcmp_in,
cmp_collation.collation);
break;
case INT_RESULT:
- array= new in_longlong(arg_count-1);
+ array= new in_longlong(arg_count - 1);
break;
case REAL_RESULT:
- array= new in_double(arg_count-1);
+ array= new in_double(arg_count - 1);
break;
case ROW_RESULT:
- array= new in_row(arg_count-1, args[0]);
+ array= new in_row(arg_count - 1, args[0]);
break;
case DECIMAL_RESULT:
array= new in_decimal(arg_count - 1);
@@ -2468,15 +2494,25 @@ void Item_func_in::fix_length_and_dec()
else
have_null= 1;
}
- if ((array->used_count=j))
+ if ((array->used_count= j))
array->sort();
}
}
else
{
- in_item= cmp_item::get_comparator(cmp_type, cmp_collation.collation);
- if (cmp_type == STRING_RESULT)
- in_item->cmp_charset= cmp_collation.collation;
+ for (i= 0; i <= (uint) DECIMAL_RESULT; i++)
+ {
+ if (found_types & (1 << i) && !cmp_items[i])
+ {
+ if ((Item_result)i == STRING_RESULT &&
+ agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1))
+ return;
+ if (!(cmp_items[i]=
+ cmp_item::get_comparator((Item_result)i,
+ cmp_collation.collation)))
+ return;
+ }
+ }
}
maybe_null= args[0]->maybe_null;
max_length= 1;
@@ -2495,25 +2531,61 @@ void Item_func_in::print(String *str)
}
+/*
+ Evaluate the function and return its value.
+
+ SYNOPSIS
+ val_int()
+
+ DESCRIPTION
+ Evaluate the function and return its value.
+
+ IMPLEMENTATION
+ If the array object is defined then the value of the function is
+ calculated by means of this array.
+ Otherwise several cmp_item objects are used in order to do correct
+ comparison of left expression and an expression from the values list.
+ One cmp_item object correspond to one used comparison type. Left
+ expression can be evaluated up to number of different used comparison
+ types. A bit mapped variable value_added_map is used to check whether
+ the left expression already was evaluated for a particular result type.
+ Result types are mapped to it according to their integer values i.e.
+ STRING_RESULT is mapped to bit 0, REAL_RESULT to bit 1, so on.
+
+ RETURN
+ Value of the function
+*/
+
longlong Item_func_in::val_int()
{
+ cmp_item *in_item;
DBUG_ASSERT(fixed == 1);
+ uint value_added_map= 0;
if (array)
{
int tmp=array->find(args[0]);
null_value=args[0]->null_value || (!tmp && have_null);
return (longlong) (!null_value && tmp != negated);
}
- in_item->store_value(args[0]);
- if ((null_value=args[0]->null_value))
- return 0;
- have_null= 0;
- for (uint i=1 ; i < arg_count ; i++)
+
+ for (uint i= 1 ; i < arg_count ; i++)
{
+ Item_result cmp_type= item_cmp_type(left_result_type, args[i]->result_type());
+ in_item= cmp_items[(uint)cmp_type];
+ DBUG_ASSERT(in_item);
+ if (!(value_added_map & (1 << (uint)cmp_type)))
+ {
+ in_item->store_value(args[0]);
+ if ((null_value=args[0]->null_value))
+ return 0;
+ have_null= 0;
+ value_added_map|= 1 << (uint)cmp_type;
+ }
if (!in_item->cmp(args[i]) && !args[i]->null_value)
return (longlong) (!negated);
have_null|= args[i]->null_value;
}
+
null_value= have_null;
return (longlong) (!null_value && negated);
}
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 18badc71c1d..1b88153b049 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -588,50 +588,6 @@ public:
bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
-
-class Item_func_case :public Item_func
-{
- int first_expr_num, else_expr_num;
- enum Item_result cached_result_type;
- String tmp_value;
- uint ncases;
- Item_result cmp_type;
- DTCollation cmp_collation;
-public:
- Item_func_case(List<Item> &list, Item *first_expr_arg, Item *else_expr_arg)
- :Item_func(), first_expr_num(-1), else_expr_num(-1),
- cached_result_type(INT_RESULT)
- {
- ncases= list.elements;
- if (first_expr_arg)
- {
- first_expr_num= list.elements;
- list.push_back(first_expr_arg);
- }
- if (else_expr_arg)
- {
- else_expr_num= list.elements;
- list.push_back(else_expr_arg);
- }
- set_arguments(list);
- }
- double val_real();
- longlong val_int();
- String *val_str(String *);
- my_decimal *val_decimal(my_decimal *);
- bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec();
- uint decimal_precision() const;
- table_map not_null_tables() const { return 0; }
- enum Item_result result_type () const { return cached_result_type; }
- const char *func_name() const { return "case"; }
- void print(String *str);
- Item *find_item(String *str);
- CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
- bool check_partition_func_processor(byte *int_arg) {return FALSE;}
-};
-
-
/* Functions to handle the optimized IN */
@@ -686,6 +642,7 @@ public:
{
return test(compare(collation, base + pos1*size, base + pos2*size));
}
+ virtual Item_result result_type()= 0;
};
class in_string :public in_vector
@@ -707,6 +664,7 @@ public:
Item_string *to= (Item_string*)item;
to->str_value= *str;
}
+ Item_result result_type() { return STRING_RESULT; }
};
class in_longlong :public in_vector
@@ -729,6 +687,7 @@ public:
{
((Item_int*)item)->value= ((longlong*)base)[pos];
}
+ Item_result result_type() { return INT_RESULT; }
};
class in_double :public in_vector
@@ -746,6 +705,7 @@ public:
{
((Item_float*)item)->value= ((double*) base)[pos];
}
+ Item_result result_type() { return REAL_RESULT; }
};
@@ -766,6 +726,8 @@ public:
Item_decimal *item_dec= (Item_decimal*)item;
item_dec->set_decimal_value(dec);
}
+ Item_result result_type() { return DECIMAL_RESULT; }
+
};
@@ -796,7 +758,9 @@ class cmp_item_string :public cmp_item
protected:
String *value_res;
public:
+ cmp_item_string () {}
cmp_item_string (CHARSET_INFO *cs) { cmp_charset= cs; }
+ void set_charset(CHARSET_INFO *cs) { cmp_charset= cs; }
friend class cmp_item_sort_string;
friend class cmp_item_sort_string_in_static;
};
@@ -807,6 +771,8 @@ protected:
char value_buff[STRING_BUFFER_USUAL_SIZE];
String value;
public:
+ cmp_item_sort_string():
+ cmp_item_string() {}
cmp_item_sort_string(CHARSET_INFO *cs):
cmp_item_string(cs),
value(value_buff, sizeof(value_buff), cs) {}
@@ -828,6 +794,11 @@ public:
return sortcmp(value_res, cmp->value_res, cmp_charset);
}
cmp_item *make_same();
+ void set_charset(CHARSET_INFO *cs)
+ {
+ cmp_charset= cs;
+ value.set_quick(value_buff, sizeof(value_buff), cs);
+ }
};
class cmp_item_int :public cmp_item
@@ -908,6 +879,7 @@ public:
~in_row();
void set(uint pos,Item *item);
byte *get_value(Item *item);
+ Item_result result_type() { return ROW_RESULT; }
};
/*
@@ -943,18 +915,109 @@ public:
}
};
+
+/*
+ The class Item_func_case is the CASE ... WHEN ... THEN ... END function
+ implementation.
+
+ When there is no expression between CASE and the first WHEN
+ (the CASE expression) then this function simple checks all WHEN expressions
+ one after another. When some WHEN expression evaluated to TRUE then the
+ value of the corresponding THEN expression is returned.
+
+ When the CASE expression is specified then it is compared to each WHEN
+ expression individually. When an equal WHEN expression is found
+ corresponding THEN expression is returned.
+ In order to do correct comparisons several comparators are used. One for
+ each result type. Different result types that are used in particular
+ CASE ... END expression are collected in the fix_length_and_dec() member
+ function and only comparators for there result types are used.
+*/
+
+class Item_func_case :public Item_func
+{
+ int first_expr_num, else_expr_num;
+ enum Item_result cached_result_type, left_result_type;
+ String tmp_value;
+ uint ncases;
+ Item_result cmp_type;
+ DTCollation cmp_collation;
+ cmp_item *cmp_items[5]; /* For all result types */
+ cmp_item *case_item;
+public:
+ Item_func_case(List<Item> &list, Item *first_expr_arg, Item *else_expr_arg)
+ :Item_func(), first_expr_num(-1), else_expr_num(-1),
+ cached_result_type(INT_RESULT), left_result_type(INT_RESULT), case_item(0)
+ {
+ ncases= list.elements;
+ if (first_expr_arg)
+ {
+ first_expr_num= list.elements;
+ list.push_back(first_expr_arg);
+ }
+ if (else_expr_arg)
+ {
+ else_expr_num= list.elements;
+ list.push_back(else_expr_arg);
+ }
+ set_arguments(list);
+ bzero(&cmp_items, sizeof(cmp_items));
+ }
+ double val_real();
+ longlong val_int();
+ String *val_str(String *);
+ my_decimal *val_decimal(my_decimal *);
+ bool fix_fields(THD *thd, Item **ref);
+ void fix_length_and_dec();
+ uint decimal_precision() const;
+ table_map not_null_tables() const { return 0; }
+ enum Item_result result_type () const { return cached_result_type; }
+ const char *func_name() const { return "case"; }
+ void print(String *str);
+ Item *find_item(String *str);
+ CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
+ bool check_partition_func_processor(byte *bool_arg) { return FALSE;}
+ void cleanup()
+ {
+ uint i;
+ DBUG_ENTER("Item_func_case::cleanup");
+ Item_func::cleanup();
+ for (i= 0; i <= (uint)DECIMAL_RESULT; i++)
+ {
+ delete cmp_items[i];
+ cmp_items[i]= 0;
+ }
+ DBUG_VOID_RETURN;
+ }
+};
+
+/*
+ The Item_func_in class implements the in_expr IN(values_list) function.
+
+ The current implementation distinguishes 2 cases:
+ 1) all items in the value_list are constants and have the same
+ result type. This case is handled by in_vector class.
+ 2) items in the value_list have different result types or there is some
+ non-constant items.
+ In this case Item_func_in employs several cmp_item objects to performs
+ comparisons of in_expr and an item from the values_list. One cmp_item
+ object for each result type. Different result types are collected in the
+ fix_length_and_dec() member function by means of collect_cmp_types()
+ function.
+*/
class Item_func_in :public Item_func_opt_neg
{
public:
- Item_result cmp_type;
in_vector *array;
- cmp_item *in_item;
bool have_null;
+ Item_result left_result_type;
+ cmp_item *cmp_items[5]; /* One cmp_item for each result type */
DTCollation cmp_collation;
Item_func_in(List<Item> &list)
- :Item_func_opt_neg(list), array(0), in_item(0), have_null(0)
+ :Item_func_opt_neg(list), array(0), have_null(0)
{
+ bzero(&cmp_items, sizeof(cmp_items));
allowed_arg_cols= 0; // Fetch this value from first argument
}
longlong val_int();
@@ -963,12 +1026,16 @@ public:
uint decimal_precision() const { return 1; }
void cleanup()
{
+ uint i;
DBUG_ENTER("Item_func_in::cleanup");
Item_int_func::cleanup();
delete array;
- delete in_item;
array= 0;
- in_item= 0;
+ for (i= 0; i <= (uint)DECIMAL_RESULT; i++)
+ {
+ delete cmp_items[i];
+ cmp_items[i]= 0;
+ }
DBUG_VOID_RETURN;
}
optimize_type select_optimize() const
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 851551a86d9..7a82dd753b3 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -1149,12 +1149,13 @@ void Item_func_substr::fix_length_and_dec()
}
if (arg_count == 3 && args[2]->const_item())
{
- int32 length= (int32) args[2]->val_int() * collation.collation->mbmaxlen;
+ int32 length= (int32) args[2]->val_int();
if (length <= 0)
max_length=0; /* purecov: inspected */
else
set_if_smaller(max_length,(uint) length);
}
+ max_length*= collation.collation->mbmaxlen;
}
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index cf122f565ff..73e2c5e6935 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -246,7 +246,27 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref)
aggr_sl->inner_sum_func_list->next= this;
}
aggr_sl->inner_sum_func_list= this;
-
+ aggr_sl->with_sum_func= 1;
+
+ /*
+ Mark Item_subselect(s) as containing aggregate function all the way up
+ to aggregate function's calculation context.
+ Note that we must not mark the Item of calculation context itself
+ because with_sum_func on the calculation context st_select_lex is
+ already set above.
+
+ with_sum_func being set for an Item means that this Item refers
+ (somewhere in it, e.g. one of its arguments if it's a function) directly
+ or through intermediate items to an aggregate function that is calculated
+ in a context "outside" of the Item (e.g. in the current or outer select).
+
+ with_sum_func being set for an st_select_lex means that this st_select_lex
+ has aggregate functions directly referenced (i.e. not through a sub-select).
+ */
+ for (sl= thd->lex->current_select;
+ sl && sl != aggr_sl && sl->master_unit()->item;
+ sl= sl->master_unit()->outer_select() )
+ sl->master_unit()->item->with_sum_func= 1;
}
return FALSE;
}
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index a259fd90485..d32adde5e64 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -65,7 +65,7 @@ static bool make_datetime(date_time_format_types format, TIME *ltime,
ltime->hour, ltime->minute, ltime->second);
break;
case TIME_MICROSECOND:
- length= cs->cset->snprintf(cs, buff, length, "%s%02d:%02d:%02d.%06d",
+ length= cs->cset->snprintf(cs, buff, length, "%s%02d:%02d:%02d.%06ld",
ltime->neg ? "-" : "",
ltime->hour, ltime->minute, ltime->second,
ltime->second_part);
@@ -82,7 +82,7 @@ static bool make_datetime(date_time_format_types format, TIME *ltime,
break;
case DATE_TIME_MICROSECOND:
length= cs->cset->snprintf(cs, buff, length,
- "%04d-%02d-%02d %02d:%02d:%02d.%06d",
+ "%04d-%02d-%02d %02d:%02d:%02d.%06ld",
ltime->year, ltime->month, ltime->day,
ltime->hour, ltime->minute, ltime->second,
ltime->second_part);
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index dfa2d2a7325..44a2b690bac 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -105,6 +105,7 @@ typedef struct my_xpath_st
String *context_cache; /* last context provider */
String *pxml; /* Parsed XML, an array of MY_XML_NODE */
CHARSET_INFO *cs; /* character set/collation string comparison */
+ int error;
} MY_XPATH;
@@ -913,7 +914,9 @@ static Item *eq_func_reverse(int oper, Item *a, Item *b)
RETURN
The newly created item.
*/
-static Item *create_comparator(MY_XPATH *xpath, int oper, Item *a, Item *b)
+static Item *create_comparator(MY_XPATH *xpath,
+ int oper, MY_XPATH_LEX *context,
+ Item *a, Item *b)
{
if (a->type() != Item::XPATH_NODESET &&
b->type() != Item::XPATH_NODESET)
@@ -923,6 +926,13 @@ static Item *create_comparator(MY_XPATH *xpath, int oper, Item *a, Item *b)
else if (a->type() == Item::XPATH_NODESET &&
b->type() == Item::XPATH_NODESET)
{
+ uint len= context->end - context->beg;
+ set_if_bigger(len, 32);
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "XPATH error: "
+ "comparison of two nodesets is not supported: '%.*s'",
+ MYF(0), len, context->beg);
+
return 0; // TODO: Comparison of two nodesets
}
else
@@ -1430,7 +1440,7 @@ my_xpath_lex_scan(MY_XPATH *xpath,
static int
my_xpath_parse_term(MY_XPATH *xpath, int term)
{
- if (xpath->lasttok.term == term)
+ if (xpath->lasttok.term == term && !xpath->error)
{
xpath->prevtok= xpath->lasttok;
my_xpath_lex_scan(xpath, &xpath->lasttok,
@@ -1558,8 +1568,9 @@ static int my_xpath_parse_AbsoluteLocationPath(MY_XPATH *xpath)
return my_xpath_parse_RelativeLocationPath(xpath);
}
- return my_xpath_parse_term(xpath, MY_XPATH_LEX_EOF) ||
- my_xpath_parse_RelativeLocationPath(xpath);
+ my_xpath_parse_RelativeLocationPath(xpath);
+
+ return (xpath->error == 0);
}
@@ -1596,7 +1607,10 @@ static int my_xpath_parse_RelativeLocationPath(MY_XPATH *xpath)
"*", 1,
xpath->pxml, 1);
if (!my_xpath_parse_Step(xpath))
+ {
+ xpath->error= 1;
return 0;
+ }
}
return 1;
}
@@ -1633,10 +1647,16 @@ my_xpath_parse_AxisSpecifier_NodeTest_opt_Predicate_list(MY_XPATH *xpath)
xpath->context_cache= context_cache;
if(!my_xpath_parse_PredicateExpr(xpath))
+ {
+ xpath->error= 1;
return 0;
+ }
if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_RB))
+ {
+ xpath->error= 1;
return 0;
+ }
xpath->item= nodeset2bool(xpath, xpath->item);
@@ -1893,7 +1913,10 @@ static int my_xpath_parse_UnionExpr(MY_XPATH *xpath)
if (!my_xpath_parse_PathExpr(xpath)
|| xpath->item->type() != Item::XPATH_NODESET)
+ {
+ xpath->error= 1;
return 0;
+ }
xpath->item= new Item_nodeset_func_union(prev, xpath->item, xpath->pxml);
}
return 1;
@@ -1929,6 +1952,7 @@ static int my_xpath_parse_PathExpr(MY_XPATH *xpath)
{
return my_xpath_parse_LocationPath(xpath) ||
my_xpath_parse_FilterExpr_opt_slashes_RelativeLocationPath(xpath);
+
}
@@ -1975,7 +1999,10 @@ static int my_xpath_parse_OrExpr(MY_XPATH *xpath)
{
Item *prev= xpath->item;
if (!my_xpath_parse_AndExpr(xpath))
+ {
return 0;
+ xpath->error= 1;
+ }
xpath->item= new Item_cond_or(nodeset2bool(xpath, prev),
nodeset2bool(xpath, xpath->item));
}
@@ -2003,7 +2030,10 @@ static int my_xpath_parse_AndExpr(MY_XPATH *xpath)
{
Item *prev= xpath->item;
if (!my_xpath_parse_EqualityExpr(xpath))
+ {
+ xpath->error= 1;
return 0;
+ }
xpath->item= new Item_cond_and(nodeset2bool(xpath,prev),
nodeset2bool(xpath,xpath->item));
@@ -2057,17 +2087,26 @@ static int my_xpath_parse_EqualityOperator(MY_XPATH *xpath)
}
static int my_xpath_parse_EqualityExpr(MY_XPATH *xpath)
{
+ MY_XPATH_LEX operator_context;
if (!my_xpath_parse_RelationalExpr(xpath))
return 0;
+
+ operator_context= xpath->lasttok;
while (my_xpath_parse_EqualityOperator(xpath))
{
Item *prev= xpath->item;
int oper= xpath->extra;
if (!my_xpath_parse_RelationalExpr(xpath))
+ {
+ xpath->error= 1;
return 0;
+ }
- if (!(xpath->item= create_comparator(xpath, oper, prev, xpath->item)))
+ if (!(xpath->item= create_comparator(xpath, oper, &operator_context,
+ prev, xpath->item)))
return 0;
+
+ operator_context= xpath->lasttok;
}
return 1;
}
@@ -2109,18 +2148,25 @@ static int my_xpath_parse_RelationalOperator(MY_XPATH *xpath)
}
static int my_xpath_parse_RelationalExpr(MY_XPATH *xpath)
{
+ MY_XPATH_LEX operator_context;
if (!my_xpath_parse_AdditiveExpr(xpath))
return 0;
+ operator_context= xpath->lasttok;
while (my_xpath_parse_RelationalOperator(xpath))
{
Item *prev= xpath->item;
int oper= xpath->extra;
if (!my_xpath_parse_AdditiveExpr(xpath))
+ {
+ xpath->error= 1;
return 0;
+ }
- if (!(xpath->item= create_comparator(xpath, oper, prev, xpath->item)))
+ if (!(xpath->item= create_comparator(xpath, oper, &operator_context,
+ prev, xpath->item)))
return 0;
+ operator_context= xpath->lasttok;
}
return 1;
}
@@ -2153,7 +2199,10 @@ static int my_xpath_parse_AdditiveExpr(MY_XPATH *xpath)
int oper= xpath->prevtok.term;
Item *prev= xpath->item;
if (!my_xpath_parse_MultiplicativeExpr(xpath))
+ {
+ xpath->error= 1;
return 0;
+ }
if (oper == MY_XPATH_LEX_PLUS)
xpath->item= new Item_func_plus(prev, xpath->item);
@@ -2198,7 +2247,10 @@ static int my_xpath_parse_MultiplicativeExpr(MY_XPATH *xpath)
int oper= xpath->prevtok.term;
Item *prev= xpath->item;
if (!my_xpath_parse_UnaryExpr(xpath))
+ {
+ xpath->error= 1;
return 0;
+ }
switch (oper)
{
case MY_XPATH_LEX_ASTERISK:
diff --git a/sql/key.cc b/sql/key.cc
index 69557d971e8..be21bf11c3c 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -359,31 +359,29 @@ void key_unpack(String *to,TABLE *table,uint idx)
/*
- Return 1 if any field in a list is part of key or the key uses a field
- that is automaticly updated (like a timestamp)
+ Check if key uses field that is marked in passed field bitmap.
+
+ SYNOPSIS
+ is_key_used()
+ table TABLE object with which keys and fields are associated.
+ idx Key to be checked.
+ fields Bitmap of fields to be checked.
+
+ NOTE
+ This function uses TABLE::tmp_set bitmap so the caller should care
+ about saving/restoring its state if it also uses this bitmap.
+
+ RETURN VALUE
+ TRUE Key uses field from bitmap
+ FALSE Otherwise
*/
-bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields)
+bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields)
{
- List_iterator_fast<Item> f(fields);
- KEY_PART_INFO *key_part,*key_part_end;
- for (key_part=table->key_info[idx].key_part,key_part_end=key_part+
- table->key_info[idx].key_parts ;
- key_part < key_part_end;
- key_part++)
- {
- Item_field *field;
-
- if (key_part->field == table->timestamp_field)
- return 1; // Can't be used for update
-
- f.rewind();
- while ((field=(Item_field*) f++))
- {
- if (key_part->field->eq(field->field))
- return 1;
- }
- }
+ bitmap_clear_all(&table->tmp_set);
+ table->mark_columns_used_by_index_no_reset(idx, &table->tmp_set);
+ if (bitmap_is_overlapping(&table->tmp_set, fields))
+ return 1;
/*
If table handler has primary key as part of the index, check that primary
@@ -391,7 +389,7 @@ bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields)
*/
if (idx != table->s->primary_key && table->s->primary_key < MAX_KEY &&
(table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
- return check_if_key_used(table, table->s->primary_key, fields);
+ return is_key_used(table, table->s->primary_key, fields);
return 0;
}
diff --git a/sql/log.cc b/sql/log.cc
index 9875b16f8e5..a1ed9bd6df3 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -32,10 +32,22 @@
#include <mysql/plugin.h>
+/*
+ Define placement versions of operator new and operator delete since
+ we cannot be sure that the <new> include exists.
+ */
+inline void *operator new(size_t, void *ptr) { return ptr; }
+inline void *operator new[](size_t, void *ptr) { return ptr; }
+inline void operator delete(void*, void*) { /* Do nothing */ }
+inline void operator delete[](void*, void*) { /* Do nothing */ }
+
/* max size of the log message */
#define MAX_LOG_BUFFER_SIZE 1024
#define MAX_USER_HOST_SIZE 512
#define MAX_TIME_SIZE 32
+#define MY_OFF_T_UNDEF (~(my_off_t)0UL)
+
+#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
LOGGER logger;
@@ -46,13 +58,13 @@ static Muted_query_log_event invisible_commit;
static bool test_if_number(const char *str,
long *res, bool allow_wildcards);
-static int binlog_init();
-static int binlog_close_connection(THD *thd);
-static int binlog_savepoint_set(THD *thd, void *sv);
-static int binlog_savepoint_rollback(THD *thd, void *sv);
-static int binlog_commit(THD *thd, bool all);
-static int binlog_rollback(THD *thd, bool all);
-static int binlog_prepare(THD *thd, bool all);
+static int binlog_init(void *p);
+static int binlog_close_connection(handlerton *hton, THD *thd);
+static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv);
+static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv);
+static int binlog_commit(handlerton *hton, THD *thd, bool all);
+static int binlog_rollback(handlerton *hton, THD *thd, bool all);
+static int binlog_prepare(handlerton *hton, THD *thd, bool all);
sql_print_message_func sql_print_message_handlers[3] =
{
@@ -70,23 +82,92 @@ char *make_default_log_name(char *buff,const char* log_ext)
}
/*
- This is a POD. Please keep it that way!
-
- Don't add constructors, destructors, or virtual functions.
+ Helper class to store binary log transaction data.
*/
-struct binlog_trx_data {
+class binlog_trx_data {
+public:
+ binlog_trx_data()
+#ifdef HAVE_ROW_BASED_REPLICATION
+ : m_pending(0), before_stmt_pos(MY_OFF_T_UNDEF)
+#endif
+ {
+ trans_log.end_of_file= max_binlog_cache_size;
+ }
+
+ ~binlog_trx_data()
+ {
+#ifdef HAVE_ROW_BASED_REPLICATION
+ DBUG_ASSERT(pending() == NULL);
+#endif
+ close_cached_file(&trans_log);
+ }
+
+ my_off_t position() const {
+ return my_b_tell(&trans_log);
+ }
+
bool empty() const
{
#ifdef HAVE_ROW_BASED_REPLICATION
- return pending == NULL && my_b_tell(&trans_log) == 0;
+ return pending() == NULL && my_b_tell(&trans_log) == 0;
#else
return my_b_tell(&trans_log) == 0;
#endif
}
- binlog_trx_data() {}
+
+ /*
+ Truncate the transaction cache to a certain position. This
+ includes deleting the pending event.
+ */
+ void truncate(my_off_t pos)
+ {
+#ifdef HAVE_ROW_BASED_REPLICATION
+ delete pending();
+ set_pending(0);
+#endif
+ reinit_io_cache(&trans_log, WRITE_CACHE, pos, 0, 0);
+ }
+
+ /*
+ Reset the entire contents of the transaction cache, emptying it
+ completely.
+ */
+ void reset() {
+ if (!empty())
+ truncate(0);
+#ifdef HAVE_ROW_BASED_REPLICATION
+ before_stmt_pos= MY_OFF_T_UNDEF;
+#endif
+ trans_log.end_of_file= max_binlog_cache_size;
+ }
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+ Rows_log_event *pending() const
+ {
+ return m_pending;
+ }
+
+ void set_pending(Rows_log_event *const pending)
+ {
+ m_pending= pending;
+ }
+#endif
+
IO_CACHE trans_log; // The transaction cache
+
+private:
#ifdef HAVE_ROW_BASED_REPLICATION
- Rows_log_event *pending; // The pending binrows event
+ /*
+ Pending binrows event. This event is the event where the rows are
+ currently written.
+ */
+ Rows_log_event *m_pending;
+
+public:
+ /*
+ Binlog position before the start of the current statement.
+ */
+ my_off_t before_stmt_pos;
#endif
};
@@ -1149,6 +1230,69 @@ void Log_to_csv_event_handler::
}
+ /*
+ Save position of binary log transaction cache.
+
+ SYNPOSIS
+ binlog_trans_log_savepos()
+
+ thd The thread to take the binlog data from
+ pos Pointer to variable where the position will be stored
+
+ DESCRIPTION
+
+ Save the current position in the binary log transaction cache into
+ the variable pointed to by 'pos'
+ */
+
+static void
+binlog_trans_log_savepos(THD *thd, my_off_t *pos)
+{
+ DBUG_ENTER("binlog_trans_log_savepos");
+ DBUG_ASSERT(pos != NULL);
+ if (thd->ha_data[binlog_hton->slot] == NULL)
+ thd->binlog_setup_trx_data();
+ binlog_trx_data *const trx_data=
+ (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
+ DBUG_ASSERT(mysql_bin_log.is_open());
+ *pos= trx_data->position();
+ DBUG_PRINT("return", ("*pos=%u", *pos));
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Truncate the binary log transaction cache.
+
+ SYNPOSIS
+ binlog_trans_log_truncate()
+
+ thd The thread to take the binlog data from
+ pos Position to truncate to
+
+ DESCRIPTION
+
+ Truncate the binary log to the given position. Will not change
+ anything else.
+
+ */
+static void
+binlog_trans_log_truncate(THD *thd, my_off_t pos)
+{
+ DBUG_ENTER("binlog_trans_log_truncate");
+ DBUG_PRINT("enter", ("pos=%u", pos));
+
+ DBUG_ASSERT(thd->ha_data[binlog_hton->slot] != NULL);
+ /* Only true if binlog_trans_log_savepos() wasn't called before */
+ DBUG_ASSERT(pos != ~(my_off_t) 0);
+
+ binlog_trx_data *const trx_data=
+ (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
+ trx_data->truncate(pos);
+ DBUG_VOID_RETURN;
+}
+
+
/*
this function is mostly a placeholder.
conceptually, binlog initialization (now mostly done in MYSQL_BIN_LOG::open)
@@ -1171,30 +1315,66 @@ int binlog_init(void *p)
return 0;
}
-static int binlog_close_connection(THD *thd)
+static int binlog_close_connection(handlerton *hton, THD *thd)
{
binlog_trx_data *const trx_data=
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
- IO_CACHE *trans_log= &trx_data->trans_log;
DBUG_ASSERT(mysql_bin_log.is_open() && trx_data->empty());
- close_cached_file(trans_log);
thd->ha_data[binlog_hton->slot]= 0;
+ trx_data->~binlog_trx_data();
my_free((gptr)trx_data, MYF(0));
return 0;
}
+/*
+ End a transaction.
+
+ SYNOPSIS
+ binlog_end_trans()
+
+ thd The thread whose transaction should be ended
+ trx_data Pointer to the transaction data to use
+ end_ev The end event to use, or NULL
+ all True if the entire transaction should be ended, false if
+ only the statement transaction should be ended.
+
+ DESCRIPTION
+
+ End the currently open transaction. The transaction can be either
+ a real transaction (if 'all' is true) or a statement transaction
+ (if 'all' is false).
+
+ If 'end_ev' is NULL, the transaction is a rollback of only
+ transactional tables, so the transaction cache will be truncated
+ to either just before the last opened statement transaction (if
+ 'all' is false), or reset completely (if 'all' is true).
+ */
static int
-binlog_end_trans(THD *thd, binlog_trx_data *trx_data, Log_event *end_ev)
+binlog_end_trans(THD *thd, binlog_trx_data *trx_data,
+ Log_event *end_ev, bool all)
{
DBUG_ENTER("binlog_end_trans");
int error=0;
IO_CACHE *trans_log= &trx_data->trans_log;
+ DBUG_PRINT("enter", ("transaction: %s, end_ev=%p",
+ all ? "all" : "stmt", end_ev));
+ DBUG_PRINT("info", ("thd->options={ %s%s}",
+ FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT),
+ FLAGSTR(thd->options, OPTION_BEGIN)));
-
- /* NULL denotes ROLLBACK with nothing to replicate */
+ /*
+ NULL denotes ROLLBACK with nothing to replicate: i.e., rollback of
+ only transactional tables. If the transaction contain changes to
+ any non-transactiona tables, we need write the transaction and log
+ a ROLLBACK last.
+ */
if (end_ev != NULL)
{
/*
+ Doing a commit or a rollback including non-transactional tables,
+ i.e., ending a transaction where we might write the transaction
+ cache to the binary log.
+
We can always end the statement when ending a transaction since
transactions are not allowed inside stored functions. If they
were, we would have to ensure that we're not ending a statement
@@ -1203,42 +1383,59 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data, Log_event *end_ev)
#ifdef HAVE_ROW_BASED_REPLICATION
thd->binlog_flush_pending_rows_event(TRUE);
#endif
- error= mysql_bin_log.write(thd, trans_log, end_ev);
+ /*
+ We write the transaction cache to the binary log if either we're
+ committing the entire transaction, or if we are doing an
+ autocommit outside a transaction.
+ */
+ if (all || !(thd->options & (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT)))
+ {
+ error= mysql_bin_log.write(thd, &trx_data->trans_log, end_ev);
+ trx_data->reset();
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ We need to step the table map version after writing the
+ transaction cache to disk.
+ */
+ mysql_bin_log.update_table_map_version();
+#endif
+ statistic_increment(binlog_cache_use, &LOCK_status);
+ if (trans_log->disk_writes != 0)
+ {
+ statistic_increment(binlog_cache_disk_use, &LOCK_status);
+ trans_log->disk_writes= 0;
+ }
+ }
}
#ifdef HAVE_ROW_BASED_REPLICATION
else
{
-#ifdef HAVE_ROW_BASED_REPLICATION
- thd->binlog_delete_pending_rows_event();
-#endif
- }
+ /*
+ If rolling back an entire transaction or a single statement not
+ inside a transaction, we reset the transaction cache.
- /*
- We need to step the table map version both after writing the
- entire transaction to the log file and after rolling back the
- transaction.
-
- We need to step the table map version after writing the
- transaction cache to disk. In addition, we need to step the table
- map version on a rollback to ensure that a new table map event is
- generated instead of the one that was written to the thrown-away
- transaction cache.
- */
- mysql_bin_log.update_table_map_version();
-#endif
+ If rolling back a statement in a transaction, we truncate the
+ transaction cache to remove the statement.
- statistic_increment(binlog_cache_use, &LOCK_status);
- if (trans_log->disk_writes != 0)
- {
- statistic_increment(binlog_cache_disk_use, &LOCK_status);
- trans_log->disk_writes= 0;
+ */
+ if (all || !(thd->options & (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT)))
+ trx_data->reset();
+ else
+ trx_data->truncate(trx_data->before_stmt_pos); // ...statement
+
+ /*
+ We need to step the table map version on a rollback to ensure
+ that a new table map event is generated instead of the one that
+ was written to the thrown-away transaction cache.
+ */
+ mysql_bin_log.update_table_map_version();
}
- reinit_io_cache(trans_log, WRITE_CACHE, (my_off_t) 0, 0, 1); // cannot fail
- trans_log->end_of_file= max_binlog_cache_size;
+#endif
+
DBUG_RETURN(error);
}
-static int binlog_prepare(THD *thd, bool all)
+static int binlog_prepare(handlerton *hton, THD *thd, bool all)
{
/*
do nothing.
@@ -1249,44 +1446,49 @@ static int binlog_prepare(THD *thd, bool all)
return 0;
}
-static int binlog_commit(THD *thd, bool all)
+static int binlog_commit(handlerton *hton, THD *thd, bool all)
{
+ int error= 0;
DBUG_ENTER("binlog_commit");
binlog_trx_data *const trx_data=
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
IO_CACHE *trans_log= &trx_data->trans_log;
- DBUG_ASSERT(mysql_bin_log.is_open() &&
- (all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))));
+ DBUG_ASSERT(mysql_bin_log.is_open());
- if (trx_data->empty())
+ if (all && trx_data->empty())
{
// we're here because trans_log was flushed in MYSQL_BIN_LOG::log()
+ trx_data->reset();
DBUG_RETURN(0);
}
- if (all)
+ if (all)
{
Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE);
qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE)
- DBUG_RETURN(binlog_end_trans(thd, trx_data, &qev));
+ int error= binlog_end_trans(thd, trx_data, &qev, all);
+ DBUG_RETURN(error);
}
else
- DBUG_RETURN(binlog_end_trans(thd, trx_data, &invisible_commit));
+ {
+ int error= binlog_end_trans(thd, trx_data, &invisible_commit, all);
+ DBUG_RETURN(error);
+ }
}
-static int binlog_rollback(THD *thd, bool all)
+static int binlog_rollback(handlerton *hton, THD *thd, bool all)
{
DBUG_ENTER("binlog_rollback");
int error=0;
binlog_trx_data *const trx_data=
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
IO_CACHE *trans_log= &trx_data->trans_log;
- /*
- First assert is guaranteed - see trans_register_ha() call below.
- The second must be true. If it is not, we're registering
- unnecessary, doing extra work. The cause should be found and eliminated
- */
- DBUG_ASSERT(all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)));
- DBUG_ASSERT(mysql_bin_log.is_open() && !trx_data->empty());
+ DBUG_ASSERT(mysql_bin_log.is_open());
+
+ if (trx_data->empty()) {
+ trx_data->reset();
+ DBUG_RETURN(0);
+ }
+
/*
Update the binary log with a BEGIN/ROLLBACK block if we have
cached some queries and we updated some non-transactional
@@ -1298,10 +1500,10 @@ static int binlog_rollback(THD *thd, bool all)
{
Query_log_event qev(thd, STRING_WITH_LEN("ROLLBACK"), TRUE, FALSE);
qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE)
- error= binlog_end_trans(thd, trx_data, &qev);
+ error= binlog_end_trans(thd, trx_data, &qev, all);
}
else
- error= binlog_end_trans(thd, trx_data, 0);
+ error= binlog_end_trans(thd, trx_data, 0, all);
DBUG_RETURN(error);
}
@@ -1326,14 +1528,11 @@ static int binlog_rollback(THD *thd, bool all)
that case there is no need to have it in the binlog).
*/
-static int binlog_savepoint_set(THD *thd, void *sv)
+static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv)
{
DBUG_ENTER("binlog_savepoint_set");
- binlog_trx_data *const trx_data=
- (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
- DBUG_ASSERT(mysql_bin_log.is_open() && my_b_tell(&trx_data->trans_log));
- *(my_off_t *)sv= my_b_tell(&trx_data->trans_log);
+ binlog_trans_log_savepos(thd, (my_off_t*) sv);
/* Write it to the binary log */
int const error=
@@ -1342,13 +1541,13 @@ static int binlog_savepoint_set(THD *thd, void *sv)
DBUG_RETURN(error);
}
-static int binlog_savepoint_rollback(THD *thd, void *sv)
+static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv)
{
DBUG_ENTER("binlog_savepoint_rollback");
binlog_trx_data *const trx_data=
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
IO_CACHE *trans_log= &trx_data->trans_log;
- DBUG_ASSERT(mysql_bin_log.is_open() && my_b_tell(trans_log));
+ DBUG_ASSERT(mysql_bin_log.is_open());
/*
Write ROLLBACK TO SAVEPOINT to the binlog cache if we have updated some
@@ -1363,7 +1562,7 @@ static int binlog_savepoint_rollback(THD *thd, void *sv)
thd->query, thd->query_length, TRUE, FALSE);
DBUG_RETURN(error);
}
- reinit_io_cache(trans_log, WRITE_CACHE, *(my_off_t *)sv, 0, 0);
+ binlog_trans_log_truncate(thd, *(my_off_t*)sv);
DBUG_RETURN(0);
}
@@ -1579,17 +1778,18 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
if (log_type == LOG_NORMAL)
{
char *end;
- int len=my_snprintf(buff, sizeof(buff), "%s, Version: %s. "
+ int len=my_snprintf(buff, sizeof(buff), "%s, Version: %s (%s). "
#ifdef EMBEDDED_LIBRARY
- "embedded library\n", my_progname, server_version
+ "embedded library\n",
+ my_progname, server_version, MYSQL_COMPILATION_COMMENT
#elif __NT__
"started with:\nTCP Port: %d, Named Pipe: %s\n",
- my_progname, server_version, mysqld_port,
- mysqld_unix_port
+ my_progname, server_version, MYSQL_COMPILATION_COMMENT,
+ mysqld_port, mysqld_unix_port
#else
"started with:\nTcp port: %d Unix socket: %s\n",
- my_progname, server_version, mysqld_port,
- mysqld_unix_port
+ my_progname, server_version, MYSQL_COMPILATION_COMMENT,
+ mysqld_port, mysqld_unix_port
#endif
);
end= strnmov(buff + len, "Time Id Command Argument\n",
@@ -2493,7 +2693,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd)
thread. If the transaction involved MyISAM tables, it should go
into binlog even on rollback.
*/
- (void) pthread_mutex_lock(&LOCK_thread_count);
+ VOID(pthread_mutex_lock(&LOCK_thread_count));
/* Save variables so that we can reopen the log */
save_name=name;
@@ -2525,7 +2725,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd)
my_free((gptr) save_name, MYF(0));
err:
- (void) pthread_mutex_unlock(&LOCK_thread_count);
+ VOID(pthread_mutex_unlock(&LOCK_thread_count));
pthread_mutex_unlock(&LOCK_index);
pthread_mutex_unlock(&LOCK_log);
DBUG_RETURN(error);
@@ -3091,18 +3291,76 @@ int THD::binlog_setup_trx_data()
ha_data[binlog_hton->slot]= 0;
DBUG_RETURN(1); // Didn't manage to set it up
}
- trx_data->trans_log.end_of_file= max_binlog_cache_size;
+
+ trx_data= new (ha_data[binlog_hton->slot]) binlog_trx_data;
+
DBUG_RETURN(0);
}
+#ifdef HAVE_ROW_BASED_REPLICATION
/*
- Write a table map to the binary log.
+ Function to start a statement and optionally a transaction for the
+ binary log.
+
+ SYNOPSIS
+ binlog_start_trans_and_stmt()
- This function is called from ha_external_lock() after the storage
- engine has registered for the transaction.
+ DESCRIPTION
+
+ This function does three things:
+ - Start a transaction if not in autocommit mode or if a BEGIN
+ statement has been seen.
+
+ - Start a statement transaction to allow us to truncate the binary
+ log.
+
+ - Save the currrent binlog position so that we can roll back the
+ statement by truncating the transaction log.
+
+ We only update the saved position if the old one was undefined,
+ the reason is that there are some cases (e.g., for CREATE-SELECT)
+ where the position is saved twice (e.g., both in
+ select_create::prepare() and THD::binlog_write_table_map()) , but
+ we should use the first. This means that calls to this function
+ can be used to start the statement before the first table map
+ event, to include some extra events.
+ */
+
+void
+THD::binlog_start_trans_and_stmt()
+{
+ DBUG_ENTER("binlog_start_trans_and_stmt");
+ binlog_trx_data *trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot];
+ DBUG_PRINT("enter", ("trx_data=0x%lu", trx_data));
+ if (trx_data)
+ DBUG_PRINT("enter", ("trx_data->before_stmt_pos=%u",
+ trx_data->before_stmt_pos));
+ if (trx_data == NULL ||
+ trx_data->before_stmt_pos == MY_OFF_T_UNDEF)
+ {
+ /*
+ The call to binlog_trans_log_savepos() might create the trx_data
+ structure, if it didn't exist before, so we save the position
+ into an auto variable and then write it into the transaction
+ data for the binary log (i.e., trx_data).
+ */
+ my_off_t pos= 0;
+ binlog_trans_log_savepos(this, &pos);
+ trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot];
+
+ trx_data->before_stmt_pos= pos;
+
+ if (options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+ trans_register_ha(this, TRUE, binlog_hton);
+ trans_register_ha(this, FALSE, binlog_hton);
+ }
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Write a table map to the binary log.
*/
-#ifdef HAVE_ROW_BASED_REPLICATION
int THD::binlog_write_table_map(TABLE *table, bool is_trans)
{
int error;
@@ -3121,10 +3379,8 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans)
Table_map_log_event
the_event(this, table, table->s->table_map_id, is_trans, flags);
- if (is_trans)
- trans_register_ha(this,
- (options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) != 0,
- binlog_hton);
+ if (is_trans && binlog_table_maps == 0)
+ binlog_start_trans_and_stmt();
if ((error= mysql_bin_log.write(&the_event)))
DBUG_RETURN(error);
@@ -3145,7 +3401,7 @@ THD::binlog_get_pending_rows_event() const
(since the trx_data is set up there). In that case, we just return
NULL.
*/
- return trx_data ? trx_data->pending : NULL;
+ return trx_data ? trx_data->pending() : NULL;
}
void
@@ -3158,7 +3414,7 @@ THD::binlog_set_pending_rows_event(Rows_log_event* ev)
(binlog_trx_data*) ha_data[binlog_hton->slot];
DBUG_ASSERT(trx_data);
- trx_data->pending= ev;
+ trx_data->set_pending(ev);
}
@@ -3167,8 +3423,9 @@ THD::binlog_set_pending_rows_event(Rows_log_event* ev)
(either cached binlog if transaction, or disk binlog). Sets a new pending
event.
*/
-int MYSQL_BIN_LOG::
- flush_and_set_pending_rows_event(THD *thd, Rows_log_event* event)
+int
+MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
+ Rows_log_event* event)
{
DBUG_ENTER("MYSQL_BIN_LOG::flush_and_set_pending_rows_event(event)");
DBUG_ASSERT(mysql_bin_log.is_open());
@@ -3181,9 +3438,9 @@ int MYSQL_BIN_LOG::
DBUG_ASSERT(trx_data);
- DBUG_PRINT("info", ("trx_data->pending=%p", trx_data->pending));
+ DBUG_PRINT("info", ("trx_data->pending()=%p", trx_data->pending()));
- if (Rows_log_event* pending= trx_data->pending)
+ if (Rows_log_event* pending= trx_data->pending())
{
IO_CACHE *file= &log_file;
@@ -3333,15 +3590,14 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
binlog_trx_data *const trx_data=
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
IO_CACHE *trans_log= &trx_data->trans_log;
- bool trans_log_in_use= my_b_tell(trans_log) != 0;
- if (event_info->get_cache_stmt() && !trans_log_in_use)
- trans_register_ha(thd,
- (thd->options &
- (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) != 0,
- binlog_hton);
- if (event_info->get_cache_stmt() || trans_log_in_use)
+ my_off_t trans_log_pos= my_b_tell(trans_log);
+ if (event_info->get_cache_stmt() || trans_log_pos != 0)
{
- DBUG_PRINT("info", ("Using trans_log"));
+ DBUG_PRINT("info", ("Using trans_log: cache=%d, trans_log_pos=%u",
+ event_info->get_cache_stmt(),
+ trans_log_pos));
+ if (trans_log_pos == 0)
+ thd->binlog_start_trans_and_stmt();
file= trans_log;
}
/*
@@ -3545,61 +3801,69 @@ bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event)
uint length;
/*
- Log "BEGIN" at the beginning of the transaction.
- which may contain more than 1 SQL statement.
- */
- if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+ We only bother to write to the binary log if there is anything
+ to write.
+ */
+ if (my_b_tell(cache) > 0)
{
- Query_log_event qinfo(thd, STRING_WITH_LEN("BEGIN"), TRUE, FALSE);
/*
- Imagine this is rollback due to net timeout, after all statements of
- the transaction succeeded. Then we want a zero-error code in BEGIN.
- In other words, if there was a really serious error code it's already
- in the statement's events, there is no need to put it also in this
- internally generated event, and as this event is generated late it
- would lead to false alarms.
- This is safer than thd->clear_error() against kills at shutdown.
+ Log "BEGIN" at the beginning of the transaction,
+ which may contain more than 1 SQL statement.
*/
- qinfo.error_code= 0;
- /*
- Now this Query_log_event has artificial log_pos 0. It must be adjusted
- to reflect the real position in the log. Not doing it would confuse the
- slave: it would prevent this one from knowing where he is in the
- master's binlog, which would result in wrong positions being shown to
- the user, MASTER_POS_WAIT undue waiting etc.
- */
- if (qinfo.write(&log_file))
- goto err;
- }
- /* Read from the file used to cache the queries .*/
- if (reinit_io_cache(cache, READ_CACHE, 0, 0, 0))
- goto err;
- length=my_b_bytes_in_cache(cache);
- DBUG_EXECUTE_IF("half_binlogged_transaction", length-=100;);
- do
- {
- /* Write data to the binary log file */
- if (my_b_write(&log_file, cache->read_pos, length))
- goto err;
- cache->read_pos=cache->read_end; // Mark buffer used up
- DBUG_EXECUTE_IF("half_binlogged_transaction", goto DBUG_skip_commit;);
- } while ((length=my_b_fill(cache)));
+ if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+ {
+ Query_log_event qinfo(thd, STRING_WITH_LEN("BEGIN"), TRUE, FALSE);
+ /*
+ Imagine this is rollback due to net timeout, after all statements of
+ the transaction succeeded. Then we want a zero-error code in BEGIN.
+ In other words, if there was a really serious error code it's already
+ in the statement's events, there is no need to put it also in this
+ internally generated event, and as this event is generated late it
+ would lead to false alarms.
+ This is safer than thd->clear_error() against kills at shutdown.
+ */
+ qinfo.error_code= 0;
+ /*
+ Now this Query_log_event has artificial log_pos 0. It must be adjusted
+ to reflect the real position in the log. Not doing it would confuse the
+ slave: it would prevent this one from knowing where he is in the
+ master's binlog, which would result in wrong positions being shown to
+ the user, MASTER_POS_WAIT undue waiting etc.
+ */
+ if (qinfo.write(&log_file))
+ goto err;
+ }
+ /* Read from the file used to cache the queries .*/
+ if (reinit_io_cache(cache, READ_CACHE, 0, 0, 0))
+ goto err;
+ length=my_b_bytes_in_cache(cache);
+ DBUG_EXECUTE_IF("half_binlogged_transaction", length-=100;);
+ do
+ {
+ /* Write data to the binary log file */
+ if (my_b_write(&log_file, cache->read_pos, length))
+ goto err;
+ cache->read_pos=cache->read_end; // Mark buffer used up
+ DBUG_EXECUTE_IF("half_binlogged_transaction", goto DBUG_skip_commit;);
+ } while ((length=my_b_fill(cache)));
- if (commit_event->write(&log_file))
- goto err;
+ if (commit_event && commit_event->write(&log_file))
+ goto err;
#ifndef DBUG_OFF
-DBUG_skip_commit:
+ DBUG_skip_commit:
#endif
- if (flush_and_sync())
- goto err;
- DBUG_EXECUTE_IF("half_binlogged_transaction", abort(););
- if (cache->error) // Error on read
- {
- sql_print_error(ER(ER_ERROR_ON_READ), cache->file_name, errno);
- write_error=1; // Don't give more errors
- goto err;
+ if (flush_and_sync())
+ goto err;
+ DBUG_EXECUTE_IF("half_binlogged_transaction", abort(););
+ if (cache->error) // Error on read
+ {
+ sql_print_error(ER(ER_ERROR_ON_READ), cache->file_name, errno);
+ write_error=1; // Don't give more errors
+ goto err;
+ }
+ signal_update();
}
- signal_update();
+
/*
if commit_event is Xid_log_event, increase the number of
prepared_xids (it's decreasd in ::unlog()). Binlog cannot be rotated
@@ -3608,7 +3872,7 @@ DBUG_skip_commit:
If the commit_event is not Xid_log_event (then it's a Query_log_event)
rotate binlog, if necessary.
*/
- if (commit_event->get_type_code() == XID_EVENT)
+ if (commit_event && commit_event->get_type_code() == XID_EVENT)
{
pthread_mutex_lock(&LOCK_prep_xids);
prepared_xids++;
@@ -4618,12 +4882,17 @@ int TC_LOG_BINLOG::log(THD *thd, my_xid xid)
Xid_log_event xle(thd, xid);
binlog_trx_data *trx_data=
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
- DBUG_RETURN(!binlog_end_trans(thd, trx_data, &xle)); // invert return value
+ /*
+ We always commit the entire transaction when writing an XID. Also
+ note that the return value is inverted.
+ */
+ DBUG_RETURN(!binlog_end_trans(thd, trx_data, &xle, TRUE));
}
void TC_LOG_BINLOG::unlog(ulong cookie, my_xid xid)
{
pthread_mutex_lock(&LOCK_prep_xids);
+ DBUG_ASSERT(prepared_xids > 0);
if (--prepared_xids == 0)
pthread_cond_signal(&COND_prep_xids);
pthread_mutex_unlock(&LOCK_prep_xids);
@@ -4678,7 +4947,7 @@ err1:
}
struct st_mysql_storage_engine binlog_storage_engine=
-{ MYSQL_HANDLERTON_INTERFACE_VERSION, binlog_hton };
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
mysql_declare_plugin(binlog)
{
@@ -4687,6 +4956,7 @@ mysql_declare_plugin(binlog)
"binlog",
"MySQL AB",
"This is a pseudo storage engine to represent the binlog in a transaction",
+ PLUGIN_LICENSE_GPL,
binlog_init, /* Plugin Init */
NULL, /* Plugin Deinit */
0x0100 /* 1.0 */,
diff --git a/sql/log.h b/sql/log.h
index 2b33f70392a..8f75601f02b 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -177,7 +177,7 @@ public:
pthread_mutex_t LOCK_log;
char *name;
char log_file_name[FN_REFLEN];
- char time_buff[20], db[NAME_BYTE_LEN + 1];
+ char time_buff[20], db[NAME_LEN + 1];
bool write_error, inited;
IO_CACHE log_file;
enum_log_type log_type;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index faff2cae48e..4a6346bf57c 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -33,31 +33,110 @@
#define log_cs &my_charset_latin1
/*
+ Cache that will automatically be written to a dedicated file on
+ destruction.
+
+ DESCRIPTION
+
+ */
+class Write_on_release_cache
+{
+public:
+ enum flag
+ {
+ FLUSH_F
+ };
+
+ typedef unsigned short flag_set;
+
+ /*
+ Constructor.
+
+ SYNOPSIS
+ Write_on_release_cache
+ cache Pointer to cache to use
+ file File to write cache to upon destruction
+ flags Flags for the cache
+
+ DESCRIPTION
+
+ Class used to guarantee copy of cache to file before exiting the
+ current block. On successful copy of the cache, the cache will
+ be reinited as a WRITE_CACHE.
+
+ Currently, a pointer to the cache is provided in the
+ constructor, but it would be possible to create a subclass
+ holding the IO_CACHE itself.
+ */
+ Write_on_release_cache(IO_CACHE *cache, FILE *file, flag_set flags = 0)
+ : m_cache(cache), m_file(file), m_flags(flags)
+ {
+ reinit_io_cache(m_cache, WRITE_CACHE, 0L, FALSE, TRUE);
+ }
+
+ ~Write_on_release_cache()
+ {
+ if (!my_b_copy_to_file(m_cache, m_file))
+ reinit_io_cache(m_cache, WRITE_CACHE, 0L, FALSE, TRUE);
+ if (m_flags | FLUSH_F)
+ fflush(m_file);
+ }
+
+ /*
+ Return a pointer to the internal IO_CACHE.
+
+ SYNOPSIS
+ operator&()
+
+ DESCRIPTION
+ Function to return a pointer to the internal, so that the object
+ can be treated as a IO_CACHE and used with the my_b_* IO_CACHE
+ functions
+
+ RETURN VALUE
+ A pointer to the internal IO_CACHE.
+ */
+ IO_CACHE *operator&()
+ {
+ return m_cache;
+ }
+
+private:
+ // Hidden, to prevent usage.
+ Write_on_release_cache(Write_on_release_cache const&);
+
+ IO_CACHE *m_cache;
+ FILE *m_file;
+ flag_set m_flags;
+};
+
+
+/*
pretty_print_str()
*/
#ifdef MYSQL_CLIENT
-static void pretty_print_str(FILE* file, char* str, int len)
+static void pretty_print_str(IO_CACHE* cache, char* str, int len)
{
char* end = str + len;
- fputc('\'', file);
+ my_b_printf(cache, "\'");
while (str < end)
{
char c;
switch ((c=*str++)) {
- case '\n': fprintf(file, "\\n"); break;
- case '\r': fprintf(file, "\\r"); break;
- case '\\': fprintf(file, "\\\\"); break;
- case '\b': fprintf(file, "\\b"); break;
- case '\t': fprintf(file, "\\t"); break;
- case '\'': fprintf(file, "\\'"); break;
- case 0 : fprintf(file, "\\0"); break;
+ case '\n': my_b_printf(cache, "\\n"); break;
+ case '\r': my_b_printf(cache, "\\r"); break;
+ case '\\': my_b_printf(cache, "\\\\"); break;
+ case '\b': my_b_printf(cache, "\\b"); break;
+ case '\t': my_b_printf(cache, "\\t"); break;
+ case '\'': my_b_printf(cache, "\\'"); break;
+ case 0 : my_b_printf(cache, "\\0"); break;
default:
- fputc(c, file);
+ my_b_printf(cache, "%c", c);
break;
}
}
- fputc('\'', file);
+ my_b_printf(cache, "\'");
}
#endif /* MYSQL_CLIENT */
@@ -294,14 +373,15 @@ append_query_string(CHARSET_INFO *csinfo,
*/
#ifdef MYSQL_CLIENT
-static void print_set_option(FILE* file, uint32 bits_changed, uint32 option,
- uint32 flags, const char* name, bool* need_comma)
+static void print_set_option(IO_CACHE* file, uint32 bits_changed,
+ uint32 option, uint32 flags, const char* name,
+ bool* need_comma)
{
if (bits_changed & option)
{
if (*need_comma)
- fprintf(file,", ");
- fprintf(file,"%s=%d", name, test(flags & option));
+ my_b_printf(file,", ");
+ my_b_printf(file,"%s=%d", name, test(flags & option));
*need_comma= 1;
}
}
@@ -960,20 +1040,23 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
Log_event::print_header()
*/
-void Log_event::print_header(FILE* file, PRINT_EVENT_INFO* print_event_info)
+void Log_event::print_header(IO_CACHE* file,
+ PRINT_EVENT_INFO* print_event_info,
+ bool is_more __attribute__((unused)))
{
char llbuff[22];
my_off_t hexdump_from= print_event_info->hexdump_from;
+ DBUG_ENTER("Log_event::print_header");
- fputc('#', file);
+ my_b_printf(file, "#");
print_timestamp(file);
- fprintf(file, " server id %d end_log_pos %s ", server_id,
- llstr(log_pos,llbuff));
+ my_b_printf(file, " server id %d end_log_pos %s ", server_id,
+ llstr(log_pos,llbuff));
/* mysqlbinlog --hexdump */
if (print_event_info->hexdump_from)
{
- fprintf(file, "\n");
+ my_b_printf(file, "\n");
uchar *ptr= (uchar*)temp_buf;
my_off_t size=
uint4korr(ptr + EVENT_LEN_OFFSET) - LOG_EVENT_MINIMAL_HEADER_LEN;
@@ -986,15 +1069,21 @@ void Log_event::print_header(FILE* file, PRINT_EVENT_INFO* print_event_info)
/* Pretty-print event common header if header is exactly 19 bytes */
if (print_event_info->common_header_len == LOG_EVENT_MINIMAL_HEADER_LEN)
{
- fprintf(file, "# Position Timestamp Type Master ID "
- "Size Master Pos Flags \n");
- fprintf(file, "# %8.8lx %02x %02x %02x %02x %02x "
- "%02x %02x %02x %02x %02x %02x %02x %02x "
- "%02x %02x %02x %02x %02x %02x\n",
- (unsigned long) hexdump_from,
- ptr[0], ptr[1], ptr[2], ptr[3], ptr[4], ptr[5], ptr[6],
- ptr[7], ptr[8], ptr[9], ptr[10], ptr[11], ptr[12], ptr[13],
- ptr[14], ptr[15], ptr[16], ptr[17], ptr[18]);
+ char emit_buf[256]; // Enough for storing one line
+ my_b_printf(file, "# Position Timestamp Type Master ID "
+ "Size Master Pos Flags \n");
+ int const bytes_written=
+ my_snprintf(emit_buf, sizeof(emit_buf),
+ "# %8.8lx %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x\n",
+ (unsigned long) hexdump_from,
+ ptr[0], ptr[1], ptr[2], ptr[3], ptr[4], ptr[5], ptr[6],
+ ptr[7], ptr[8], ptr[9], ptr[10], ptr[11], ptr[12], ptr[13],
+ ptr[14], ptr[15], ptr[16], ptr[17], ptr[18]);
+ DBUG_ASSERT(bytes_written >= 0);
+ DBUG_ASSERT(static_cast<my_size_t>(bytes_written) < sizeof(emit_buf));
+ my_b_write(file, (byte*) emit_buf, bytes_written);
ptr += LOG_EVENT_MINIMAL_HEADER_LEN;
hexdump_from += LOG_EVENT_MINIMAL_HEADER_LEN;
}
@@ -1011,9 +1100,21 @@ void Log_event::print_header(FILE* file, PRINT_EVENT_INFO* print_event_info)
if (i % 16 == 15)
{
- fprintf(file, "# %8.8lx %-48.48s |%16s|\n",
- (unsigned long) (hexdump_from + (i & 0xfffffff0)),
- hex_string, char_string);
+ /*
+ my_b_printf() does not support full printf() formats, so we
+ have to do it this way.
+
+ TODO: Rewrite my_b_printf() to support full printf() syntax.
+ */
+ char emit_buf[256];
+ int const bytes_written=
+ my_snprintf(emit_buf, sizeof(emit_buf),
+ "# %8.8lx %-48.48s |%16s|\n",
+ (unsigned long) (hexdump_from + (i & 0xfffffff0)),
+ hex_string, char_string);
+ DBUG_ASSERT(bytes_written >= 0);
+ DBUG_ASSERT(static_cast<my_size_t>(bytes_written) < sizeof(emit_buf));
+ my_b_write(file, (byte*) emit_buf, bytes_written);
hex_string[0]= 0;
char_string[0]= 0;
c= char_string;
@@ -1025,28 +1126,52 @@ void Log_event::print_header(FILE* file, PRINT_EVENT_INFO* print_event_info)
/* Non-full last line */
if (hex_string[0])
- fprintf(file, "# %8.8lx %-48.48s |%s|\n# ",
- (unsigned long) (hexdump_from + (i & 0xfffffff0)),
- hex_string, char_string);
+ {
+ char emit_buf[256];
+ int const bytes_written=
+ my_snprintf(emit_buf, sizeof(emit_buf),
+ "# %8.8lx %-48.48s |%s|\n# ",
+ (unsigned long) (hexdump_from + (i & 0xfffffff0)),
+ hex_string, char_string);
+ DBUG_ASSERT(bytes_written >= 0);
+ DBUG_ASSERT(static_cast<my_size_t>(bytes_written) < sizeof(emit_buf));
+ my_b_write(file, (byte*) emit_buf, bytes_written);
+ }
}
+ DBUG_VOID_RETURN;
}
-void Log_event::print_base64(FILE* file, PRINT_EVENT_INFO* print_event_info)
+void Log_event::print_base64(IO_CACHE* file,
+ PRINT_EVENT_INFO* print_event_info,
+ bool more)
{
- uchar *ptr= (uchar*)temp_buf;
+ const uchar *ptr= (const uchar *)temp_buf;
my_off_t size= uint4korr(ptr + EVENT_LEN_OFFSET);
- char *tmp_str=
- (char *) my_malloc(base64_needed_encoded_length(size), MYF(MY_WME));
+ DBUG_ENTER("Log_event::print_base64");
+
+ size_t const tmp_str_sz= base64_needed_encoded_length(size);
+ char *const tmp_str= (char *) my_malloc(tmp_str_sz, MYF(MY_WME));
if (!tmp_str) {
fprintf(stderr, "\nError: Out of memory. "
"Could not print correct binlog event.\n");
- return;
+ DBUG_VOID_RETURN;
}
- int res= base64_encode(ptr, size, tmp_str);
- fprintf(file, "\nBINLOG '\n%s\n';\n", tmp_str);
+
+ int const res= base64_encode(ptr, size, tmp_str);
+ DBUG_ASSERT(res == 0);
+
+ if (my_b_tell(file) == 0)
+ my_b_printf(file, "\nBINLOG '\n");
+
+ my_b_printf(file, "%s\n", tmp_str);
+
+ if (!more)
+ my_b_printf(file, "';\n");
+
my_free(tmp_str, MYF(0));
+ DBUG_VOID_RETURN;
}
@@ -1054,9 +1179,10 @@ void Log_event::print_base64(FILE* file, PRINT_EVENT_INFO* print_event_info)
Log_event::print_timestamp()
*/
-void Log_event::print_timestamp(FILE* file, time_t* ts)
+void Log_event::print_timestamp(IO_CACHE* file, time_t* ts)
{
struct tm *res;
+ DBUG_ENTER("Log_event::print_timestamp");
if (!ts)
ts = &when;
#ifdef MYSQL_SERVER // This is always false
@@ -1066,13 +1192,14 @@ void Log_event::print_timestamp(FILE* file, time_t* ts)
res=localtime(ts);
#endif
- fprintf(file,"%02d%02d%02d %2d:%02d:%02d",
- res->tm_year % 100,
- res->tm_mon+1,
- res->tm_mday,
- res->tm_hour,
- res->tm_min,
- res->tm_sec);
+ my_b_printf(file,"%02d%02d%02d %2d:%02d:%02d",
+ res->tm_year % 100,
+ res->tm_mon+1,
+ res->tm_mday,
+ res->tm_hour,
+ res->tm_min,
+ res->tm_sec);
+ DBUG_VOID_RETURN;
}
#endif /* MYSQL_CLIENT */
@@ -1544,7 +1671,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
*/
#ifdef MYSQL_CLIENT
-void Query_log_event::print_query_header(FILE* file,
+void Query_log_event::print_query_header(IO_CACHE* file,
PRINT_EVENT_INFO* print_event_info)
{
// TODO: print the catalog ??
@@ -1554,9 +1681,10 @@ void Query_log_event::print_query_header(FILE* file,
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\t%s\tthread_id=%lu\texec_time=%lu\terror_code=%d\n",
- get_type_str(), (ulong) thread_id, (ulong) exec_time, error_code);
+ print_header(file, print_event_info, FALSE);
+ my_b_printf(file, "\t%s\tthread_id=%lu\texec_time=%lu\terror_code=%d\n",
+ get_type_str(), (ulong) thread_id, (ulong) exec_time,
+ error_code);
}
if (!(flags & LOG_EVENT_SUPPRESS_USE_F) && db)
@@ -1564,15 +1692,15 @@ void Query_log_event::print_query_header(FILE* file,
if (different_db= memcmp(print_event_info->db, db, db_len + 1))
memcpy(print_event_info->db, db, db_len + 1);
if (db[0] && different_db)
- fprintf(file, "use %s;\n", db);
+ my_b_printf(file, "use %s;\n", db);
}
end=int10_to_str((long) when, strmov(buff,"SET TIMESTAMP="),10);
*end++=';';
*end++='\n';
- my_fwrite(file, (byte*) buff, (uint) (end-buff),MYF(MY_NABP | MY_WME));
+ my_b_write(file, (byte*) buff, (uint) (end-buff));
if (flags & LOG_EVENT_THREAD_SPECIFIC_F)
- fprintf(file,"SET @@session.pseudo_thread_id=%lu;\n",(ulong)thread_id);
+ my_b_printf(file,"SET @@session.pseudo_thread_id=%lu;\n",(ulong)thread_id);
/*
If flags2_inited==0, this is an event from 3.23 or 4.0; nothing to
@@ -1594,14 +1722,14 @@ void Query_log_event::print_query_header(FILE* file,
if (unlikely(tmp)) /* some bits have changed */
{
bool need_comma= 0;
- fprintf(file, "SET ");
+ my_b_printf(file, "SET ");
print_set_option(file, tmp, OPTION_NO_FOREIGN_KEY_CHECKS, ~flags2,
"@@session.foreign_key_checks", &need_comma);
print_set_option(file, tmp, OPTION_AUTO_IS_NULL, flags2,
"@@session.sql_auto_is_null", &need_comma);
print_set_option(file, tmp, OPTION_RELAXED_UNIQUE_CHECKS, ~flags2,
"@@session.unique_checks", &need_comma);
- fprintf(file,";\n");
+ my_b_printf(file,";\n");
print_event_info->flags2= flags2;
}
}
@@ -1629,14 +1757,14 @@ void Query_log_event::print_query_header(FILE* file,
}
if (unlikely(print_event_info->sql_mode != sql_mode))
{
- fprintf(file,"SET @@session.sql_mode=%lu;\n",(ulong)sql_mode);
+ my_b_printf(file,"SET @@session.sql_mode=%lu;\n",(ulong)sql_mode);
print_event_info->sql_mode= sql_mode;
}
}
if (print_event_info->auto_increment_increment != auto_increment_increment ||
print_event_info->auto_increment_offset != auto_increment_offset)
{
- fprintf(file,"SET @@session.auto_increment_increment=%lu, @@session.auto_increment_offset=%lu;\n",
+ my_b_printf(file,"SET @@session.auto_increment_increment=%lu, @@session.auto_increment_offset=%lu;\n",
auto_increment_increment,auto_increment_offset);
print_event_info->auto_increment_increment= auto_increment_increment;
print_event_info->auto_increment_offset= auto_increment_offset;
@@ -1656,16 +1784,17 @@ void Query_log_event::print_query_header(FILE* file,
CHARSET_INFO *cs_info= get_charset(uint2korr(charset), MYF(MY_WME));
if (cs_info)
{
- fprintf(file, "/*!\\C %s */;\n", cs_info->csname); /* for mysql client */
+ /* for mysql client */
+ my_b_printf(file, "/*!\\C %s */;\n", cs_info->csname);
}
- fprintf(file,"SET "
- "@@session.character_set_client=%d,"
- "@@session.collation_connection=%d,"
- "@@session.collation_server=%d"
- ";\n",
- uint2korr(charset),
- uint2korr(charset+2),
- uint2korr(charset+4));
+ my_b_printf(file,"SET "
+ "@@session.character_set_client=%d,"
+ "@@session.collation_connection=%d,"
+ "@@session.collation_server=%d"
+ ";\n",
+ uint2korr(charset),
+ uint2korr(charset+2),
+ uint2korr(charset+4));
memcpy(print_event_info->charset, charset, 6);
}
}
@@ -1673,7 +1802,7 @@ void Query_log_event::print_query_header(FILE* file,
{
if (bcmp(print_event_info->time_zone_str, time_zone_str, time_zone_len+1))
{
- fprintf(file,"SET @@session.time_zone='%s';\n", time_zone_str);
+ my_b_printf(file,"SET @@session.time_zone='%s';\n", time_zone_str);
memcpy(print_event_info->time_zone_str, time_zone_str, time_zone_len+1);
}
}
@@ -1682,9 +1811,11 @@ void Query_log_event::print_query_header(FILE* file,
void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
- print_query_header(file, print_event_info);
- my_fwrite(file, (byte*) query, q_len, MYF(MY_NABP | MY_WME));
- fputs(";\n", file);
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
+ print_query_header(&cache, print_event_info);
+ my_b_write(&cache, (byte*) query, q_len);
+ my_b_printf(&cache, ";\n");
}
#endif /* MYSQL_CLIENT */
@@ -2014,18 +2145,23 @@ void Start_log_event_v3::pack_info(Protocol *protocol)
#ifdef MYSQL_CLIENT
void Start_log_event_v3::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
+ DBUG_ENTER("Start_log_event_v3::print");
+
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\tStart: binlog v %d, server v %s created ", binlog_version,
- server_version);
- print_timestamp(file);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tStart: binlog v %d, server v %s created ",
+ binlog_version, server_version);
+ print_timestamp(&cache);
if (created)
- fprintf(file," at startup");
- fputc('\n', file);
+ my_b_printf(&cache," at startup");
+ my_b_printf(&cache, "\n");
if (flags & LOG_EVENT_BINLOG_IN_USE_F)
- fprintf(file, "# Warning: this binlog was not closed properly. "
- "Most probably mysqld crashed writing it.\n");
+ my_b_printf(&cache, "# Warning: this binlog was not closed properly. "
+ "Most probably mysqld crashed writing it.\n");
}
if (!artificial_event && created)
{
@@ -2036,12 +2172,12 @@ void Start_log_event_v3::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
and rollback unfinished transaction.
Probably this can be done with RESET CONNECTION (syntax to be defined).
*/
- fprintf(file,"RESET CONNECTION;\n");
+ my_b_printf(&cache,"RESET CONNECTION;\n");
#else
- fprintf(file,"ROLLBACK;\n");
+ my_b_printf(&cache,"ROLLBACK;\n");
#endif
}
- fflush(file);
+ DBUG_VOID_RETURN;
}
#endif /* MYSQL_CLIENT */
@@ -2381,19 +2517,19 @@ int Format_description_log_event::exec_event(struct st_relay_log_info* rli)
if (server_id == (uint32) ::server_id)
{
/*
- Do not modify rli->group_master_log_pos, as this event did not exist on
- the master. That is, just update the *relay log* coordinates; this is
- done by passing log_pos=0 to inc_group_relay_log_pos, like we do in
- Stop_log_event::exec_event().
- If in a transaction, don't touch group_* coordinates.
- */
- if (thd->options & OPTION_BEGIN)
- rli->inc_event_relay_log_pos();
- else
- {
- rli->inc_group_relay_log_pos(0);
- flush_relay_log_info(rli);
- }
+ We only increase the relay log position if we are skipping
+ events and do not touch any group_* variables, nor flush the
+ relay log info. If there is a crash, we will have to re-skip
+ the events again, but that is a minor issue.
+
+ If we do not skip stepping the group log position (and the
+ server id was changed when restarting the server), it might well
+ be that we start executing at a position that is invalid, e.g.,
+ at a Rows_log_event or a Query_log_event preceded by an
+ Intvar_log_event instead of starting at a Table_map_log_event or
+ the Intvar_log_event respectively.
+ */
+ rli->inc_event_relay_log_pos();
DBUG_RETURN(0);
}
@@ -2765,15 +2901,17 @@ void Load_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
}
-void Load_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info,
+void Load_log_event::print(FILE* file_arg, PRINT_EVENT_INFO* print_event_info,
bool commented)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file_arg);
+
DBUG_ENTER("Load_log_event::print");
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\tQuery\tthread_id=%ld\texec_time=%ld\n",
- thread_id, exec_time);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tQuery\tthread_id=%ld\texec_time=%ld\n",
+ thread_id, exec_time);
}
bool different_db= 1;
@@ -2791,65 +2929,65 @@ void Load_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info,
}
if (db && db[0] && different_db)
- fprintf(file, "%suse %s;\n",
+ my_b_printf(&cache, "%suse %s;\n",
commented ? "# " : "",
db);
if (flags & LOG_EVENT_THREAD_SPECIFIC_F)
- fprintf(file,"%sSET @@session.pseudo_thread_id=%lu;\n",
+ my_b_printf(&cache,"%sSET @@session.pseudo_thread_id=%lu;\n",
commented ? "# " : "", (ulong)thread_id);
- fprintf(file, "%sLOAD DATA ",
+ my_b_printf(&cache, "%sLOAD DATA ",
commented ? "# " : "");
if (check_fname_outside_temp_buf())
- fprintf(file, "LOCAL ");
- fprintf(file, "INFILE '%-*s' ", fname_len, fname);
+ my_b_printf(&cache, "LOCAL ");
+ my_b_printf(&cache, "INFILE '%-*s' ", fname_len, fname);
if (sql_ex.opt_flags & REPLACE_FLAG)
- fprintf(file," REPLACE ");
+ my_b_printf(&cache," REPLACE ");
else if (sql_ex.opt_flags & IGNORE_FLAG)
- fprintf(file," IGNORE ");
+ my_b_printf(&cache," IGNORE ");
- fprintf(file, "INTO TABLE `%s`", table_name);
- fprintf(file, " FIELDS TERMINATED BY ");
- pretty_print_str(file, sql_ex.field_term, sql_ex.field_term_len);
+ my_b_printf(&cache, "INTO TABLE `%s`", table_name);
+ my_b_printf(&cache, " FIELDS TERMINATED BY ");
+ pretty_print_str(&cache, sql_ex.field_term, sql_ex.field_term_len);
if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG)
- fprintf(file," OPTIONALLY ");
- fprintf(file, " ENCLOSED BY ");
- pretty_print_str(file, sql_ex.enclosed, sql_ex.enclosed_len);
+ my_b_printf(&cache," OPTIONALLY ");
+ my_b_printf(&cache, " ENCLOSED BY ");
+ pretty_print_str(&cache, sql_ex.enclosed, sql_ex.enclosed_len);
- fprintf(file, " ESCAPED BY ");
- pretty_print_str(file, sql_ex.escaped, sql_ex.escaped_len);
+ my_b_printf(&cache, " ESCAPED BY ");
+ pretty_print_str(&cache, sql_ex.escaped, sql_ex.escaped_len);
- fprintf(file," LINES TERMINATED BY ");
- pretty_print_str(file, sql_ex.line_term, sql_ex.line_term_len);
+ my_b_printf(&cache," LINES TERMINATED BY ");
+ pretty_print_str(&cache, sql_ex.line_term, sql_ex.line_term_len);
if (sql_ex.line_start)
{
- fprintf(file," STARTING BY ");
- pretty_print_str(file, sql_ex.line_start, sql_ex.line_start_len);
+ my_b_printf(&cache," STARTING BY ");
+ pretty_print_str(&cache, sql_ex.line_start, sql_ex.line_start_len);
}
if ((long) skip_lines > 0)
- fprintf(file, " IGNORE %ld LINES", (long) skip_lines);
+ my_b_printf(&cache, " IGNORE %ld LINES", (long) skip_lines);
if (num_fields)
{
uint i;
const char* field = fields;
- fprintf(file, " (");
+ my_b_printf(&cache, " (");
for (i = 0; i < num_fields; i++)
{
if (i)
- fputc(',', file);
- fprintf(file, field);
+ my_b_printf(&cache, ",");
+ my_b_printf(&cache, field);
field += field_lens[i] + 1;
}
- fputc(')', file);
+ my_b_printf(&cache, ")");
}
- fprintf(file, ";\n");
+ my_b_printf(&cache, ";\n");
DBUG_VOID_RETURN;
}
#endif /* MYSQL_CLIENT */
@@ -3185,17 +3323,16 @@ void Rotate_log_event::pack_info(Protocol *protocol)
void Rotate_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
char buf[22];
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fprintf(file, "\tRotate to ");
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tRotate to ");
if (new_log_ident)
- my_fwrite(file, (byte*) new_log_ident, (uint)ident_len,
- MYF(MY_NABP | MY_WME));
- fprintf(file, " pos: %s", llstr(pos, buf));
- fputc('\n', file);
- fflush(file);
+ my_b_write(&cache, (byte*) new_log_ident, (uint)ident_len);
+ my_b_printf(&cache, " pos: %s\n", llstr(pos, buf));
}
#endif /* MYSQL_CLIENT */
@@ -3407,14 +3544,16 @@ void Intvar_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
char llbuff[22];
const char *msg;
LINT_INIT(msg);
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\tIntvar\n");
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tIntvar\n");
}
- fprintf(file, "SET ");
+ my_b_printf(&cache, "SET ");
switch (type) {
case LAST_INSERT_ID_EVENT:
msg="LAST_INSERT_ID";
@@ -3427,8 +3566,7 @@ void Intvar_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
msg="INVALID_INT";
break;
}
- fprintf(file, "%s=%s;\n", msg, llstr(val,llbuff));
- fflush(file);
+ my_b_printf(&cache, "%s=%s;\n", msg, llstr(val,llbuff));
}
#endif
@@ -3497,15 +3635,17 @@ bool Rand_log_event::write(IO_CACHE* file)
#ifdef MYSQL_CLIENT
void Rand_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+
char llbuff[22],llbuff2[22];
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\tRand\n");
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tRand\n");
}
- fprintf(file, "SET @@RAND_SEED1=%s, @@RAND_SEED2=%s;\n",
- llstr(seed1, llbuff),llstr(seed2, llbuff2));
- fflush(file);
+ my_b_printf(&cache, "SET @@RAND_SEED1=%s, @@RAND_SEED2=%s;\n",
+ llstr(seed1, llbuff),llstr(seed2, llbuff2));
}
#endif /* MYSQL_CLIENT */
@@ -3567,16 +3707,18 @@ bool Xid_log_event::write(IO_CACHE* file)
#ifdef MYSQL_CLIENT
void Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+
if (!print_event_info->short_form)
{
char buf[64];
longlong10_to_str(xid, buf, 10);
- print_header(file, print_event_info);
- fprintf(file, "\tXid = %s\n", buf);
- fflush(file);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tXid = %s\n", buf);
}
- fprintf(file, "COMMIT;\n");
+ my_b_printf(&cache, "COMMIT;\n");
}
#endif /* MYSQL_CLIENT */
@@ -3766,19 +3908,22 @@ bool User_var_log_event::write(IO_CACHE* file)
#ifdef MYSQL_CLIENT
void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\tUser_var\n");
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tUser_var\n");
}
- fprintf(file, "SET @`");
- my_fwrite(file, (byte*) name, (uint) (name_len), MYF(MY_NABP | MY_WME));
- fprintf(file, "`");
+ my_b_printf(&cache, "SET @`");
+ my_b_write(&cache, (byte*) name, (uint) (name_len));
+ my_b_printf(&cache, "`");
if (is_null)
{
- fprintf(file, ":=NULL;\n");
+ my_b_printf(&cache, ":=NULL;\n");
}
else
{
@@ -3786,12 +3931,12 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
case REAL_RESULT:
double real_val;
float8get(real_val, val);
- fprintf(file, ":=%.14g;\n", real_val);
+ my_b_printf(&cache, ":=%.14g;\n", real_val);
break;
case INT_RESULT:
char int_buf[22];
longlong10_to_str(uint8korr(val), int_buf, -10);
- fprintf(file, ":=%s;\n", int_buf);
+ my_b_printf(&cache, ":=%s;\n", int_buf);
break;
case DECIMAL_RESULT:
{
@@ -3807,7 +3952,7 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
bin2decimal(val+2, &dec, precision, scale);
decimal2string(&dec, str_buf, &str_len, 0, 0, 0);
str_buf[str_len]= 0;
- fprintf(file, ":=%s;\n",str_buf);
+ my_b_printf(&cache, ":=%s;\n",str_buf);
break;
}
case STRING_RESULT:
@@ -3843,9 +3988,9 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
Generate an unusable command (=> syntax error) is probably the best
thing we can do here.
*/
- fprintf(file, ":=???;\n");
+ my_b_printf(&cache, ":=???;\n");
else
- fprintf(file, ":=_%s %s COLLATE `%s`;\n", cs->csname, hex_str, cs->name);
+ my_b_printf(&cache, ":=_%s %s COLLATE `%s`;\n", cs->csname, hex_str, cs->name);
my_afree(hex_str);
}
break;
@@ -3855,7 +4000,6 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
return;
}
}
- fflush(file);
}
#endif
@@ -3939,13 +4083,14 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli)
#ifdef HAVE_REPLICATION
#ifdef MYSQL_CLIENT
-void Unknown_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
+void Unknown_log_event::print(FILE* file_arg, PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file_arg);
+
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fputc('\n', file);
- fprintf(file, "# %s", "Unknown event\n");
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\n# %s", "Unknown event\n");
}
#endif
@@ -4012,12 +4157,13 @@ Slave_log_event::~Slave_log_event()
#ifdef MYSQL_CLIENT
void Slave_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
char llbuff[22];
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fputc('\n', file);
- fprintf(file, "\
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\n\
Slave: master_host: '%s' master_port: %d master_log: '%s' master_pos: %s\n",
master_host, master_port, master_log, llstr(master_pos, llbuff));
}
@@ -4097,12 +4243,14 @@ int Slave_log_event::exec_event(struct st_relay_log_info* rli)
#ifdef MYSQL_CLIENT
void Stop_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fprintf(file, "\tStop\n");
- fflush(file);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tStop\n");
}
#endif /* MYSQL_CLIENT */
@@ -4277,6 +4425,8 @@ Create_file_log_event::Create_file_log_event(const char* buf, uint len,
void Create_file_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info,
bool enable_local)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
if (print_event_info->short_form)
{
if (enable_local && check_fname_outside_temp_buf())
@@ -4292,10 +4442,10 @@ void Create_file_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info
That one is for "file_id: etc" below: in mysqlbinlog we want the #, in
SHOW BINLOG EVENTS we don't.
*/
- fprintf(file, "#");
+ my_b_printf(&cache, "#");
}
- fprintf(file, " file_id: %d block_len: %d\n", file_id, block_len);
+ my_b_printf(&cache, " file_id: %d block_len: %d\n", file_id, block_len);
}
@@ -4465,12 +4615,13 @@ bool Append_block_log_event::write(IO_CACHE* file)
void Append_block_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fputc('\n', file);
- fprintf(file, "#%s: file_id: %d block_len: %d\n",
- get_type_str(), file_id, block_len);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\n#%s: file_id: %d block_len: %d\n",
+ get_type_str(), file_id, block_len);
}
#endif /* MYSQL_CLIENT */
@@ -4608,11 +4759,12 @@ bool Delete_file_log_event::write(IO_CACHE* file)
void Delete_file_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fputc('\n', file);
- fprintf(file, "#Delete_file: file_id=%u\n", file_id);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\n#Delete_file: file_id=%u\n", file_id);
}
#endif /* MYSQL_CLIENT */
@@ -4703,12 +4855,13 @@ bool Execute_load_log_event::write(IO_CACHE* file)
void Execute_load_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fputc('\n', file);
- fprintf(file, "#Exec_load: file_id=%d\n",
- file_id);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\n#Exec_load: file_id=%d\n",
+ file_id);
}
#endif
@@ -4925,29 +5078,30 @@ void Execute_load_query_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info,
const char *local_fname)
{
- print_query_header(file, print_event_info);
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
+ print_query_header(&cache, print_event_info);
if (local_fname)
{
- my_fwrite(file, (byte*) query, fn_pos_start, MYF(MY_NABP | MY_WME));
- fprintf(file, " LOCAL INFILE \'");
- fprintf(file, local_fname);
- fprintf(file, "\'");
+ my_b_write(&cache, (byte*) query, fn_pos_start);
+ my_b_printf(&cache, " LOCAL INFILE \'");
+ my_b_printf(&cache, local_fname);
+ my_b_printf(&cache, "\'");
if (dup_handling == LOAD_DUP_REPLACE)
- fprintf(file, " REPLACE");
- fprintf(file, " INTO");
- my_fwrite(file, (byte*) query + fn_pos_end, q_len-fn_pos_end,
- MYF(MY_NABP | MY_WME));
- fprintf(file, ";\n");
+ my_b_printf(&cache, " REPLACE");
+ my_b_printf(&cache, " INTO");
+ my_b_write(&cache, (byte*) query + fn_pos_end, q_len-fn_pos_end);
+ my_b_printf(&cache, ";\n");
}
else
{
- my_fwrite(file, (byte*) query, q_len, MYF(MY_NABP | MY_WME));
- fprintf(file, ";\n");
+ my_b_write(&cache, (byte*) query, q_len);
+ my_b_printf(&cache, ";\n");
}
if (!print_event_info->short_form)
- fprintf(file, "# file_id: %d \n", file_id);
+ my_b_printf(&cache, "# file_id: %d \n", file_id);
}
#endif
@@ -5797,6 +5951,31 @@ void Rows_log_event::pack_info(Protocol *protocol)
}
#endif
+#ifdef MYSQL_CLIENT
+void Rows_log_event::print_helper(FILE *file,
+ PRINT_EVENT_INFO *print_event_info,
+ char const *const name)
+{
+ IO_CACHE *const head= &print_event_info->head_cache;
+ IO_CACHE *const body= &print_event_info->body_cache;
+ if (!print_event_info->short_form)
+ {
+ bool const last_stmt_event= get_flags(STMT_END_F);
+ print_header(head, print_event_info, !last_stmt_event);
+ my_b_printf(head, "\t%s: table id %lu", name, m_table_id);
+ print_base64(body, print_event_info, !last_stmt_event);
+ }
+
+ if (get_flags(STMT_END_F))
+ {
+ my_b_copy_to_file(head, file);
+ my_b_copy_to_file(body, file);
+ reinit_io_cache(head, WRITE_CACHE, 0, FALSE, TRUE);
+ reinit_io_cache(body, WRITE_CACHE, 0, FALSE, TRUE);
+ }
+}
+#endif
+
/**************************************************************************
Table_map_log_event member functions and support functions
**************************************************************************/
@@ -6148,10 +6327,11 @@ void Table_map_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
{
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\tTable_map: `%s`.`%s` mapped to number %lu\n",
- m_dbnam, m_tblnam, m_table_id);
- print_base64(file, print_event_info);
+ print_header(&print_event_info->head_cache, print_event_info, TRUE);
+ my_b_printf(&print_event_info->head_cache,
+ "\tTable_map: `%s`.`%s` mapped to number %lu\n",
+ m_dbnam, m_tblnam, m_table_id);
+ print_base64(&print_event_info->body_cache, print_event_info, TRUE);
}
}
#endif
@@ -6513,12 +6693,7 @@ int Write_rows_log_event::do_exec_row(TABLE *table)
#ifdef MYSQL_CLIENT
void Write_rows_log_event::print(FILE *file, PRINT_EVENT_INFO* print_event_info)
{
- if (!print_event_info->short_form)
- {
- print_header(file, print_event_info);
- fprintf(file, "\tWrite_rows: table id %lu", m_table_id);
- print_base64(file, print_event_info);
- }
+ Rows_log_event::print_helper(file, print_event_info, "Write_rows");
}
#endif
@@ -6846,12 +7021,7 @@ int Delete_rows_log_event::do_exec_row(TABLE *table)
void Delete_rows_log_event::print(FILE *file,
PRINT_EVENT_INFO* print_event_info)
{
- if (!print_event_info->short_form)
- {
- print_header(file, print_event_info);
- fprintf(file, "\tDelete_rows: table id %lu", m_table_id);
- print_base64(file, print_event_info);
- }
+ Rows_log_event::print_helper(file, print_event_info, "Delete_rows");
}
#endif
@@ -7017,12 +7187,7 @@ int Update_rows_log_event::do_exec_row(TABLE *table)
void Update_rows_log_event::print(FILE *file,
PRINT_EVENT_INFO* print_event_info)
{
- if (!print_event_info->short_form)
- {
- print_header(file, print_event_info);
- fprintf(file, "\tUpdate_rows: table id %lu", m_table_id);
- print_base64(file, print_event_info);
- }
+ Rows_log_event::print_helper(file, print_event_info, "Update_rows");
}
#endif
diff --git a/sql/log_event.h b/sql/log_event.h
index 801a83dcc97..81ce2f18b4d 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -519,14 +519,30 @@ typedef struct st_print_event_info
bzero(db, sizeof(db));
bzero(charset, sizeof(charset));
bzero(time_zone_str, sizeof(time_zone_str));
+ uint const flags = MYF(MY_WME | MY_NABP);
+ init_io_cache(&head_cache, -1, 0, WRITE_CACHE, 0L, FALSE, flags);
+ init_io_cache(&body_cache, -1, 0, WRITE_CACHE, 0L, FALSE, flags);
}
+ ~st_print_event_info() {
+ end_io_cache(&head_cache);
+ end_io_cache(&body_cache);
+ }
+
+
/* Settings on how to print the events */
bool short_form;
bool base64_output;
my_off_t hexdump_from;
uint8 common_header_len;
+ /*
+ These two caches are used by the row-based replication events to
+ collect the header information and the main body of the events
+ making up a statement.
+ */
+ IO_CACHE head_cache;
+ IO_CACHE body_cache;
} PRINT_EVENT_INFO;
#endif
@@ -637,9 +653,11 @@ public:
const Format_description_log_event *description_event);
/* print*() functions are used by mysqlbinlog */
virtual void print(FILE* file, PRINT_EVENT_INFO* print_event_info) = 0;
- void print_timestamp(FILE* file, time_t *ts = 0);
- void print_header(FILE* file, PRINT_EVENT_INFO* print_event_info);
- void print_base64(FILE* file, PRINT_EVENT_INFO* print_event_info);
+ void print_timestamp(IO_CACHE* file, time_t *ts = 0);
+ void print_header(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info,
+ bool is_more);
+ void print_base64(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info,
+ bool is_more);
#endif
static void *operator new(size_t size)
@@ -804,7 +822,7 @@ public:
uint32 q_len_arg);
#endif /* HAVE_REPLICATION */
#else
- void print_query_header(FILE* file, PRINT_EVENT_INFO* print_event_info);
+ void print_query_header(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info);
void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
@@ -1864,6 +1882,10 @@ protected:
Log_event_type event_type,
const Format_description_log_event *description_event);
+#ifdef MYSQL_CLIENT
+ void print_helper(FILE *, PRINT_EVENT_INFO *, char const *const name);
+#endif
+
#ifndef MYSQL_CLIENT
virtual int do_add_row_data(byte *data, my_size_t length);
#endif
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index c6f5ff9c3fe..339ca9d965a 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -140,7 +140,18 @@ MY_LOCALE *my_locale_by_name(const char *name);
#define MAX_ACCEPT_RETRY 10 // Test accept this many times
#define MAX_FIELDS_BEFORE_HASH 32
#define USER_VARS_HASH_SIZE 16
-#define STACK_MIN_SIZE 8192 // Abort if less stack during eval.
+
+/*
+ Value of 9236 discovered through binary search 2006-09-26 on Ubuntu Dapper
+ Drake, libc6 2.3.6-0ubuntu2, Linux kernel 2.6.15-27-686, on x86. (Added
+ 100 bytes as reasonable buffer against growth and other environments'
+ requirements.)
+
+ Feel free to raise this by the smallest amount you can to get the
+ "execution_constants" test to pass.
+ */
+#define STACK_MIN_SIZE 9336 // Abort if less stack during eval.
+
#define STACK_MIN_SIZE_FOR_OPEN 1024*80
#define STACK_BUFF_ALLOC 256 // For stack overrun checks
#ifndef MYSQLD_NET_RETRY_COUNT
@@ -458,7 +469,8 @@ enum enum_parsing_place
NO_MATTER,
IN_HAVING,
SELECT_LIST,
- IN_WHERE
+ IN_WHERE,
+ IN_ON
};
struct st_table;
@@ -588,7 +600,7 @@ void get_default_definer(THD *thd, LEX_USER *definer);
LEX_USER *create_default_definer(THD *thd);
LEX_USER *create_definer(THD *thd, LEX_STRING *user_name, LEX_STRING *host_name);
LEX_USER *get_current_user(THD *thd, LEX_USER *user);
-bool check_string_length(CHARSET_INFO *cs, LEX_STRING *str,
+bool check_string_length(LEX_STRING *str,
const char *err_msg, uint max_length);
enum enum_mysql_completiontype {
@@ -1417,7 +1429,7 @@ void key_restore(byte *to_record, byte *from_key, KEY *key_info,
uint key_length);
bool key_cmp_if_same(TABLE *form,const byte *key,uint index,uint key_length);
void key_unpack(String *to,TABLE *form,uint index);
-bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields);
+bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields);
int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length);
int key_rec_cmp(void *key_info, byte *a, byte *b);
@@ -1427,10 +1439,12 @@ void sql_perror(const char *message);
int vprint_msg_to_log(enum loglevel level, const char *format, va_list args);
-void sql_print_error(const char *format, ...);
-void sql_print_warning(const char *format, ...);
-void sql_print_information(const char *format, ...);
-typedef void (*sql_print_message_func)(const char *format, ...);
+void sql_print_error(const char *format, ...) ATTRIBUTE_FORMAT(printf, 1, 2);
+void sql_print_warning(const char *format, ...) ATTRIBUTE_FORMAT(printf, 1, 2);
+void sql_print_information(const char *format, ...)
+ ATTRIBUTE_FORMAT(printf, 1, 2);
+typedef void (*sql_print_message_func)(const char *format, ...)
+ ATTRIBUTE_FORMAT(printf, 1, 2);
extern sql_print_message_func sql_print_message_handlers[];
/* type of the log table */
@@ -1637,19 +1651,7 @@ extern SHOW_COMP_OPTION have_ndbcluster;
extern SHOW_COMP_OPTION have_partition_db;
extern SHOW_COMP_OPTION have_merge_db;
-#ifdef WITH_CSV_STORAGE_ENGINE
-extern handlerton *tina_hton;
-#endif
-#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
-extern handlerton *ndbcluster_hton;
-#endif
-#ifdef WITH_PARTITION_STORAGE_ENGINE
extern handlerton *partition_hton;
-#endif
-#ifdef WITH_MYISAMMRG_STORAGE_ENGINE
-extern handlerton *myisammrg_hton;
-#endif
-
extern handlerton *myisam_hton;
extern handlerton *heap_hton;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index a3844ea8fc0..6674dd87757 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -354,6 +354,14 @@ my_bool opt_safe_user_create = 0, opt_no_mix_types = 0;
my_bool opt_show_slave_auth_info, opt_sql_bin_update = 0;
my_bool opt_log_slave_updates= 0;
my_bool opt_innodb;
+
+/*
+ Legacy global handlerton. These will be removed (please do not add more).
+*/
+handlerton *heap_hton;
+handlerton *myisam_hton;
+handlerton *partition_hton;
+
#ifdef WITH_INNOBASE_STORAGE_ENGINE
extern ulong innobase_fast_shutdown;
extern ulong innobase_large_page_size;
@@ -1091,7 +1099,7 @@ pthread_handler_t kill_server_thread(void *arg __attribute__((unused)))
extern "C" sig_handler print_signal_warning(int sig)
{
if (global_system_variables.log_warnings)
- sql_print_warning("Got signal %d from thread %d", sig,my_thread_id());
+ sql_print_warning("Got signal %d from thread %ld", sig,my_thread_id());
#ifdef DONT_REMEMBER_SIGNAL
my_sigset(sig,print_signal_warning); /* int. thread system calls */
#endif
@@ -1601,8 +1609,8 @@ static void network_init(void)
if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1))
{
- sql_print_error("The socket file path is too long (> %d): %s",
- sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
+ sql_print_error("The socket file path is too long (> %lu): %s",
+ sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
unireg_abort(1);
}
if ((unix_sock= socket(AF_UNIX, SOCK_STREAM, 0)) < 0)
@@ -2953,7 +2961,7 @@ static void openssl_lock(int mode, openssl_lock_t *lock, const char *file,
}
if (err)
{
- sql_print_error("Fatal: can't %s OpenSSL %s lock", what);
+ sql_print_error("Fatal: can't %s OpenSSL lock", what);
abort();
}
}
@@ -7656,14 +7664,15 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
exit(1);
}
switch (method-1) {
- case 0:
- method_conv= MI_STATS_METHOD_NULLS_NOT_EQUAL;
+ case 2:
+ method_conv= MI_STATS_METHOD_IGNORE_NULLS;
break;
case 1:
method_conv= MI_STATS_METHOD_NULLS_EQUAL;
break;
- case 2:
- method_conv= MI_STATS_METHOD_IGNORE_NULLS;
+ case 0:
+ default:
+ method_conv= MI_STATS_METHOD_NULLS_NOT_EQUAL;
break;
}
global_system_variables.myisam_stats_method= method_conv;
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index d8922489d5f..0abb37b5345 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -4913,9 +4913,17 @@ static SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, Item_func *cond_func,
{
Item_func_in *func=(Item_func_in*) cond_func;
+ /*
+ Array for IN() is constructed when all values have the same result
+ type. Tree won't be built for values with different result types,
+ so we check it here to avoid unnecessary work.
+ */
+ if (!func->array)
+ break;
+
if (inv)
{
- if (func->array && func->cmp_type != ROW_RESULT)
+ if (func->array->result_type() != ROW_RESULT)
{
/*
We get here for conditions in form "t.key NOT IN (c1, c2, ...)",
@@ -6946,8 +6954,9 @@ void SEL_ARG::test_use_count(SEL_ARG *root)
ulong count=count_key_part_usage(root,pos->next_key_part);
if (count > pos->next_key_part->use_count)
{
- sql_print_information("Use_count: Wrong count for key at 0x%lx, %lu should be %lu",
- pos,pos->next_key_part->use_count,count);
+ sql_print_information("Use_count: Wrong count for key at 0x%lx, %lu "
+ "should be %lu", (long unsigned int)pos,
+ pos->next_key_part->use_count, count);
return;
}
pos->next_key_part->test_use_count(root);
@@ -6955,7 +6964,7 @@ void SEL_ARG::test_use_count(SEL_ARG *root)
}
if (e_count != elements)
sql_print_warning("Wrong use count: %u (should be %u) for tree at 0x%lx",
- e_count, elements, (gptr) this);
+ e_count, elements, (long unsigned int) this);
}
#endif
@@ -7526,42 +7535,42 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length)
}
-bool QUICK_SELECT_I::check_if_keys_used(List<Item> *fields)
+bool QUICK_SELECT_I::is_keys_used(const MY_BITMAP *fields)
{
- return check_if_key_used(head, index, *fields);
+ return is_key_used(head, index, fields);
}
-bool QUICK_INDEX_MERGE_SELECT::check_if_keys_used(List<Item> *fields)
+bool QUICK_INDEX_MERGE_SELECT::is_keys_used(const MY_BITMAP *fields)
{
QUICK_RANGE_SELECT *quick;
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
while ((quick= it++))
{
- if (check_if_key_used(head, quick->index, *fields))
+ if (is_key_used(head, quick->index, fields))
return 1;
}
return 0;
}
-bool QUICK_ROR_INTERSECT_SELECT::check_if_keys_used(List<Item> *fields)
+bool QUICK_ROR_INTERSECT_SELECT::is_keys_used(const MY_BITMAP *fields)
{
QUICK_RANGE_SELECT *quick;
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
while ((quick= it++))
{
- if (check_if_key_used(head, quick->index, *fields))
+ if (is_key_used(head, quick->index, fields))
return 1;
}
return 0;
}
-bool QUICK_ROR_UNION_SELECT::check_if_keys_used(List<Item> *fields)
+bool QUICK_ROR_UNION_SELECT::is_keys_used(const MY_BITMAP *fields)
{
QUICK_SELECT_I *quick;
List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
while ((quick= it++))
{
- if (quick->check_if_keys_used(fields))
+ if (quick->is_keys_used(fields))
return 1;
}
return 0;
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 6f26f9f782c..6099608f7cd 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -224,10 +224,9 @@ public:
virtual void add_info_string(String *str) {};
/*
Return 1 if any index used by this quick select
- a) uses field that is listed in passed field list or
- b) is automatically updated (like a timestamp)
+ uses field which is marked in passed bitmap.
*/
- virtual bool check_if_keys_used(List<Item> *fields);
+ virtual bool is_keys_used(const MY_BITMAP *fields);
/*
rowid of last row retrieved by this quick select. This is used only when
@@ -425,7 +424,7 @@ public:
int get_type() { return QS_TYPE_INDEX_MERGE; }
void add_keys_and_lengths(String *key_names, String *used_lengths);
void add_info_string(String *str);
- bool check_if_keys_used(List<Item> *fields);
+ bool is_keys_used(const MY_BITMAP *fields);
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
@@ -484,7 +483,7 @@ public:
int get_type() { return QS_TYPE_ROR_INTERSECT; }
void add_keys_and_lengths(String *key_names, String *used_lengths);
void add_info_string(String *str);
- bool check_if_keys_used(List<Item> *fields);
+ bool is_keys_used(const MY_BITMAP *fields);
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
@@ -538,7 +537,7 @@ public:
int get_type() { return QS_TYPE_ROR_UNION; }
void add_keys_and_lengths(String *key_names, String *used_lengths);
void add_info_string(String *str);
- bool check_if_keys_used(List<Item> *fields);
+ bool is_keys_used(const MY_BITMAP *fields);
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 2a06a649eca..a20fca9404b 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -443,11 +443,9 @@ bool partition_info::check_engine_mix(handlerton **engine_array, uint no_parts)
DBUG_RETURN(TRUE);
}
} while (++i < no_parts);
- if (engine_array[0] == myisammrg_hton ||
- engine_array[0] == tina_hton)
+ if (engine_array[0]->flags & HTON_NO_PARTITION)
{
- my_error(ER_PARTITION_MERGE_ERROR, MYF(0),
- engine_array[0] == myisammrg_hton ? "MyISAM Merge" : "CSV");
+ my_error(ER_PARTITION_MERGE_ERROR, MYF(0));
DBUG_RETURN(TRUE);
}
DBUG_RETURN(FALSE);
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 5fb209b8b11..4369c288cd5 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -1218,8 +1218,8 @@ static void fix_tx_isolation(THD *thd, enum_var_type type)
thd->variables.tx_isolation);
}
-static void fix_completion_type(THD *thd __attribute__(unused),
- enum_var_type type __attribute__(unused)) {}
+static void fix_completion_type(THD *thd __attribute__((unused)),
+ enum_var_type type __attribute__((unused))) {}
static int check_completion_type(THD *thd, set_var *var)
{
@@ -1258,14 +1258,14 @@ static void fix_net_retry_count(THD *thd, enum_var_type type)
thd->net.retry_count=thd->variables.net_retry_count;
}
#else /* HAVE_REPLICATION */
-static void fix_net_read_timeout(THD *thd __attribute__(unused),
- enum_var_type type __attribute__(unused))
+static void fix_net_read_timeout(THD *thd __attribute__((unused)),
+ enum_var_type type __attribute__((unused)))
{}
-static void fix_net_write_timeout(THD *thd __attribute__(unused),
- enum_var_type type __attribute__(unused))
+static void fix_net_write_timeout(THD *thd __attribute__((unused)),
+ enum_var_type type __attribute__((unused)))
{}
-static void fix_net_retry_count(THD *thd __attribute__(unused),
- enum_var_type type __attribute__(unused))
+static void fix_net_retry_count(THD *thd __attribute__((unused)),
+ enum_var_type type __attribute__((unused)))
{}
#endif /* HAVE_REPLICATION */
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index c90e771fd40..0bcd15c2b82 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -3791,7 +3791,7 @@ ER_WRONG_MRG_TABLE
cze "V-B¹echny tabulky v MERGE tabulce nejsou definovány stejnì"
dan "Tabellerne i MERGE er ikke defineret ens"
nla "Niet alle tabellen in de MERGE tabel hebben identieke gedefinities"
- eng "All tables in the MERGE table are not identically defined"
+ eng "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist"
est "Kõik tabelid MERGE tabeli määratluses ei ole identsed"
fre "Toutes les tables de la table de type MERGE n'ont pas la même définition"
ger "Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert"
@@ -5960,34 +5960,48 @@ ER_EVENT_SET_VAR_ERROR
eng "Error during starting/stopping of the scheduler. Error code %u"
ger "Fehler während des Startens oder Anhalten des Schedulers. Fehlercode %u"
ER_PARTITION_MERGE_ERROR
- eng "%s handler cannot be used in partitioned tables"
- ger "%s-Handler kann in partitionierten Tabellen nicht verwendet werden"
- swe "%s kan inte användas i en partitionerad tabell"
+ eng "Engine cannot be used in partitioned tables"
+ ger "Engine kann in partitionierten Tabellen nicht verwendet werden"
+ swe "Engine inte användas i en partitionerad tabell"
ER_CANT_ACTIVATE_LOG
eng "Cannot activate '%-.64s' log"
ger "Kann Logdatei '%-.64s' nicht aktivieren"
ER_RBR_NOT_AVAILABLE
eng "The server was not built with row-based replication"
+ER_BASE64_DECODE_ERROR
+ eng "Decoding of base64 string failed"
+ swe "Avkodning av base64 sträng misslyckades"
ger "Der Server hat keine zeilenbasierte Replikation"
ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
eng "Triggers can not be created on system tables"
ger "Trigger können nicht auf Systemtabellen erzeugt werden"
ER_EVENT_RECURSIVITY_FORBIDDEN
eng "Recursivity of EVENT DDL statements is forbidden when body is present"
+ ger "Rekursivität von EVENT-DDL-Anweisungen ist unzulässig wenn ein Hauptteil (Body) existiert"
ER_EVENTS_DB_ERROR
eng "Cannot proceed because the tables used by events were found damaged at server start"
+ ger "Kann nicht weitermachen, weil die Tabellen, die von Events verwendet werden, beim Serverstart als beschädigt markiert wurden"
ER_ONLY_INTEGERS_ALLOWED
- eng "Only normal integers allowed as number here"
+ eng "Only integers allowed as number here"
+ ger "An dieser Stelle sind nur Ganzzahlen zulässig"
ER_AUTOINC_READ_FAILED
eng "Failed to read auto-increment value from storage engine"
+ ger "Lesen des Autoincrement-Werts von der Speicher-Engine fehlgeschlagen"
ER_USERNAME
- eng "user name"
+ eng "user name"
+ ger "Benutzername"
ER_HOSTNAME
- eng "host name"
+ eng "host name"
+ ger "Hostname"
ER_WRONG_STRING_LENGTH
- eng "String '%-.70s' is too long for %s (should be no longer than %d)"
+ eng "String '%-.70s' is too long for %s (should be no longer than %d)"
+ ger "String '%-.70s' ist zu lang für %s (sollte nicht länger sein als %d)"
ER_UNSUPORTED_LOG_ENGINE
eng "This storage engine cannot be used for log tables""
+ ger "Diese Speicher-Engine kann für Logtabellen nicht verwendet werden"
ER_BAD_LOG_STATEMENT
eng "You cannot '%s' a log table if logging is enabled"
+ ger "Sie können eine Logtabelle nicht '%s', wenn Loggen angeschaltet ist"
+ER_NON_INSERTABLE_TABLE
+ eng "The target table %-.100s of the %s is not insertable-into"
diff --git a/sql/slave.cc b/sql/slave.cc
index f9645fc83e3..1852ff68070 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -3101,17 +3101,22 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
type_code != START_EVENT_V3 && type_code!= FORMAT_DESCRIPTION_EVENT))
{
DBUG_PRINT("info", ("event skipped"));
- if (thd->options & OPTION_BEGIN)
- rli->inc_event_relay_log_pos();
- else
- {
- rli->inc_group_relay_log_pos((type_code == ROTATE_EVENT ||
- type_code == STOP_EVENT ||
- type_code == FORMAT_DESCRIPTION_EVENT) ?
- LL(0) : ev->log_pos,
- 1/* skip lock*/);
- flush_relay_log_info(rli);
- }
+ /*
+ We only skip the event here and do not increase the group log
+ position. In the event that we have to restart, this means
+ that we might have to skip the event again, but that is a
+ minor issue.
+
+ If we were to increase the group log position when skipping an
+ event, it might be that we are restarting at the wrong
+ position and have events before that we should have executed,
+ so not increasing the group log position is a sure bet in this
+ case.
+
+ In this way, we just step the group log position when we
+ *know* that we are at the end of a group.
+ */
+ rli->inc_event_relay_log_pos();
/*
Protect against common user error of setting the counter to 1
@@ -4447,7 +4452,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
suppress_warnings= 0;
sql_print_error("Slave I/O thread: error %s to master \
'%s@%s:%d': \
-Error: '%s' errno: %d retry-time: %d retries: %d",
+Error: '%s' errno: %d retry-time: %d retries: %lu",
(reconnect ? "reconnecting" : "connecting"),
mi->user,mi->host,mi->port,
mysql_error(mysql), last_errno,
diff --git a/sql/slave.h b/sql/slave.h
index 9d76277da0d..e70b2e4b326 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -152,7 +152,7 @@ typedef struct st_master_info
/* the variables below are needed because we can change masters on the fly */
char master_log_name[FN_REFLEN];
char host[HOSTNAME_LENGTH+1];
- char user[USERNAME_BYTE_LENGTH+1];
+ char user[USERNAME_LENGTH+1];
char password[MAX_PASSWORD_LENGTH+1];
my_bool ssl; // enables use of SSL connection if true
char ssl_ca[FN_REFLEN], ssl_capath[FN_REFLEN], ssl_cert[FN_REFLEN];
@@ -272,7 +272,8 @@ const char *print_slave_db_safe(const char *db);
int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code);
void skip_load_data_infile(NET* net);
void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli,
- int err_code, const char* msg, ...);
+ int err_code, const char* msg, ...)
+ ATTRIBUTE_FORMAT(printf, 4, 5);
void end_slave(); /* clean up */
void init_master_info_with_options(MASTER_INFO* mi);
diff --git a/sql/sp.cc b/sql/sp.cc
index 8ddf55e1837..3411a18c17a 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -387,16 +387,16 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
{
LEX *old_lex= thd->lex, newlex;
String defstr;
- char old_db_buf[NAME_BYTE_LEN+1];
+ char old_db_buf[NAME_LEN+1];
LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;
ulong old_sql_mode= thd->variables.sql_mode;
ha_rows old_select_limit= thd->variables.select_limit;
sp_rcontext *old_spcont= thd->spcont;
-
- char definer_user_name_holder[USERNAME_BYTE_LENGTH + 1];
+
+ char definer_user_name_holder[USERNAME_LENGTH + 1];
LEX_STRING definer_user_name= { definer_user_name_holder,
- USERNAME_BYTE_LENGTH };
+ USERNAME_LENGTH };
char definer_host_name_holder[HOSTNAME_LENGTH + 1];
LEX_STRING definer_host_name= { definer_host_name_holder, HOSTNAME_LENGTH };
@@ -495,7 +495,7 @@ db_create_routine(THD *thd, int type, sp_head *sp)
int ret;
TABLE *table;
char definer[USER_HOST_BUFF_SIZE];
- char old_db_buf[NAME_BYTE_LEN+1];
+ char old_db_buf[NAME_LEN+1];
LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;
DBUG_ENTER("db_create_routine");
@@ -1606,7 +1606,17 @@ sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex,
*/
if (!thd->net.report_error)
{
- char n[NAME_LEN*2+2];
+ /*
+ SP allows the full NAME_LEN chars, thus we have to allocate enough
+ size in bytes. Otherwise a stack overrun could happen if `name`
+ contains a multibyte sequence. `db` is still safe because the
+ rest of the server checks against NAME_LEN bytes and not chars.
+ Hence, the overrun happens only if the name is longer than 32 chars
+ and uses multibyte (cyrillic, greek, etc.)
+
+ !! Change 3 with SYSTEM_CHARSET_MBMAXLEN when it's defined.
+ */
+ char n[NAME_LEN*3*2+2];
/* m_qname.str is not always \0 terminated */
memcpy(n, name.m_qname.str, name.m_qname.length);
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index db5f35cc5ee..a061ae12dd1 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -962,7 +962,7 @@ bool
sp_head::execute(THD *thd)
{
DBUG_ENTER("sp_head::execute");
- char old_db_buf[NAME_BYTE_LEN+1];
+ char old_db_buf[NAME_LEN+1];
LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;
sp_rcontext *ctx;
@@ -2010,8 +2010,8 @@ sp_head::set_info(longlong created, longlong modified,
void
sp_head::set_definer(const char *definer, uint definerlen)
{
- char user_name_holder[USERNAME_BYTE_LENGTH + 1];
- LEX_STRING user_name= { user_name_holder, USERNAME_BYTE_LENGTH };
+ char user_name_holder[USERNAME_LENGTH + 1];
+ LEX_STRING user_name= { user_name_holder, USERNAME_LENGTH };
char host_name_holder[HOSTNAME_LENGTH + 1];
LEX_STRING host_name= { host_name_holder, HOSTNAME_LENGTH };
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 5badccef544..c55fc744cd0 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -169,7 +169,7 @@ static byte* acl_entry_get_key(acl_entry *entry,uint *length,
}
#define IP_ADDR_STRLEN (3+1+3+1+3+1+3)
-#define ACL_KEY_LENGTH (IP_ADDR_STRLEN+1+NAME_BYTE_LEN+1+USERNAME_BYTE_LENGTH+1)
+#define ACL_KEY_LENGTH (IP_ADDR_STRLEN+1+NAME_LEN+1+USERNAME_LENGTH+1)
static DYNAMIC_ARRAY acl_hosts,acl_users,acl_dbs;
static MEM_ROOT mem, memex;
@@ -312,7 +312,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
READ_RECORD read_record_info;
my_bool return_val= 1;
bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE;
- char tmp_name[NAME_BYTE_LEN+1];
+ char tmp_name[NAME_LEN+1];
int password_length;
DBUG_ENTER("acl_load");
@@ -2400,7 +2400,7 @@ static GRANT_NAME *name_hash_search(HASH *name_hash,
const char *user, const char *tname,
bool exact)
{
- char helping [NAME_BYTE_LEN*2+USERNAME_BYTE_LENGTH+3];
+ char helping [NAME_LEN*2+USERNAME_LENGTH+3];
uint len;
GRANT_NAME *grant_name,*found=0;
HASH_SEARCH_STATE state;
@@ -3307,7 +3307,7 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
{
List_iterator <LEX_USER> str_list (list);
LEX_USER *Str, *tmp_Str;
- char tmp_db[NAME_BYTE_LEN+1];
+ char tmp_db[NAME_LEN+1];
bool create_new_users=0;
TABLE_LIST tables[2];
DBUG_ENTER("mysql_grant");
@@ -3371,7 +3371,7 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
{
result= TRUE;
continue;
- }
+ }
if (replace_user_table(thd, tables[0].table, *Str,
(!db ? rights : 0), revoke_grant, create_new_users,
test(thd->variables.sql_mode &
@@ -4011,7 +4011,7 @@ err2:
bool check_grant_db(THD *thd,const char *db)
{
Security_context *sctx= thd->security_ctx;
- char helping [NAME_BYTE_LEN+USERNAME_BYTE_LENGTH+2];
+ char helping [NAME_LEN+USERNAME_LENGTH+2];
uint len;
bool error= 1;
diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc
index 9f1a0561138..264e3e2b988 100644
--- a/sql/sql_analyse.cc
+++ b/sql/sql_analyse.cc
@@ -806,9 +806,9 @@ void field_str::get_opt_type(String *answer, ha_rows total_rows)
else if (num_info.decimals) // DOUBLE(%d,%d) sometime
{
if (num_info.dval > -FLT_MAX && num_info.dval < FLT_MAX)
- sprintf(buff, "FLOAT(%d,%d)", num_info.integers, num_info.decimals);
+ sprintf(buff, "FLOAT(%d,%d)", (num_info.integers + num_info.decimals), num_info.decimals);
else
- sprintf(buff, "DOUBLE(%d,%d)", num_info.integers, num_info.decimals);
+ sprintf(buff, "DOUBLE(%d,%d)", (num_info.integers + num_info.decimals), num_info.decimals);
}
else if (ev_num_info.llval >= -128 &&
ev_num_info.ullval <=
@@ -915,10 +915,10 @@ void field_real::get_opt_type(String *answer,
else
{
if (min_arg >= -FLT_MAX && max_arg <= FLT_MAX)
- sprintf(buff, "FLOAT(%d,%d)", (int) max_length - (item->decimals + 1),
+ sprintf(buff, "FLOAT(%d,%d)", (int) max_length - (item->decimals + 1) + max_notzero_dec_len,
max_notzero_dec_len);
else
- sprintf(buff, "DOUBLE(%d,%d)", (int) max_length - (item->decimals + 1),
+ sprintf(buff, "DOUBLE(%d,%d)", (int) max_length - (item->decimals + 1) + max_notzero_dec_len,
max_notzero_dec_len);
answer->append(buff, (uint) strlen(buff));
}
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 9e3049d433b..a530c7f7fdc 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1461,8 +1461,11 @@ void update_non_unique_table_error(TABLE_LIST *update,
*/
if (update->view)
{
+ /* Issue the ER_NON_INSERTABLE_TABLE error for an INSERT */
if (update->view == duplicate->view)
- my_error(ER_NON_UPDATABLE_TABLE, MYF(0), update->alias, operation);
+ my_error(!strncmp(operation, "INSERT", 6) ?
+ ER_NON_INSERTABLE_TABLE : ER_NON_UPDATABLE_TABLE, MYF(0),
+ update->alias, operation);
else
my_error(ER_VIEW_PREVENT_UPDATE, MYF(0),
(duplicate->view ? duplicate->alias : update->alias),
diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc
index 0939ad66cd0..23ca5330053 100644
--- a/sql/sql_binlog.cc
+++ b/sql/sql_binlog.cc
@@ -31,6 +31,7 @@
void mysql_client_binlog_statement(THD* thd)
{
+ DBUG_ENTER("mysql_client_binlog_statement");
DBUG_PRINT("info",("binlog base64: '%*s'",
(thd->lex->comment.length < 2048 ?
thd->lex->comment.length : 2048),
@@ -43,8 +44,8 @@ void mysql_client_binlog_statement(THD* thd)
my_bool nsok= thd->net.no_send_ok;
thd->net.no_send_ok= TRUE;
- const my_size_t coded_len= thd->lex->comment.length + 1;
- const my_size_t event_len= base64_needed_decoded_length(coded_len);
+ my_size_t coded_len= thd->lex->comment.length + 1;
+ my_size_t decoded_len= base64_needed_decoded_length(coded_len);
DBUG_ASSERT(coded_len > 0);
/*
@@ -57,9 +58,8 @@ void mysql_client_binlog_statement(THD* thd)
new Format_description_log_event(4);
const char *error= 0;
- char *buf= (char *) my_malloc(event_len, MYF(MY_WME));
+ char *buf= (char *) my_malloc(decoded_len, MYF(MY_WME));
Log_event *ev = 0;
- int res;
/*
Out of memory check
@@ -73,43 +73,97 @@ void mysql_client_binlog_statement(THD* thd)
thd->rli_fake->sql_thd= thd;
thd->rli_fake->no_storage= TRUE;
- res= base64_decode(thd->lex->comment.str, coded_len, buf);
-
- DBUG_PRINT("info",("binlog base64 decoded_len=%d, event_len=%d\n",
- res, uint4korr(buf + EVENT_LEN_OFFSET)));
- /*
- Note that 'res' is the correct event length, 'event_len' was
- calculated based on the base64-string that possibly contained
- extra spaces, so it can be longer than the real event.
- */
- if (res < EVENT_LEN_OFFSET
- || (uint) res != uint4korr(buf+EVENT_LEN_OFFSET))
+ for (char const *strptr= thd->lex->comment.str ;
+ strptr < thd->lex->comment.str + thd->lex->comment.length ; )
{
- my_error(ER_SYNTAX_ERROR, MYF(0));
- goto end;
- }
-
- ev= Log_event::read_log_event(buf, res, &error, desc);
+ char const *endptr= 0;
+ int bytes_decoded= base64_decode(strptr, coded_len, buf, &endptr);
+
+ DBUG_PRINT("info",
+ ("bytes_decoded=%d; strptr=0x%lu; endptr=0x%lu ('%c':%d)",
+ bytes_decoded, strptr, endptr, *endptr, *endptr));
+
+ if (bytes_decoded < 0)
+ {
+ my_error(ER_BASE64_DECODE_ERROR, MYF(0));
+ goto end;
+ }
+ else if (bytes_decoded == 0)
+ break; // If no bytes were read, the string contained only whitespace
+
+ DBUG_ASSERT(bytes_decoded > 0);
+ DBUG_ASSERT(endptr > strptr);
+ coded_len-= endptr - strptr;
+ strptr= endptr;
- DBUG_PRINT("info",("binlog base64 err=%s", error));
- if (!ev)
- {
/*
- This could actually be an out-of-memory, but it is more
- likely causes by a bad statement
+ Now we have one or more events stored in the buffer. The size of
+ the buffer is computed based on how much base64-encoded data
+ there were, so there should be ample space for the data (maybe
+ even too much, since a statement can consist of a considerable
+ number of events).
+
+ TODO: Switch to use a stream-based base64 encoder/decoder in
+ order to be able to read exactly what is necessary.
*/
- my_error(ER_SYNTAX_ERROR, MYF(0));
- goto end;
- }
- DBUG_PRINT("info",("ev->get_type_code()=%d", ev->get_type_code()));
- DBUG_PRINT("info",("buf+EVENT_TYPE_OFFSET=%d", buf+EVENT_TYPE_OFFSET));
+ DBUG_PRINT("info",("binlog base64 decoded_len=%d, bytes_decoded=%d",
+ decoded_len, bytes_decoded));
- ev->thd= thd;
- if (ev->exec_event(thd->rli_fake))
- {
- my_error(ER_UNKNOWN_ERROR, MYF(0), "Error executing BINLOG statement");
- goto end;
+ /*
+ Now we start to read events of the buffer, until there are no
+ more.
+ */
+ for (char *bufptr= buf ; bytes_decoded > 0 ; )
+ {
+ /*
+ Checking that the first event in the buffer is not truncated.
+ */
+ ulong event_len= uint4korr(bufptr + EVENT_LEN_OFFSET);
+ DBUG_PRINT("info", ("event_len=%lu, bytes_decoded=%d",
+ event_len, bytes_decoded));
+ if (bytes_decoded < EVENT_LEN_OFFSET || (uint) bytes_decoded < event_len)
+ {
+ my_error(ER_SYNTAX_ERROR, MYF(0));
+ goto end;
+ }
+
+ ev= Log_event::read_log_event(bufptr, event_len, &error, desc);
+
+ DBUG_PRINT("info",("binlog base64 err=%s", error));
+ if (!ev)
+ {
+ /*
+ This could actually be an out-of-memory error, but it is more
+ likely caused by a bad statement
+ */
+ my_error(ER_SYNTAX_ERROR, MYF(0));
+ goto end;
+ }
+
+ bytes_decoded -= event_len;
+ bufptr += event_len;
+
+ DBUG_PRINT("info",("ev->get_type_code()=%d", ev->get_type_code()));
+ DBUG_PRINT("info",("bufptr+EVENT_TYPE_OFFSET=0x%lx",
+ bufptr+EVENT_TYPE_OFFSET));
+ DBUG_PRINT("info", ("bytes_decoded=%d; bufptr=0x%lx; buf[EVENT_LEN_OFFSET]=%u",
+ bytes_decoded, bufptr, uint4korr(bufptr+EVENT_LEN_OFFSET)));
+ ev->thd= thd;
+ if (int err= ev->exec_event(thd->rli_fake))
+ {
+ DBUG_PRINT("info", ("exec_event() - error=%d", error));
+ /*
+ TODO: Maybe a better error message since the BINLOG statement
+ now contains several events.
+ */
+ my_error(ER_UNKNOWN_ERROR, MYF(0), "Error executing BINLOG statement");
+ goto end;
+ }
+
+ delete ev;
+ ev= 0;
+ }
}
/*
@@ -126,10 +180,7 @@ end:
*/
thd->net.no_send_ok= nsok;
- if (ev)
- delete ev;
- if (desc)
- delete desc;
- if (buf)
- my_free(buf, MYF(0));
+ delete desc;
+ my_free(buf, MYF(MY_ALLOW_ZERO_PTR));
+ DBUG_VOID_RETURN;
}
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index e0831b0f8ee..ee3b8aa79fe 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -2389,7 +2389,7 @@ Query_cache::register_tables_from_list(TABLE_LIST *tables_used,
tables_used->engine_data))
DBUG_RETURN(0);
- if (tables_used->table->s->db_type == myisammrg_hton)
+ if (tables_used->table->s->db_type->db_type == DB_TYPE_MRG_MYISAM)
{
ha_myisammrg *handler = (ha_myisammrg *) tables_used->table->file;
MYRG_INFO *file = handler->myrg_info();
@@ -3013,7 +3013,7 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used,
"other non-cacheable table(s)"));
DBUG_RETURN(0);
}
- if (tables_used->table->s->db_type == myisammrg_hton)
+ if (tables_used->table->s->db_type->db_type == DB_TYPE_MRG_MYISAM)
{
ha_myisammrg *handler = (ha_myisammrg *)tables_used->table->file;
MYRG_INFO *file = handler->myrg_info();
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index ceaf568ac6b..3fd0e621422 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -630,7 +630,21 @@ bool THD::store_globals()
}
-/* Cleanup after a query */
+/*
+ Cleanup after query.
+
+ SYNOPSIS
+ THD::cleanup_after_query()
+
+ DESCRIPTION
+ This function is used to reset thread data to its default state.
+
+ NOTE
+ This function is not suitable for setting thread data to some
+ non-default values: since there is only one replication thread,
+ different master threads may overwrite each other's data on the
+ slave.
+*/
void THD::cleanup_after_query()
{
@@ -655,6 +669,7 @@ void THD::cleanup_after_query()
where= THD::DEFAULT_WHERE;
}
+
/*
Convert a string to another character set
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 6b46c9676f7..95283ec2fc8 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -930,6 +930,7 @@ public:
/*
Public interface to write RBR events to the binlog
*/
+ void binlog_start_trans_and_stmt();
int binlog_write_table_map(TABLE *table, bool is_transactional);
int binlog_write_row(TABLE* table, bool is_transactional,
MY_BITMAP const* cols, my_size_t colcnt,
diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc
index 2784e71ccae..46fae3ffb99 100644
--- a/sql/sql_cursor.cc
+++ b/sql/sql_cursor.cc
@@ -45,7 +45,7 @@ class Sensitive_cursor: public Server_side_cursor
query_id_t query_id;
struct Engine_info
{
- const handlerton *ht;
+ handlerton *ht;
void *read_view;
};
Engine_info ht_info[MAX_HA];
@@ -318,12 +318,12 @@ Sensitive_cursor::post_open(THD *thd)
info= &ht_info[0];
for (handlerton **pht= thd->transaction.stmt.ht; *pht; pht++)
{
- const handlerton *ht= *pht;
+ handlerton *ht= *pht;
close_at_commit|= test(ht->flags & HTON_CLOSE_CURSORS_AT_COMMIT);
if (ht->create_cursor_read_view)
{
info->ht= ht;
- info->read_view= (ht->create_cursor_read_view)();
+ info->read_view= (ht->create_cursor_read_view)(ht, thd);
++info;
}
}
@@ -433,7 +433,7 @@ Sensitive_cursor::fetch(ulong num_rows)
thd->set_n_backup_active_arena(this, &backup_arena);
for (info= ht_info; info->read_view ; info++)
- (info->ht->set_cursor_read_view)(info->read_view);
+ (info->ht->set_cursor_read_view)(info->ht, thd, info->read_view);
join->fetch_limit+= num_rows;
@@ -454,7 +454,7 @@ Sensitive_cursor::fetch(ulong num_rows)
reset_thd(thd);
for (info= ht_info; info->read_view; info++)
- (info->ht->set_cursor_read_view)(0);
+ (info->ht->set_cursor_read_view)(info->ht, thd, 0);
if (error == NESTED_LOOP_CURSOR_LIMIT)
{
@@ -487,7 +487,7 @@ Sensitive_cursor::close()
for (Engine_info *info= ht_info; info->read_view; info++)
{
- (info->ht->close_cursor_read_view)(info->read_view);
+ (info->ht->close_cursor_read_view)(info->ht, thd, info->read_view);
info->read_view= 0;
info->ht= 0;
}
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 946a6233bc2..4dcbf9af4a0 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -112,7 +112,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
if (!table_list->updatable)
{
- my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "INSERT");
+ my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT");
return -1;
}
@@ -228,7 +228,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
(table_list->view &&
check_view_insertability(thd, table_list)))
{
- my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "INSERT");
+ my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT");
return -1;
}
@@ -1266,7 +1266,7 @@ err:
if (thd->lex->current_select)
thd->lex->current_select->no_error= 0; // Give error
table->file->print_error(error,MYF(0));
-
+
before_trg_err:
table->file->restore_auto_increment(prev_insert_id);
if (key)
@@ -1334,13 +1334,16 @@ public:
bool query_start_used, ignore, log_query;
bool stmt_depends_on_first_successful_insert_id_in_prev_stmt;
ulonglong first_successful_insert_id_in_prev_stmt;
+ ulonglong forced_insert_id;
+ ulong auto_increment_increment;
+ ulong auto_increment_offset;
timestamp_auto_set_type timestamp_field_type;
LEX_STRING query;
delayed_row(LEX_STRING const query_arg, enum_duplicates dup_arg,
bool ignore_arg, bool log_query_arg)
: record(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg),
- query(query_arg)
+ forced_insert_id(0), query(query_arg)
{}
~delayed_row()
{
@@ -1698,6 +1701,7 @@ write_delayed(THD *thd,TABLE *table, enum_duplicates duplic,
{
delayed_row *row;
delayed_insert *di=thd->di;
+ const Discrete_interval *forced_auto_inc;
DBUG_ENTER("write_delayed");
DBUG_PRINT("enter", ("query = '%s' length %u", query.str, query.length));
@@ -1747,6 +1751,17 @@ write_delayed(THD *thd,TABLE *table, enum_duplicates duplic,
thd->first_successful_insert_id_in_prev_stmt;
row->timestamp_field_type= table->timestamp_field_type;
+ /* Copy session variables. */
+ row->auto_increment_increment= thd->variables.auto_increment_increment;
+ row->auto_increment_offset= thd->variables.auto_increment_offset;
+ /* Copy the next forced auto increment value, if any. */
+ if ((forced_auto_inc= thd->auto_inc_intervals_forced.get_next()))
+ {
+ row->forced_insert_id= forced_auto_inc->minimum();
+ DBUG_PRINT("delayed", ("transmitting auto_inc: %lu",
+ (ulong) row->forced_insert_id));
+ }
+
di->rows.push_back(row);
di->stacked_inserts++;
di->status=1;
@@ -1998,6 +2013,10 @@ pthread_handler_t handle_delayed_insert(void *arg)
MYSQL_LOCK *lock=thd->lock;
thd->lock=0;
pthread_mutex_unlock(&di->mutex);
+ /*
+ We need to release next_insert_id before unlocking. This is
+ enforced by handler::ha_external_lock().
+ */
di->table->file->ha_release_auto_increment();
mysql_unlock_tables(thd, lock);
di->group_count=0;
@@ -2014,6 +2033,10 @@ err:
rolled back. We only need to roll back a potential statement
transaction, since real transactions are rolled back in
close_thread_tables().
+
+ TODO: This is not true any more, table maps are generated on the
+ first call to ha_*_row() instead. Remove code that are used to
+ cover for the case outlined above.
*/
ha_rollback_stmt(thd);
@@ -2123,8 +2146,18 @@ bool delayed_insert::handle_inserts(void)
use values from the previous interval (of the previous rows).
*/
bool log_query= (row->log_query && row->query.str != NULL);
+ DBUG_PRINT("delayed", ("query: '%s' length: %u", row->query.str ?
+ row->query.str : "[NULL]", row->query.length));
if (log_query)
{
+ /*
+ This is the first value of an INSERT statement.
+ It is the right place to clear a forced insert_id.
+ This is usually done after the last value of an INSERT statement,
+ but we won't know this in the insert delayed thread. However,
+ clearing it before the first value of a statement is sufficiently
+ equivalent to clearing it after the last value of the previous one.
+ */
table->file->ha_release_auto_increment();
thd.auto_inc_intervals_in_cur_stmt_for_binlog.empty();
}
@@ -2134,6 +2167,17 @@ bool delayed_insert::handle_inserts(void)
row->stmt_depends_on_first_successful_insert_id_in_prev_stmt;
table->timestamp_field_type= row->timestamp_field_type;
+ /* Copy the session variables. */
+ thd.variables.auto_increment_increment= row->auto_increment_increment;
+ thd.variables.auto_increment_offset= row->auto_increment_offset;
+ /* Copy a forced insert_id, if any. */
+ if (row->forced_insert_id)
+ {
+ DBUG_PRINT("delayed", ("received auto_inc: %lu",
+ (ulong) row->forced_insert_id));
+ thd.force_one_auto_inc_interval(row->forced_insert_id);
+ }
+
info.ignore= row->ignore;
info.handle_duplicates= row->dup;
if (info.ignore ||
@@ -2211,6 +2255,7 @@ bool delayed_insert::handle_inserts(void)
/* This should never happen */
table->file->print_error(error,MYF(0));
sql_print_error("%s",thd.net.last_error);
+ DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed in loop"));
goto err;
}
query_cache_invalidate3(&thd, table, 1);
@@ -2253,6 +2298,7 @@ bool delayed_insert::handle_inserts(void)
{ // This shouldn't happen
table->file->print_error(error,MYF(0));
sql_print_error("%s",thd.net.last_error);
+ DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed after loop"));
goto err;
}
query_cache_invalidate3(&thd, table, 1);
@@ -2260,13 +2306,16 @@ bool delayed_insert::handle_inserts(void)
DBUG_RETURN(0);
err:
+ DBUG_EXECUTE("error", max_rows= 0;);
/* Remove all not used rows */
while ((row=rows.get()))
{
delete row;
thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status);
stacked_inserts--;
+ DBUG_EXECUTE("error", max_rows++;);
}
+ DBUG_PRINT("error", ("dropped %lu rows after an error", max_rows));
thread_safe_increment(delayed_insert_errors, &LOCK_delayed_status);
pthread_mutex_lock(&mutex);
DBUG_RETURN(1);
@@ -2357,6 +2406,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
DBUG_ENTER("select_insert::prepare");
unit= u;
+
/*
Since table in which we are going to insert is added to the first
select, LEX::current_select should point to the first select while
@@ -2586,58 +2636,56 @@ void select_insert::send_error(uint errcode,const char *err)
if (errcode != ER_UNKNOWN_ERROR && !thd->net.report_error)
my_message(errcode, err, MYF(0));
- if (!table)
+ /*
+ If the creation of the table failed (due to a syntax error, for
+ example), no table will have been opened and therefore 'table'
+ will be NULL. In that case, we still need to execute the rollback
+ and the end of the function to truncate the binary log, but we can
+ skip all the intermediate steps.
+ */
+ if (table)
{
/*
- This can only happen when using CREATE ... SELECT and the table was not
- created becasue of an syntax error
+ If we are not in prelocked mode, we end the bulk insert started
+ before.
*/
- DBUG_VOID_RETURN;
- }
- if (!thd->prelocked_mode)
- table->file->ha_end_bulk_insert();
- /*
- If at least one row has been inserted/modified and will stay in the table
- (the table doesn't have transactions) we must write to the binlog (and
- the error code will make the slave stop).
-
- For many errors (example: we got a duplicate key error while
- inserting into a MyISAM table), no row will be added to the table,
- so passing the error to the slave will not help since there will
- be an error code mismatch (the inserts will succeed on the slave
- with no error).
-
- If we are using row-based replication we have two cases where this
- code is executed: replication of CREATE-SELECT and replication of
- INSERT-SELECT.
-
- When replicating a CREATE-SELECT statement, we shall not write the
- events to the binary log and should thus not set
- OPTION_STATUS_NO_TRANS_UPDATE.
-
- When replicating INSERT-SELECT, we shall not write the events to
- the binary log for transactional table, but shall write all events
- if there is one or more writes to non-transactional tables. In
- this case, the OPTION_STATUS_NO_TRANS_UPDATE is set if there is a
- write to a non-transactional table, otherwise it is cleared.
- */
- if (info.copied || info.deleted || info.updated)
- {
- if (!table->file->has_transactions())
+ if (!thd->prelocked_mode)
+ table->file->ha_end_bulk_insert();
+
+ /*
+ If at least one row has been inserted/modified and will stay in
+ the table (the table doesn't have transactions) we must write to
+ the binlog (and the error code will make the slave stop).
+
+ For many errors (example: we got a duplicate key error while
+ inserting into a MyISAM table), no row will be added to the table,
+ so passing the error to the slave will not help since there will
+ be an error code mismatch (the inserts will succeed on the slave
+ with no error).
+
+ If table creation failed, the number of rows modified will also be
+ zero, so no check for that is made.
+ */
+ if (info.copied || info.deleted || info.updated)
{
- if (mysql_bin_log.is_open())
+ DBUG_ASSERT(table != NULL);
+ if (!table->file->has_transactions())
{
- thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query, thd->query_length,
- table->file->has_transactions(), FALSE);
+ if (mysql_bin_log.is_open())
+ {
+ thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query, thd->query_length,
+ table->file->has_transactions(), FALSE);
+ }
+ if (!thd->current_stmt_binlog_row_based && !table->s->tmp_table &&
+ !can_rollback_data())
+ thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
+ query_cache_invalidate3(thd, table, 1);
}
- if (!thd->current_stmt_binlog_row_based && !table->s->tmp_table &&
- !can_rollback_data())
- thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
- query_cache_invalidate3(thd, table, 1);
}
+ table->file->ha_release_auto_increment();
}
+
ha_rollback_stmt(thd);
- table->file->ha_release_auto_increment();
DBUG_VOID_RETURN;
}
@@ -2645,8 +2693,11 @@ void select_insert::send_error(uint errcode,const char *err)
bool select_insert::send_eof()
{
int error,error2;
+ bool const trans_table= table->file->has_transactions();
ulonglong id;
DBUG_ENTER("select_insert::send_eof");
+ DBUG_PRINT("enter", ("trans_table=%d, table_type='%s'",
+ trans_table, table->file->table_type()));
error= (!thd->prelocked_mode) ? table->file->ha_end_bulk_insert():0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
@@ -2666,9 +2717,8 @@ bool select_insert::send_eof()
are not logged in RBR)
- We are using statement based replication
*/
- if (!table->file->has_transactions() &&
- (!table->s->tmp_table ||
- !thd->current_stmt_binlog_row_based))
+ if (!trans_table &&
+ (!table->s->tmp_table || !thd->current_stmt_binlog_row_based))
thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
}
@@ -2684,11 +2734,22 @@ bool select_insert::send_eof()
thd->clear_error();
thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query, thd->query_length,
- table->file->has_transactions(), FALSE);
+ trans_table, FALSE);
+ }
+ /*
+ We will call ha_autocommit_or_rollback() also for
+ non-transactional tables under row-based replication: there might
+ be events in the binary logs transaction, and we need to write
+ them to the binary log.
+ */
+ if (trans_table || thd->current_stmt_binlog_row_based)
+ {
+ int const error2= ha_autocommit_or_rollback(thd, error);
+ if (error2 && !error)
+ error=error2;
}
- if ((error2=ha_autocommit_or_rollback(thd,error)) && ! error)
- error=error2;
table->file->ha_release_auto_increment();
+
if (error)
{
table->file->print_error(error,MYF(0));
@@ -2885,14 +2946,19 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
class MY_HOOKS : public TABLEOP_HOOKS {
public:
MY_HOOKS(select_create *x) : ptr(x) { }
+
+ private:
virtual void do_prelock(TABLE **tables, uint count)
{
- if (ptr->get_thd()->current_stmt_binlog_row_based &&
- !(ptr->get_create_info()->options & HA_LEX_CREATE_TMP_TABLE))
- ptr->binlog_show_create_table(tables, count);
+ TABLE const *const table = *tables;
+ if (ptr->get_thd()->current_stmt_binlog_row_based &&
+ table->s->tmp_table == NO_TMP_TABLE &&
+ !ptr->get_create_info()->table_existed)
+ {
+ ptr->binlog_show_create_table(tables, count);
+ }
}
- private:
select_create *ptr;
};
@@ -2901,6 +2967,20 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
#endif
unit= u;
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ Start a statement transaction before the create if we are creating
+ a non-temporary table and are using row-based replication for the
+ statement.
+ */
+ if ((thd->lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) == 0 &&
+ thd->current_stmt_binlog_row_based)
+ {
+ thd->binlog_start_trans_and_stmt();
+ }
+#endif
+
if (!(table= create_table_from_items(thd, create_info, create_table,
extra_fields, keys, &values,
&thd->extra_lock, hook_ptr)))
@@ -3048,8 +3128,17 @@ void select_create::abort()
table->s->version= 0;
hash_delete(&open_cache,(byte*) table);
if (!create_info->table_existed)
+ {
quick_rm_table(table_type, create_table->db,
create_table->table_name, 0);
+ /*
+ We roll back the statement, including truncating the
+ transaction cache of the binary log, if the statement
+ failed.
+ */
+ if (thd->current_stmt_binlog_row_based)
+ ha_rollback_stmt(thd);
+ }
/* Tell threads waiting for refresh that something has happened */
if (version != refresh_version)
broadcast_refresh();
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 3f3fde03bce..d94c45c4bdd 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -1722,7 +1722,8 @@ bool st_lex::can_be_merged()
unit= unit->next_unit())
{
if (unit->first_select()->parent_lex == this &&
- (unit->item == 0 || unit->item->place() != IN_WHERE))
+ (unit->item == 0 ||
+ (unit->item->place() != IN_WHERE && unit->item->place() != IN_ON)))
{
selects_allow_merge= 0;
break;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 8cbc7bc46f6..fc7ed7ff673 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1047,8 +1047,8 @@ static int check_connection(THD *thd)
char *passwd= strend(user)+1;
uint user_len= passwd - user - 1;
char *db= passwd;
- char db_buff[NAME_BYTE_LEN + 1]; // buffer to store db in utf8
- char user_buff[USERNAME_BYTE_LENGTH + 1]; // buffer to store user in utf8
+ char db_buff[NAME_LEN + 1]; // buffer to store db in utf8
+ char user_buff[USERNAME_LENGTH + 1]; // buffer to store user in utf8
uint dummy_errors;
/*
@@ -1724,7 +1724,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
password. New clients send the size (1 byte) + string (not null
terminated, so also '\0' for empty string).
*/
- char db_buff[NAME_BYTE_LEN+1]; // buffer to store db in utf8
+ char db_buff[NAME_LEN+1]; // buffer to store db in utf8
char *db= passwd;
uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ?
*passwd++ : strlen(passwd);
@@ -7783,7 +7783,6 @@ LEX_USER *get_current_user(THD *thd, LEX_USER *user)
SYNOPSIS
check_string_length()
- cs string charset
str string to be checked
err_msg error message to be displayed if the string is too long
max_length max length
@@ -7793,13 +7792,13 @@ LEX_USER *get_current_user(THD *thd, LEX_USER *user)
TRUE the passed string is longer than max_length
*/
-bool check_string_length(CHARSET_INFO *cs, LEX_STRING *str,
- const char *err_msg, uint max_length)
+bool check_string_length(LEX_STRING *str, const char *err_msg,
+ uint max_length)
{
- if (cs->cset->charpos(cs, str->str, str->str + str->length,
- max_length) >= str->length)
- return FALSE;
+ if (str->length <= max_length)
+ return FALSE;
my_error(ER_WRONG_STRING_LENGTH, MYF(0), str->str, err_msg, max_length);
+
return TRUE;
}
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index a20e5dd512b..8df527fd25b 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -2173,18 +2173,17 @@ close_file:
SYNOPSIS
partition_key_modified
table TABLE object for which partition fields are set-up
- fields A list of the to be modifed
+ fields Bitmap representing fields to be modified
RETURN VALUES
TRUE Need special handling of UPDATE
FALSE Normal UPDATE handling is ok
*/
-bool partition_key_modified(TABLE *table, List<Item> &fields)
+bool partition_key_modified(TABLE *table, const MY_BITMAP *fields)
{
- List_iterator_fast<Item> f(fields);
+ Field **fld;
partition_info *part_info= table->part_info;
- Item_field *item_field;
DBUG_ENTER("partition_key_modified");
if (!part_info)
@@ -2192,9 +2191,8 @@ bool partition_key_modified(TABLE *table, List<Item> &fields)
if (table->s->db_type->partition_flags &&
(table->s->db_type->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY))
DBUG_RETURN(FALSE);
- f.rewind();
- while ((item_field=(Item_field*) f++))
- if (item_field->field->flags & FIELD_IN_PART_FUNC_FLAG)
+ for (fld= part_info->full_part_field_array; *fld; fld++)
+ if (bitmap_is_set(fields, (*fld)->field_index))
DBUG_RETURN(TRUE);
DBUG_RETURN(FALSE);
}
diff --git a/sql/sql_partition.h b/sql/sql_partition.h
index c5b930162a9..4372c26a851 100644
--- a/sql/sql_partition.h
+++ b/sql/sql_partition.h
@@ -70,7 +70,7 @@ bool fix_partition_func(THD *thd, TABLE *table, bool create_table_ind);
char *generate_partition_syntax(partition_info *part_info,
uint *buf_length, bool use_sql_alloc,
bool show_partition_options);
-bool partition_key_modified(TABLE *table, List<Item> &fields);
+bool partition_key_modified(TABLE *table, const MY_BITMAP *fields);
void get_partition_set(const TABLE *table, byte *buf, const uint index,
const key_range *key_spec,
part_id_range *part_spec);
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 97c28dd1fa5..34fb447792e 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -759,14 +759,16 @@ void plugin_load(void)
while (!(error= read_record_info.read_record(&read_record_info)))
{
DBUG_PRINT("info", ("init plugin record"));
- LEX_STRING name, dl;
- name.str= get_field(&mem, table->field[0]);
- name.length= strlen(name.str);
- dl.str= get_field(&mem, table->field[1]);
- dl.length= strlen(dl.str);
+ String str_name, str_dl;
+ get_field(&mem, table->field[0], &str_name);
+ get_field(&mem, table->field[1], &str_dl);
+
+ LEX_STRING name= {(char *)str_name.ptr(), str_name.length()};
+ LEX_STRING dl= {(char *)str_dl.ptr(), str_dl.length()};
+
if (plugin_add(&name, &dl, REPORT_TO_LOG))
- DBUG_PRINT("warning", ("Couldn't load plugin named '%s' with soname '%s'.",
- name.str, dl.str));
+ sql_print_warning("Couldn't load plugin named '%s' with soname '%s'.",
+ str_name.c_ptr(), str_dl.c_ptr());
}
if (error > 0)
sql_print_error(ER(ER_GET_ERRNO), my_errno);
@@ -862,8 +864,8 @@ my_bool mysql_install_plugin(THD *thd, const LEX_STRING *name, const LEX_STRING
DBUG_RETURN(FALSE);
deinit:
plugin_deinitialize(tmp);
-err:
plugin_del(tmp);
+err:
rw_unlock(&THR_LOCK_plugin);
DBUG_RETURN(TRUE);
}
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index 45d3254dad0..789de64da85 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -24,7 +24,7 @@ typedef struct st_slave_info
uint32 server_id;
uint32 rpl_recovery_rank, master_id;
char host[HOSTNAME_LENGTH+1];
- char user[USERNAME_BYTE_LENGTH+1];
+ char user[USERNAME_LENGTH+1];
char password[MAX_PASSWORD_LENGTH+1];
uint16 port;
THD* thd;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 63b81c10067..ed91719fd46 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -70,7 +70,7 @@ static int join_tab_cmp_straight(const void* ptr1, const void* ptr2);
static void find_best(JOIN *join,table_map rest_tables,uint index,
double record_count,double read_time);
static uint cache_record_length(JOIN *join,uint index);
-static double prev_record_reads(JOIN *join,table_map found_ref);
+static double prev_record_reads(JOIN *join, uint idx, table_map found_ref);
static bool get_best_combination(JOIN *join);
static store_key *get_store_key(THD *thd,
KEYUSE *keyuse, table_map used_tables,
@@ -1137,17 +1137,28 @@ JOIN::optimize()
tmp_table_param.hidden_field_count= (all_fields.elements -
fields_list.elements);
+ ORDER *tmp_group= ((!simple_group && !procedure &&
+ !(test_flags & TEST_NO_KEY_GROUP)) ? group_list :
+ (ORDER*) 0);
+ /*
+ Pushing LIMIT to the temporary table creation is not applicable
+ when there is ORDER BY or GROUP BY or there is no GROUP BY, but
+ there are aggregate functions, because in all these cases we need
+ all result rows.
+ */
+ ha_rows tmp_rows_limit= ((order == 0 || skip_sort_order ||
+ test(select_options & OPTION_BUFFER_RESULT)) &&
+ !tmp_group &&
+ !thd->lex->current_select->with_sum_func) ?
+ select_limit : HA_POS_ERROR;
+
if (!(exec_tmp_table1=
create_tmp_table(thd, &tmp_table_param, all_fields,
- ((!simple_group && !procedure &&
- !(test_flags & TEST_NO_KEY_GROUP)) ?
- group_list : (ORDER*) 0),
+ tmp_group,
group_list ? 0 : select_distinct,
group_list && simple_group,
select_options,
- (order == 0 || skip_sort_order ||
- test(select_options & OPTION_BUFFER_RESULT)) ?
- select_limit : HA_POS_ERROR,
+ tmp_rows_limit,
(char *) "")))
DBUG_RETURN(1);
@@ -3426,6 +3437,7 @@ set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
join->positions[idx].table= table;
join->positions[idx].key=key;
join->positions[idx].records_read=1.0; /* This is a const table */
+ join->positions[idx].ref_depend_map= 0;
/* Move the const table as down as possible in best_ref */
JOIN_TAB **pos=join->best_ref+idx+1;
@@ -3483,6 +3495,7 @@ best_access_path(JOIN *join,
double best= DBL_MAX;
double best_time= DBL_MAX;
double records= DBL_MAX;
+ table_map best_ref_depends_map;
double tmp;
ha_rows rec;
@@ -3511,13 +3524,20 @@ best_access_path(JOIN *join,
/* Calculate how many key segments of the current key we can use */
start_key= keyuse;
- do
- { /* for each keypart */
+
+ do /* For each keypart */
+ {
uint keypart= keyuse->keypart;
table_map best_part_found_ref= 0;
double best_prev_record_reads= DBL_MAX;
- do
+
+ do /* For each way to access the keypart */
{
+
+ /*
+ if 1. expression doesn't refer to forward tables
+ 2. we won't get two ref-or-null's
+ */
if (!(remaining_tables & keyuse->used_tables) &&
!(ref_or_null_part && (keyuse->optimize &
KEY_OPTIMIZE_REF_OR_NULL)))
@@ -3525,8 +3545,9 @@ best_access_path(JOIN *join,
found_part|= keyuse->keypart_map;
if (!(keyuse->used_tables & ~join->const_table_map))
const_part|= keyuse->keypart_map;
- double tmp= prev_record_reads(join, (found_ref |
- keyuse->used_tables));
+
+ double tmp= prev_record_reads(join, idx, (found_ref |
+ keyuse->used_tables));
if (tmp < best_prev_record_reads)
{
best_part_found_ref= keyuse->used_tables & ~join->const_table_map;
@@ -3565,7 +3586,7 @@ best_access_path(JOIN *join,
Really, there should be records=0.0 (yes!)
but 1.0 would be probably safer
*/
- tmp= prev_record_reads(join, found_ref);
+ tmp= prev_record_reads(join, idx, found_ref);
records= 1.0;
}
else
@@ -3580,7 +3601,7 @@ best_access_path(JOIN *join,
max_key_part= (uint) ~0;
if ((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME)
{
- tmp = prev_record_reads(join, found_ref);
+ tmp = prev_record_reads(join, idx, found_ref);
records=1.0;
}
else
@@ -3717,7 +3738,30 @@ best_access_path(JOIN *join,
{
/* Check if we have statistic about the distribution */
if ((records= keyinfo->rec_per_key[max_key_part-1]))
+ {
+ /*
+ Fix for the case where the index statistics is too
+ optimistic: If
+ (1) We're considering ref(const) and there is quick select
+ on the same index,
+ (2) and that quick select uses more keyparts (i.e. it will
+ scan equal/smaller interval than this ref(const))
+ (3) and E(#rows) for quick select is higher than our
+ estimate,
+ Then
+ We'll use E(#rows) from quick select.
+
+ Q: Why do we choose to use 'ref'? Won't quick select be
+ cheaper in some cases ?
+ TODO: figure this out and adjust the plan choice if needed.
+ */
+ if (!found_ref && table->quick_keys.is_set(key) && // (1)
+ table->quick_key_parts[key] > max_key_part && // (2)
+ records < (double)table->quick_rows[key]) // (3)
+ records= (double)table->quick_rows[key];
+
tmp= records;
+ }
else
{
/*
@@ -3810,6 +3854,7 @@ best_access_path(JOIN *join,
best_records= records;
best_key= start_key;
best_max_key_part= max_key_part;
+ best_ref_depends_map= found_ref;
}
}
records= best_records;
@@ -3938,6 +3983,8 @@ best_access_path(JOIN *join,
best= tmp;
records= rows2double(rnd_records);
best_key= 0;
+ /* range/index_merge/ALL/index access method are "independent", so: */
+ best_ref_depends_map= 0;
}
}
@@ -3946,6 +3993,7 @@ best_access_path(JOIN *join,
join->positions[idx].read_time= best;
join->positions[idx].key= best_key;
join->positions[idx].table= s;
+ join->positions[idx].ref_depend_map= best_ref_depends_map;
if (!best_key &&
idx == join->const_tables &&
@@ -4713,17 +4761,85 @@ cache_record_length(JOIN *join,uint idx)
}
+/*
+ Get the number of different row combinations for subset of partial join
+
+ SYNOPSIS
+ prev_record_reads()
+ join The join structure
+ idx Number of tables in the partial join order (i.e. the
+ partial join order is in join->positions[0..idx-1])
+ found_ref Bitmap of tables for which we need to find # of distinct
+ row combinations.
+
+ DESCRIPTION
+ Given a partial join order (in join->positions[0..idx-1]) and a subset of
+ tables within that join order (specified in found_ref), find out how many
+ distinct row combinations of subset tables will be in the result of the
+ partial join order.
+
+ This is used as follows: Suppose we have a table accessed with a ref-based
+ method. The ref access depends on current rows of tables in found_ref.
+ We want to count # of different ref accesses. We assume two ref accesses
+ will be different if at least one of access parameters is different.
+ Example: consider a query
+
+ SELECT * FROM t1, t2, t3 WHERE t1.key=c1 AND t2.key=c2 AND t3.key=t1.field
+
+ and a join order:
+ t1, ref access on t1.key=c1
+ t2, ref access on t2.key=c2
+ t3, ref access on t3.key=t1.field
+
+ For t1: n_ref_scans = 1, n_distinct_ref_scans = 1
+ For t2: n_ref_scans = records_read(t1), n_distinct_ref_scans=1
+ For t3: n_ref_scans = records_read(t1)*records_read(t2)
+ n_distinct_ref_scans = #records_read(t1)
+
+ The reason for having this function (at least the latest version of it)
+ is that we need to account for buffering in join execution.
+
+ An edge-case example: if we have a non-first table in join accessed via
+ ref(const) or ref(param) where there is a small number of different
+ values of param, then the access will likely hit the disk cache and will
+ not require any disk seeks.
+
+ The proper solution would be to assume an LRU disk cache of some size,
+ calculate probability of cache hits, etc. For now we just count
+ identical ref accesses as one.
+
+ RETURN
+ Expected number of row combinations
+*/
+
static double
-prev_record_reads(JOIN *join,table_map found_ref)
+prev_record_reads(JOIN *join, uint idx, table_map found_ref)
{
double found=1.0;
- found_ref&= ~OUTER_REF_TABLE_BIT;
- for (POSITION *pos=join->positions ; found_ref ; pos++)
+ POSITION *pos_end= join->positions - 1;
+ for (POSITION *pos= join->positions + idx - 1; pos != pos_end; pos--)
{
if (pos->table->table->map & found_ref)
{
- found_ref&= ~pos->table->table->map;
- found*=pos->records_read;
+ found_ref|= pos->ref_depend_map;
+ /*
+ For the case of "t1 LEFT JOIN t2 ON ..." where t2 is a const table
+ with no matching row we will get position[t2].records_read==0.
+ Actually the size of output is one null-complemented row, therefore
+ we will use value of 1 whenever we get records_read==0.
+
+ Note
+ - the above case can't occur if inner part of outer join has more
+ than one table: table with no matches will not be marked as const.
+
+ - Ideally we should add 1 to records_read for every possible null-
+ complemented row. We're not doing it because: 1. it will require
+ non-trivial code and add overhead. 2. The value of records_read
+ is an imprecise estimate and adding 1 (or, in the worst case,
+ #max_nested_outer_joins=64-1) will not make it any more precise.
+ */
+ if (pos->records_read)
+ found*= pos->records_read;
}
}
return found;
@@ -9197,6 +9313,13 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
thd->variables.max_heap_table_size) :
thd->variables.tmp_table_size)/ share->reclength);
set_if_bigger(share->max_rows,1); // For dummy start options
+ /*
+ Push the LIMIT clause to the temporary table creation, so that we
+ materialize only up to 'rows_limit' records instead of all result records.
+ */
+ set_if_smaller(share->max_rows, rows_limit);
+ param->end_write_records= rows_limit;
+
keyinfo= param->keyinfo;
if (group)
@@ -9329,19 +9452,6 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
}
}
- /*
- Push the LIMIT clause to the temporary table creation, so that we
- materialize only up to 'rows_limit' records instead of all result records.
- This optimization is not applicable when there is GROUP BY or there is
- no GROUP BY, but there are aggregate functions, because both must be
- computed for all result rows.
- */
- if (!group && !thd->lex->current_select->with_sum_func)
- {
- set_if_smaller(table->s->max_rows, rows_limit);
- param->end_write_records= rows_limit;
- }
-
if (thd->is_fatal_error) // If end of memory
goto err; /* purecov: inspected */
share->db_record_offset= 1;
@@ -10473,6 +10583,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
tab->info="const row not found";
/* Mark for EXPLAIN that the row was not found */
pos->records_read=0.0;
+ pos->ref_depend_map= 0;
if (!table->maybe_null || error > 0)
DBUG_RETURN(error);
}
@@ -10498,6 +10609,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
tab->info="unique row not found";
/* Mark for EXPLAIN that the row was not found */
pos->records_read=0.0;
+ pos->ref_depend_map= 0;
if (!table->maybe_null || error > 0)
DBUG_RETURN(error);
}
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 3b0c312757d..eb6d2d5d34f 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -176,7 +176,13 @@ enum_nested_loop_state sub_select(JOIN *join,JOIN_TAB *join_tab, bool
*/
typedef struct st_position
{
+ /*
+ The "fanout": number of output rows that will be produced (after
+ pushed down selection condition is applied) per each row combination of
+ previous tables.
+ */
double records_read;
+
/*
Cost accessing the table in course of the entire complete join execution,
i.e. cost of one access method use (e.g. 'range' or 'ref' scan ) times
@@ -184,7 +190,15 @@ typedef struct st_position
*/
double read_time;
JOIN_TAB *table;
+
+ /*
+ NULL - 'index' or 'range' or 'index_merge' or 'ALL' access is used.
+ Other - [eq_]ref[_or_null] access is used. Pointer to {t.keypart1 = expr}
+ */
KEYUSE *key;
+
+ /* If ref-based access is used: bitmap of tables this table depends on */
+ table_map ref_depend_map;
} POSITION;
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index ce7832569fe..30f494a48ac 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -211,6 +211,22 @@ static my_bool show_plugins(THD *thd, st_plugin_int *plugin,
else
table->field[8]->set_null();
+ switch (plug->license) {
+ case PLUGIN_LICENSE_GPL:
+ table->field[9]->store(PLUGIN_LICENSE_GPL_STRING,
+ strlen(PLUGIN_LICENSE_GPL_STRING), cs);
+ break;
+ case PLUGIN_LICENSE_BSD:
+ table->field[9]->store(PLUGIN_LICENSE_BSD_STRING,
+ strlen(PLUGIN_LICENSE_BSD_STRING), cs);
+ break;
+ default:
+ table->field[9]->store(PLUGIN_LICENSE_PROPRIETARY_STRING,
+ strlen(PLUGIN_LICENSE_PROPRIETARY_STRING), cs);
+ break;
+ }
+ table->field[9]->set_notnull();
+
return schema_table_store_record(thd, table);
}
@@ -4999,7 +5015,7 @@ static my_bool run_hton_fill_schema_files(THD *thd, st_plugin_int *plugin,
(run_hton_fill_schema_files_args *) arg;
handlerton *hton= (handlerton *)plugin->data;
if(hton->fill_files_table && hton->state == SHOW_OPTION_YES)
- hton->fill_files_table(thd, args->tables, args->cond);
+ hton->fill_files_table(hton, thd, args->tables, args->cond);
return false;
}
@@ -5533,7 +5549,7 @@ ST_FIELD_INFO partitions_fields_info[]=
ST_FIELD_INFO variables_fields_info[]=
{
{"Variable_name", 80, MYSQL_TYPE_STRING, 0, 0, "Variable_name"},
- {"Value", 255, MYSQL_TYPE_STRING, 0, 0, "Value"},
+ {"Value", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, "Value"},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
};
@@ -5579,6 +5595,7 @@ ST_FIELD_INFO plugin_fields_info[]=
{"PLUGIN_LIBRARY_VERSION", 20, MYSQL_TYPE_STRING, 0, 1, 0},
{"PLUGIN_AUTHOR", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
{"PLUGIN_DESCRIPTION", 65535, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"PLUGIN_LICENSE", 80, MYSQL_TYPE_STRING, 0, 1, "License"},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
};
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 4658ebb6cf4..7796bc35a79 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1650,7 +1650,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
}
}
- if (lock_table_names(thd, tables))
+ if (!drop_temporary && lock_table_names(thd, tables))
DBUG_RETURN(1);
/* Don't give warnings for not found errors, as we already generate notes */
@@ -1835,7 +1835,8 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
}
}
- unlock_table_names(thd, tables, (TABLE_LIST*) 0);
+ if (!drop_temporary)
+ unlock_table_names(thd, tables, (TABLE_LIST*) 0);
thd->no_warnings_for_error= 0;
DBUG_RETURN(error);
}
diff --git a/sql/sql_tablespace.cc b/sql/sql_tablespace.cc
index 13dfb491af4..470fa5bc862 100644
--- a/sql/sql_tablespace.cc
+++ b/sql/sql_tablespace.cc
@@ -21,7 +21,7 @@
int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info)
{
int error= HA_ADMIN_NOT_IMPLEMENTED;
- const handlerton *hton= ts_info->storage_engine;
+ handlerton *hton= ts_info->storage_engine;
DBUG_ENTER("mysql_alter_tablespace");
/*
@@ -42,7 +42,7 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info)
if (hton->alter_tablespace)
{
- if ((error= hton->alter_tablespace(thd, ts_info)))
+ if ((error= hton->alter_tablespace(hton, thd, ts_info)))
{
if (error == HA_ADMIN_NOT_IMPLEMENTED)
{
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index b2464745f7c..09576f5e523 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -116,11 +116,6 @@ public:
bodies[TRG_EVENT_DELETE][TRG_ACTION_AFTER]);
}
- bool has_before_update_triggers()
- {
- return test(bodies[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE]);
- }
-
void set_table(TABLE *new_table);
void mark_fields_used(trg_event_type event);
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 3130220515a..51ec287a6cb 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -25,7 +25,7 @@
#include "sp_head.h"
#include "sql_trigger.h"
-static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields);
+static bool safe_update_on_fly(JOIN_TAB *join_tab);
/* Return 0 if row hasn't changed */
@@ -271,13 +271,16 @@ int mysql_update(THD *thd,
}
}
init_ftfuncs(thd, select_lex, 1);
+
+ table->mark_columns_needed_for_update();
+
/* Check if we are modifying a key that we are used to search with */
if (select && select->quick)
{
used_index= select->quick->index;
used_key_is_modified= (!select->quick->unique_key_range() &&
- select->quick->check_if_keys_used(&fields));
+ select->quick->is_keys_used(table->write_set));
}
else
{
@@ -285,12 +288,13 @@ int mysql_update(THD *thd,
if (used_index == MAX_KEY) // no index for sort order
used_index= table->file->key_used_on_scan;
if (used_index != MAX_KEY)
- used_key_is_modified= check_if_key_used(table, used_index, fields);
+ used_key_is_modified= is_key_used(table, used_index, table->write_set);
}
+
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (used_key_is_modified || order ||
- partition_key_modified(table, fields))
+ partition_key_modified(table, table->write_set))
#else
if (used_key_is_modified || order)
#endif
@@ -449,8 +453,6 @@ int mysql_update(THD *thd,
MODE_STRICT_ALL_TABLES)));
will_batch= !table->file->start_bulk_update();
- table->mark_columns_needed_for_update();
-
/*
We can use compare_record() to optimize away updates if
the table handler is returning all columns OR if
@@ -1215,9 +1217,11 @@ multi_update::initialize_tables(JOIN *join)
TMP_TABLE_PARAM *tmp_param;
table->mark_columns_needed_for_update();
+ if (ignore)
+ table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (table == main_table) // First table in join
{
- if (safe_update_on_fly(join->join_tab, &temp_fields))
+ if (safe_update_on_fly(join->join_tab))
{
table_to_update= main_table; // Update table on the fly
continue;
@@ -1275,7 +1279,6 @@ multi_update::initialize_tables(JOIN *join)
SYNOPSIS
safe_update_on_fly
join_tab How table is used in join
- fields Fields that are updated
NOTES
We can update the first table in join on the fly if we know that
@@ -1288,9 +1291,8 @@ multi_update::initialize_tables(JOIN *join)
- We are doing a range scan and we don't update the scan key or
the primary key for a clustered table handler.
- When checking for above cases we also should take into account that
- BEFORE UPDATE trigger potentially may change value of any field in row
- being updated.
+ This function gets information about fields to be updated from
+ the TABLE::write_set bitmap.
WARNING
This code is a bit dependent of how make_join_readinfo() works.
@@ -1300,7 +1302,7 @@ multi_update::initialize_tables(JOIN *join)
1 Safe to update
*/
-static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields)
+static bool safe_update_on_fly(JOIN_TAB *join_tab)
{
TABLE *table= join_tab->table;
switch (join_tab->type) {
@@ -1310,21 +1312,15 @@ static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields)
return TRUE; // At most one matching row
case JT_REF:
case JT_REF_OR_NULL:
- return !check_if_key_used(table, join_tab->ref.key, *fields) &&
- !(table->triggers &&
- table->triggers->has_before_update_triggers());
+ return !is_key_used(table, join_tab->ref.key, table->write_set);
case JT_ALL:
/* If range search on index */
if (join_tab->quick)
- return !join_tab->quick->check_if_keys_used(fields) &&
- !(table->triggers &&
- table->triggers->has_before_update_triggers());
+ return !join_tab->quick->is_keys_used(table->write_set);
/* If scanning in clustered key */
if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
table->s->primary_key < MAX_KEY)
- return !check_if_key_used(table, table->s->primary_key, *fields) &&
- !(table->triggers &&
- table->triggers->has_before_update_triggers());
+ return !is_key_used(table, table->s->primary_key, table->write_set);
return TRUE;
default:
break; // Avoid compler warning
@@ -1337,7 +1333,11 @@ multi_update::~multi_update()
{
TABLE_LIST *table;
for (table= update_tables ; table; table= table->next_local)
+ {
table->table->no_keyread= table->table->no_cache= 0;
+ if (ignore)
+ table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+ }
if (tmp_tables)
{
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 187d289cb16..7f6d935ff5e 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -1603,7 +1603,7 @@ bool insert_view_fields(THD *thd, List<Item> *list, TABLE_LIST *view)
list->push_back(fld);
else
{
- my_error(ER_NON_UPDATABLE_TABLE, MYF(0), view->alias, "INSERT");
+ my_error(ER_NON_INSERTABLE_TABLE, MYF(0), view->alias, "INSERT");
DBUG_RETURN(TRUE);
}
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index fd53983c33f..26dea00029a 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1353,7 +1353,7 @@ event_tail:
Lex->sql_command= SQLCOM_CREATE_EVENT;
Lex->expr_allows_subselect= TRUE;
}
-
+ ;
ev_schedule_time: EVERY_SYM expr interval
{
@@ -1366,7 +1366,7 @@ ev_schedule_time: EVERY_SYM expr interval
{
Lex->event_parse_data->item_execute_at= $2;
}
- ;
+ ;
opt_ev_status: /* empty */ { $$= 0; }
| ENABLE_SYM
@@ -6853,11 +6853,13 @@ join_table:
/* Change the current name resolution context to a local context. */
if (push_new_name_resolution_context(YYTHD, $1, $3))
YYABORT;
+ Select->parsing_place= IN_ON;
}
expr
{
add_join_on($3,$6);
Lex->pop_context();
+ Select->parsing_place= NO_MATTER;
}
| table_ref STRAIGHT_JOIN table_factor
ON
@@ -6866,12 +6868,14 @@ join_table:
/* Change the current name resolution context to a local context. */
if (push_new_name_resolution_context(YYTHD, $1, $3))
YYABORT;
+ Select->parsing_place= IN_ON;
}
expr
{
$3->straight=1;
add_join_on($3,$6);
Lex->pop_context();
+ Select->parsing_place= NO_MATTER;
}
| table_ref normal_join table_ref
USING
@@ -6895,6 +6899,7 @@ join_table:
/* Change the current name resolution context to a local context. */
if (push_new_name_resolution_context(YYTHD, $1, $5))
YYABORT;
+ Select->parsing_place= IN_ON;
}
expr
{
@@ -6902,6 +6907,7 @@ join_table:
Lex->pop_context();
$5->outer_join|=JOIN_TYPE_LEFT;
$$=$5;
+ Select->parsing_place= NO_MATTER;
}
| table_ref LEFT opt_outer JOIN_SYM table_factor
{
@@ -6926,6 +6932,7 @@ join_table:
/* Change the current name resolution context to a local context. */
if (push_new_name_resolution_context(YYTHD, $1, $5))
YYABORT;
+ Select->parsing_place= IN_ON;
}
expr
{
@@ -6934,6 +6941,7 @@ join_table:
YYABORT;
add_join_on($$, $8);
Lex->pop_context();
+ Select->parsing_place= NO_MATTER;
}
| table_ref RIGHT opt_outer JOIN_SYM table_factor
{
@@ -9243,7 +9251,7 @@ user:
$$->host.str= (char *) "%";
$$->host.length= 1;
- if (check_string_length(system_charset_info, &$$->user,
+ if (check_string_length(&$$->user,
ER(ER_USERNAME), USERNAME_LENGTH))
YYABORT;
}
@@ -9254,9 +9262,9 @@ user:
YYABORT;
$$->user = $1; $$->host=$3;
- if (check_string_length(system_charset_info, &$$->user,
+ if (check_string_length(&$$->user,
ER(ER_USERNAME), USERNAME_LENGTH) ||
- check_string_length(&my_charset_latin1, &$$->host,
+ check_string_length(&$$->host,
ER(ER_HOSTNAME), HOSTNAME_LENGTH))
YYABORT;
}
diff --git a/sql/stacktrace.c b/sql/stacktrace.c
index 43f35c452f7..a2fe2ab88f1 100644
--- a/sql/stacktrace.c
+++ b/sql/stacktrace.c
@@ -222,7 +222,7 @@ terribly wrong...\n");
fprintf(stderr, "Stack trace seems successful - bottom reached\n");
end:
- fprintf(stderr, "Please read http://dev.mysql.com/doc/mysql/en/Using_stack_trace.html and follow instructions on how to resolve the stack trace. Resolved\n\
+ fprintf(stderr, "Please read http://dev.mysql.com/doc/mysql/en/using-stack-trace.html and follow instructions on how to resolve the stack trace. Resolved\n\
stack trace is much more helpful in diagnosing the problem, so please do \n\
resolve it\n");
}
diff --git a/sql/table.cc b/sql/table.cc
index 834c2ed9c82..5b41ad48696 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -2257,7 +2257,7 @@ char *get_field(MEM_ROOT *mem, Field *field)
bool check_db_name(char *name)
{
- uint name_length= 0; // name length in symbols
+ char *start= name;
/* Used to catch empty names and names with end space */
bool last_char_is_space= TRUE;
@@ -2277,15 +2277,13 @@ bool check_db_name(char *name)
name += len;
continue;
}
- name_length++;
}
#else
last_char_is_space= *name==' ';
#endif
- name_length++;
name++;
}
- return last_char_is_space || name_length > NAME_LEN;
+ return last_char_is_space || (uint) (name - start) > NAME_LEN;
}
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 396ff4fba27..2ea572c782c 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -292,13 +292,19 @@ bool mysql_create_frm(THD *thd, const char *file_name,
goto err3;
{
- /* Unescape all UCS2 intervals: were escaped in pack_headers */
+ /*
+ Restore all UCS2 intervals.
+ HEX representation of them is not needed anymore.
+ */
List_iterator<create_field> it(create_fields);
create_field *field;
while ((field=it++))
{
- if (field->interval && field->charset->mbminlen > 1)
- unhex_type2(field->interval);
+ if (field->save_interval)
+ {
+ field->interval= field->save_interval;
+ field->save_interval= 0;
+ }
}
}
DBUG_RETURN(0);
@@ -589,18 +595,36 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
reclength=(uint) (field->offset+ data_offset + length);
n_length+= (ulong) strlen(field->field_name)+1;
field->interval_id=0;
+ field->save_interval= 0;
if (field->interval)
{
uint old_int_count=int_count;
if (field->charset->mbminlen > 1)
{
- /* Escape UCS2 intervals using HEX notation */
+ /*
+ Escape UCS2 intervals using HEX notation to avoid
+ problems with delimiters between enum elements.
+ As the original representation is still needed in
+ the function make_empty_rec to create a record
+ filled with default values, it is saved in save_interval.
+ The HEX representation is created from this copy.
+ */
+ field->save_interval= field->interval;
+ field->interval= (TYPELIB*) sql_alloc(sizeof(TYPELIB));
+ *field->interval= *field->save_interval;
+ field->interval->type_names=
+ (const char **) sql_alloc(sizeof(char*) *
+ (field->interval->count+1));
+ field->interval->type_names[field->interval->count]= 0;
+ field->interval->type_lengths=
+ (uint *) sql_alloc(sizeof(uint) * field->interval->count);
+
for (uint pos= 0; pos < field->interval->count; pos++)
{
char *dst;
- uint length= field->interval->type_lengths[pos], hex_length;
- const char *src= field->interval->type_names[pos];
+ uint length= field->save_interval->type_lengths[pos], hex_length;
+ const char *src= field->save_interval->type_names[pos];
hex_length= length * 2;
field->interval->type_lengths[pos]= hex_length;
field->interval->type_names[pos]= dst= sql_alloc(hex_length + 1);
@@ -852,7 +876,8 @@ static bool make_empty_rec(THD *thd, File file,enum legacy_db_type table_type,
field->charset,
field->geom_type,
field->unireg_check,
- field->interval,
+ field->save_interval ? field->save_interval :
+ field->interval,
field->field_name);
if (!regfield)
goto err; // End of memory