Diffstat (limited to 'sql')
-rw-r--r--  sql/Makefile.am               |   3
-rw-r--r--  sql/field.cc                  |  15
-rw-r--r--  sql/field.h                   |   4
-rw-r--r--  sql/filesort.cc               |  18
-rw-r--r--  sql/ha_ndbcluster.cc          |  44
-rw-r--r--  sql/ha_ndbcluster.h           |   1
-rw-r--r--  sql/ha_ndbcluster_binlog.cc   |  10
-rw-r--r--  sql/ha_partition.cc           |  81
-rw-r--r--  sql/ha_partition.h            |   1
-rw-r--r--  sql/handler.h                 |  10
-rw-r--r--  sql/item.h                    |  16
-rw-r--r--  sql/item_cmpfunc.h            |  14
-rw-r--r--  sql/item_create.cc            |  12
-rw-r--r--  sql/item_func.h               |  32
-rw-r--r--  sql/item_strfunc.h            |  14
-rw-r--r--  sql/item_sum.cc               |  21
-rw-r--r--  sql/item_timefunc.h           |  17
-rw-r--r--  sql/item_xmlfunc.h            |   1
-rw-r--r--  sql/log.cc                    | 141
-rw-r--r--  sql/log_event.cc              |  17
-rw-r--r--  sql/mysql_priv.h              |  63
-rw-r--r--  sql/mysqld.cc                 |   3
-rw-r--r--  sql/partition_info.cc         |  44
-rw-r--r--  sql/partition_info.h          |   7
-rw-r--r--  sql/share/errmsg.txt          |  68
-rw-r--r--  sql/slave.cc                  | 308
-rw-r--r--  sql/spatial.cc                |  23
-rw-r--r--  sql/sql_base.cc               |  57
-rw-r--r--  sql/sql_cache.cc              |  12
-rw-r--r--  sql/sql_class.cc              | 142
-rw-r--r--  sql/sql_class.h               |  34
-rw-r--r--  sql/sql_insert.cc             |   8
-rw-r--r--  sql/sql_load.cc               |   1
-rw-r--r--  sql/sql_parse.cc              |   8
-rw-r--r--  sql/sql_partition.cc          |  43
-rw-r--r--  sql/sql_repl.cc               |   6
-rw-r--r--  sql/sql_show.cc               |  28
-rw-r--r--  sql/sql_sort.h                |   9
-rw-r--r--  sql/sql_yacc.yy               |  10
-rw-r--r--  sql/stacktrace.c              |   9
-rw-r--r--  sql/uniques.cc                |  13
-rw-r--r--  sql/unireg.h                  |   2
42 files changed, 982 insertions, 388 deletions
diff --git a/sql/Makefile.am b/sql/Makefile.am
index 36d066758bc..ff33c1cae0c 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -43,8 +43,7 @@ mysqld_LDADD = libndb.la \
@pstack_libs@ \
@mysql_plugin_libs@ \
$(LDADD) $(CXXLDFLAGS) $(WRAPLIBS) @LIBDL@ \
- $(yassl_libs) $(openssl_libs) \
- @MYSQLD_EXTRA_LIBS@
+ $(yassl_libs) $(openssl_libs) @MYSQLD_EXTRA_LIBS@
noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
item_strfunc.h item_timefunc.h \
diff --git a/sql/field.cc b/sql/field.cc
index 55a93ed46d6..2cd81afca49 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1360,6 +1360,18 @@ bool Field::send_binary(Protocol *protocol)
}
+int Field::store(const char *to, uint length, CHARSET_INFO *cs,
+ enum_check_fields check_level)
+{
+ int res;
+ enum_check_fields old_check_level= table->in_use->count_cuted_fields;
+ table->in_use->count_cuted_fields= check_level;
+ res= store(to, length, cs);
+ table->in_use->count_cuted_fields= old_check_level;
+ return res;
+}
+
+
my_decimal *Field::val_decimal(my_decimal *decimal)
{
/* This never have to be called */
@@ -2316,6 +2328,7 @@ Field_new_decimal::Field_new_decimal(uchar *ptr_arg,
unireg_check_arg, field_name_arg, dec_arg, zero_arg, unsigned_arg)
{
precision= my_decimal_length_to_precision(len_arg, dec_arg, unsigned_arg);
+ set_if_smaller(precision, DECIMAL_MAX_PRECISION);
DBUG_ASSERT((precision <= DECIMAL_MAX_PRECISION) &&
(dec <= DECIMAL_MAX_SCALE));
bin_size= my_decimal_get_binary_size(precision, dec);
@@ -2332,6 +2345,7 @@ Field_new_decimal::Field_new_decimal(uint32 len_arg,
NONE, name, dec_arg, 0, unsigned_arg)
{
precision= my_decimal_length_to_precision(len_arg, dec_arg, unsigned_arg);
+ set_if_smaller(precision, DECIMAL_MAX_PRECISION);
DBUG_ASSERT((precision <= DECIMAL_MAX_PRECISION) &&
(dec <= DECIMAL_MAX_SCALE));
bin_size= my_decimal_get_binary_size(precision, dec);
@@ -6457,6 +6471,7 @@ Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table,
is 2.
****************************************************************************/
+const uint Field_varstring::MAX_SIZE= UINT_MAX16;
int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs)
{
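
The new Field::store() overload above saves the session's count_cuted_fields level, overrides it for a single store call, and restores it afterwards. Below is a minimal sketch of the same save/override/restore pattern written as a generic RAII guard; ScopedOverride and the names in the usage comment are illustrative, not server code:

    // Sketch only: temporarily override a value for the lifetime of a scope and
    // restore the previous value on exit, as the Field::store() wrapper above
    // does by hand with count_cuted_fields.
    template <typename T>
    class ScopedOverride
    {
      T &ref_;
      T saved_;
    public:
      ScopedOverride(T &ref, T value) : ref_(ref), saved_(ref) { ref_= value; }
      ~ScopedOverride() { ref_= saved_; }            // restore the old level
      ScopedOverride(const ScopedOverride&) = delete;
      ScopedOverride &operator=(const ScopedOverride&) = delete;
    };

    // Hypothetical usage, mirroring the wrapper: run one store() call under
    // CHECK_FIELD_WARN and restore the previous level automatically.
    //   ScopedOverride<enum_check_fields> guard(session.count_cuted_fields,
    //                                           CHECK_FIELD_WARN);
    //   field->store(str, length, charset);
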
diff --git a/sql/field.h b/sql/field.h
index a0fe0f2e57e..8bf087c7ebd 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -100,6 +100,8 @@ public:
virtual int store(longlong nr, bool unsigned_val)=0;
virtual int store_decimal(const my_decimal *d)=0;
virtual int store_time(MYSQL_TIME *ltime, timestamp_type t_type);
+ int store(const char *to, uint length, CHARSET_INFO *cs,
+ enum_check_fields check_level);
virtual double val_real(void)=0;
virtual longlong val_int(void)=0;
virtual my_decimal *val_decimal(my_decimal *);
@@ -1183,7 +1185,7 @@ public:
The maximum space available in a Field_varstring, in bytes. See
length_bytes.
*/
- static const uint MAX_SIZE= UINT_MAX16;
+ static const uint MAX_SIZE;
/* Store number of bytes used to store length (1 or 2) */
uint32 length_bytes;
Field_varstring(uchar *ptr_arg,
diff --git a/sql/filesort.cc b/sql/filesort.cc
index b1dfb4d5e71..e547940797b 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -1128,6 +1128,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
BUFFPEK *buffpek;
QUEUE queue;
qsort2_cmp cmp;
+ void *first_cmp_arg;
volatile THD::killed_state *killed= &current_thd->killed;
THD::killed_state not_killable;
DBUG_ENTER("merge_buffers");
@@ -1152,9 +1153,18 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
/* The following will fire if there is not enough space in sort_buffer */
DBUG_ASSERT(maxcount!=0);
+ if (param->unique_buff)
+ {
+ cmp= param->compare;
+ first_cmp_arg= (void *) &param->cmp_context;
+ }
+ else
+ {
+ cmp= get_ptr_compare(sort_length);
+ first_cmp_arg= (void*) &sort_length;
+ }
if (init_queue(&queue, (uint) (Tb-Fb)+1, offsetof(BUFFPEK,key), 0,
- (queue_compare) (cmp= get_ptr_compare(sort_length)),
- (void*) &sort_length))
+ (queue_compare) cmp, first_cmp_arg))
DBUG_RETURN(1); /* purecov: inspected */
for (buffpek= Fb ; buffpek <= Tb ; buffpek++)
{
@@ -1207,7 +1217,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
buffpek= (BUFFPEK*) queue_top(&queue);
if (cmp) // Remove duplicates
{
- if (!(*cmp)(&sort_length, &(param->unique_buff),
+ if (!(*cmp)(first_cmp_arg, &(param->unique_buff),
(uchar**) &buffpek->key))
goto skip_duplicate;
memcpy(param->unique_buff, (uchar*) buffpek->key, rec_length);
@@ -1259,7 +1269,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
*/
if (cmp)
{
- if (!(*cmp)(&sort_length, &(param->unique_buff), (uchar**) &buffpek->key))
+ if (!(*cmp)(first_cmp_arg, &(param->unique_buff), (uchar**) &buffpek->key))
{
buffpek->key+= rec_length; // Remove duplicate
--buffpek->mem_count;
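
The merge_buffers() change above chooses the comparator and its first argument once, up front: the deduplicating path (param->unique_buff set) uses the caller's comparator with its context, while the plain path keeps the byte-wise key comparator with the sort length. A standalone sketch of that selection with a qsort2_cmp-style signature follows; MergeParam and pick_comparator are illustrative names, and the context is passed directly here where the patch passes &param->cmp_context:

    // Sketch only: select a (comparator, first argument) pair once and reuse it
    // for both queue ordering and duplicate detection.
    #include <cstddef>
    #include <cstring>

    typedef int (*cmp_fn)(void *arg, unsigned char **a, unsigned char **b);

    static int ptr_compare(void *arg, unsigned char **a, unsigned char **b)
    {
      size_t len= *static_cast<size_t*>(arg);   // fixed-length byte-wise compare
      return memcmp(*a, *b, len);
    }

    struct MergeParam
    {
      cmp_fn compare;                 // caller-supplied comparator
      void *cmp_context;              // context for that comparator
      unsigned char *unique_buff;     // non-NULL means "remove duplicates"
    };

    static void pick_comparator(MergeParam *param, size_t *sort_length,
                                cmp_fn *cmp, void **first_cmp_arg)
    {
      if (param->unique_buff)
      {
        *cmp= param->compare;                   // dedup: compare records the way
        *first_cmp_arg= param->cmp_context;     // the caller defined
      }
      else
      {
        *cmp= ptr_compare;                      // plain merge: compare sort keys
        *first_cmp_arg= sort_length;
      }
    }
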
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 24ce9946086..0b8ffd4a2fb 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2812,7 +2812,8 @@ int ha_ndbcluster::write_row(uchar *record)
if (unlikely(m_slow_path))
{
- if (!(thd->options & OPTION_BIN_LOG))
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ if (thd_ndb->trans_options & TNTO_NO_LOGGING)
op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING);
else if (thd->slave_thread)
op->setAnyValue(thd->server_id);
@@ -3101,7 +3102,8 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
if (unlikely(m_slow_path))
{
- if (!(thd->options & OPTION_BIN_LOG))
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ if (thd_ndb->trans_options & TNTO_NO_LOGGING)
op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING);
else if (thd->slave_thread)
op->setAnyValue(thd->server_id);
@@ -3168,7 +3170,8 @@ int ha_ndbcluster::delete_row(const uchar *record)
if (unlikely(m_slow_path))
{
- if (!(thd->options & OPTION_BIN_LOG))
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ if (thd_ndb->trans_options & TNTO_NO_LOGGING)
((NdbOperation *)trans->getLastDefinedOperation())->
setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING);
else if (thd->slave_thread)
@@ -3207,7 +3210,8 @@ int ha_ndbcluster::delete_row(const uchar *record)
if (unlikely(m_slow_path))
{
- if (!(thd->options & OPTION_BIN_LOG))
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ if (thd_ndb->trans_options & TNTO_NO_LOGGING)
op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING);
else if (thd->slave_thread)
op->setAnyValue(thd->server_id);
@@ -4385,8 +4389,13 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
thd_ndb->query_state&= NDB_QUERY_NORMAL;
thd_ndb->trans_options= 0;
thd_ndb->m_slow_path= FALSE;
- if (thd->slave_thread ||
- !(thd->options & OPTION_BIN_LOG))
+ if (!(thd->options & OPTION_BIN_LOG) ||
+ thd->variables.binlog_format == BINLOG_FORMAT_STMT)
+ {
+ thd_ndb->trans_options|= TNTO_NO_LOGGING;
+ thd_ndb->m_slow_path= TRUE;
+ }
+ else if (thd->slave_thread)
thd_ndb->m_slow_path= TRUE;
trans_register_ha(thd, FALSE, ndbcluster_hton);
}
@@ -4406,8 +4415,13 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
thd_ndb->query_state&= NDB_QUERY_NORMAL;
thd_ndb->trans_options= 0;
thd_ndb->m_slow_path= FALSE;
- if (thd->slave_thread ||
- !(thd->options & OPTION_BIN_LOG))
+ if (!(thd->options & OPTION_BIN_LOG) ||
+ thd->variables.binlog_format == BINLOG_FORMAT_STMT)
+ {
+ thd_ndb->trans_options|= TNTO_NO_LOGGING;
+ thd_ndb->m_slow_path= TRUE;
+ }
+ else if (thd->slave_thread)
thd_ndb->m_slow_path= TRUE;
trans_register_ha(thd, TRUE, ndbcluster_hton);
@@ -8961,6 +8975,14 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
thd->main_security_ctx.master_access= ~0;
thd->main_security_ctx.priv_user = 0;
+ CHARSET_INFO *charset_connection;
+ charset_connection= get_charset_by_csname("utf8",
+ MY_CS_PRIMARY, MYF(MY_WME));
+ thd->variables.character_set_client= charset_connection;
+ thd->variables.character_set_results= charset_connection;
+ thd->variables.collation_connection= charset_connection;
+ thd->update_charset();
+
/* Signal successful initialization */
ndb_util_thread_running= 1;
pthread_cond_signal(&COND_ndb_util_ready);
@@ -10118,6 +10140,10 @@ static int ndbcluster_fill_files_table(handlerton *hton,
{
if (ndberr.classification == NdbError::SchemaError)
continue;
+
+ if (ndberr.classification == NdbError::UnknownResultError)
+ continue;
+
ERR_RETURN(ndberr);
}
NdbDictionary::Tablespace ts= dict->getTablespace(df.getTablespace());
@@ -10197,6 +10223,8 @@ static int ndbcluster_fill_files_table(handlerton *hton,
{
if (ndberr.classification == NdbError::SchemaError)
continue;
+ if (ndberr.classification == NdbError::UnknownResultError)
+ continue;
ERR_RETURN(ndberr);
}
NdbDictionary::LogfileGroup lfg=
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 5c6646f68af..4f5a1359d2d 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -178,6 +178,7 @@ enum THD_NDB_OPTIONS
enum THD_NDB_TRANS_OPTIONS
{
TNTO_INJECTED_APPLY_STATUS= 1 << 0
+ ,TNTO_NO_LOGGING= 1 << 1
};
struct Ndb_local_table_statistics {
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index a724d1150dc..f388b60f3d5 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -3674,15 +3674,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
if (opt_bin_log)
{
- if (global_system_variables.binlog_format == BINLOG_FORMAT_ROW ||
- global_system_variables.binlog_format == BINLOG_FORMAT_MIXED)
- {
- ndb_binlog_running= TRUE;
- }
- else
- {
- sql_print_error("NDB: only row based binary logging is supported");
- }
+ ndb_binlog_running= TRUE;
}
/* Thread start up completed */
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index d874525c4ad..e4924e8e8f2 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -2961,8 +2961,34 @@ int ha_partition::rnd_init(bool scan)
uint32 part_id;
DBUG_ENTER("ha_partition::rnd_init");
- include_partition_fields_in_used_fields();
-
+ /*
+ For operations that may need to change data, we may need to extend
+ read_set.
+ */
+ if (m_lock_type == F_WRLCK)
+ {
+ /*
+ If write_set contains any of the fields used in partition and
+ subpartition expression, we need to set all bits in read_set because
+ the row may need to be inserted in a different [sub]partition. In
+ other words update_row() can be converted into write_row(), which
+ requires a complete record.
+ */
+ if (bitmap_is_overlapping(&m_part_info->full_part_field_set,
+ table->write_set))
+ bitmap_set_all(table->read_set);
+ else
+ {
+ /*
+ Some handlers only read fields as specified by the bitmap for the
+ read set. For partitioned handlers we always require that the
+ fields of the partition functions are read such that we can
+ calculate the partition id to place updated and deleted records.
+ */
+ bitmap_union(table->read_set, &m_part_info->full_part_field_set);
+ }
+ }
+
/* Now we see what the index of our first important partition is */
DBUG_PRINT("info", ("m_part_info->used_partitions: 0x%lx",
(long) m_part_info->used_partitions.bitmap));
@@ -3118,7 +3144,7 @@ int ha_partition::rnd_next(uchar *buf)
continue; // Probably MyISAM
if (result != HA_ERR_END_OF_FILE)
- break; // Return error
+ goto end_dont_reset_start_part; // Return error
/* End current partition */
late_extra_no_cache(part_id);
@@ -3144,6 +3170,7 @@ int ha_partition::rnd_next(uchar *buf)
end:
m_part_spec.start_part= NO_CURRENT_PART_ID;
+end_dont_reset_start_part:
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(result);
}
@@ -3275,7 +3302,15 @@ int ha_partition::index_init(uint inx, bool sorted)
m_start_key.length= 0;
m_ordered= sorted;
m_curr_key_info= table->key_info+inx;
- include_partition_fields_in_used_fields();
+ /*
+ Some handlers only read fields as specified by the bitmap for the
+ read set. For partitioned handlers we always require that the
+ fields of the partition functions are read such that we can
+ calculate the partition id to place updated and deleted records.
+ This is only required for operations that may need to change data.
+ */
+ if (m_lock_type == F_WRLCK)
+ bitmap_union(table->read_set, &m_part_info->full_part_field_set);
file= m_file;
do
{
@@ -4144,35 +4179,6 @@ int ha_partition::handle_ordered_prev(uchar *buf)
}
-/*
- Set fields in partition functions in read set for underlying handlers
-
- SYNOPSIS
- include_partition_fields_in_used_fields()
-
- RETURN VALUE
- NONE
-
- DESCRIPTION
- Some handlers only read fields as specified by the bitmap for the
- read set. For partitioned handlers we always require that the
- fields of the partition functions are read such that we can
- calculate the partition id to place updated and deleted records.
-*/
-
-void ha_partition::include_partition_fields_in_used_fields()
-{
- Field **ptr= m_part_field_array;
- DBUG_ENTER("ha_partition::include_partition_fields_in_used_fields");
-
- do
- {
- bitmap_set_bit(table->read_set, (*ptr)->field_index);
- } while (*(++ptr));
- DBUG_VOID_RETURN;
-}
-
-
/****************************************************************************
MODULE information calls
****************************************************************************/
@@ -4714,6 +4720,12 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
HA_EXTRA_KEY_CACHE:
HA_EXTRA_NO_KEY_CACHE:
This parameters are no longer used and could be removed.
+
+ 7) Parameters only used by federated tables for query processing
+ ----------------------------------------------------------------
+ HA_EXTRA_INSERT_WITH_UPDATE:
+ Inform handler that an "INSERT...ON DUPLICATE KEY UPDATE" will be
+ executed. This condition is unset by HA_EXTRA_NO_IGNORE_DUP_KEY.
*/
int ha_partition::extra(enum ha_extra_function operation)
@@ -4795,6 +4807,9 @@ int ha_partition::extra(enum ha_extra_function operation)
*/
break;
}
+ /* Category 7), used by federated handlers */
+ case HA_EXTRA_INSERT_WITH_UPDATE:
+ DBUG_RETURN(loop_extra(operation));
default:
{
/* Temporary crash to discover what is wrong */
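
rnd_init() and index_init() above now widen the read set only when the table is locked for writing: if the write set touches any partition or subpartition field, the whole record is read so the row can be moved to another [sub]partition; otherwise only the partition fields are added so the partition id can still be computed. A minimal sketch of that decision using std::bitset in place of MY_BITMAP; the type, table width and function name are illustrative:

    // Sketch only: decide how much of the record must be read for a partitioned
    // table, mirroring the logic added to rnd_init()/index_init().
    #include <bitset>
    #include <cstddef>

    constexpr std::size_t kMaxFields= 64;        // illustrative table width
    using FieldSet= std::bitset<kMaxFields>;

    void adjust_read_set(bool write_lock,              // m_lock_type == F_WRLCK
                         const FieldSet &part_fields,  // full_part_field_set
                         const FieldSet &write_set,
                         FieldSet &read_set)
    {
      if (!write_lock)
        return;                                  // read-only scan: leave as is
      if ((part_fields & write_set).any())       // bitmap_is_overlapping(): an
        read_set.set();                          // UPDATE may turn into a row
                                                 // move, so read the full record
      else
        read_set|= part_fields;                  // bitmap_union(): enough to
                                                 // compute the partition id
    }
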
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index a168007ea04..895f001fa6a 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -449,7 +449,6 @@ private:
int handle_ordered_next(uchar * buf, bool next_same);
int handle_ordered_prev(uchar * buf);
void return_top_record(uchar * buf);
- void include_partition_fields_in_used_fields();
public:
/*
-------------------------------------------------------------------------
diff --git a/sql/handler.h b/sql/handler.h
index 4095a2f4cb1..09de9a3873a 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -328,13 +328,21 @@ typedef ulonglong my_xid; // this line is the same as in log_event.h
#define MYSQL_XID_OFFSET (MYSQL_XID_PREFIX_LEN+sizeof(server_id))
#define MYSQL_XID_GTRID_LEN (MYSQL_XID_OFFSET+sizeof(my_xid))
-#define XIDDATASIZE 128
+#define XIDDATASIZE MYSQL_XIDDATASIZE
#define MAXGTRIDSIZE 64
#define MAXBQUALSIZE 64
#define COMPATIBLE_DATA_YES 0
#define COMPATIBLE_DATA_NO 1
+/**
+ struct xid_t is binary compatible with the XID structure as
+ in the X/Open CAE Specification, Distributed Transaction Processing:
+ The XA Specification, X/Open Company Ltd., 1991.
+ http://www.opengroup.org/bookstore/catalog/c193.htm
+
+ @see MYSQL_XID in mysql/plugin.h
+*/
struct xid_t {
long formatID;
long gtrid_length;
diff --git a/sql/item.h b/sql/item.h
index 6df85476f03..6d993d72821 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -843,8 +843,7 @@ public:
german character for double s is equal to 2 s.
The default is that an item is not allowed
- in a partition function. However all mathematical functions, string
- manipulation functions, date functions are allowed. Allowed functions
+ in a partition function. Allowed functions
can never depend on server version, they cannot depend on anything
related to the environment. They can also only depend on a set of
fields in the table itself. They cannot depend on other tables and
@@ -1633,6 +1632,7 @@ public:
uint decimal_precision() const
{ return (uint)(max_length - test(value < 0)); }
bool eq(const Item *, bool binary_cmp) const;
+ bool check_partition_func_processor(uchar *bool_arg) { return FALSE;}
};
@@ -1650,6 +1650,7 @@ public:
void print(String *str);
Item_num *neg ();
uint decimal_precision() const { return max_length; }
+ bool check_partition_func_processor(uchar *bool_arg) { return FALSE;}
};
@@ -1692,6 +1693,7 @@ public:
uint decimal_precision() const { return decimal_value.precision(); }
bool eq(const Item *, bool binary_cmp) const;
void set_decimal_value(my_decimal *value_par);
+ bool check_partition_func_processor(uchar *bool_arg) { return FALSE;}
};
@@ -1752,7 +1754,6 @@ public:
{}
void print(String *str) { str->append(func_name); }
Item *safe_charset_converter(CHARSET_INFO *tocs);
- bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
};
@@ -1861,7 +1862,6 @@ public:
CHARSET_INFO *cs= NULL):
Item_string(name, length, cs)
{}
- bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
};
@@ -1915,7 +1915,6 @@ public:
unsigned_flag=1;
}
enum_field_types field_type() const { return int_field_type; }
- bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
};
@@ -2116,6 +2115,12 @@ public:
bool fix_fields(THD *, Item **);
bool eq(const Item *item, bool binary_cmp) const;
+ Item *get_tmp_table_item(THD *thd)
+ {
+ Item *item= Item_ref::get_tmp_table_item(thd);
+ item->name= name;
+ return item;
+ }
virtual Ref_Type ref_type() { return VIEW_REF; }
};
@@ -2237,7 +2242,6 @@ public:
}
Item *clone_item();
virtual Item *real_item() { return ref; }
- bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
};
#ifdef MYSQL_SERVER
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 1bc52ea093c..ee36aca069d 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -355,7 +355,6 @@ public:
}
Item *neg_transformer(THD *thd);
virtual Item *negated_item();
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
bool subst_argument_checker(uchar **arg) { return TRUE; }
};
@@ -367,7 +366,6 @@ public:
enum Functype functype() const { return NOT_FUNC; }
const char *func_name() const { return "not"; }
Item *neg_transformer(THD *thd);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
void print(String *str);
};
@@ -598,7 +596,6 @@ public:
bool is_bool_func() { return 1; }
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
uint decimal_precision() const { return 1; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -610,7 +607,6 @@ public:
optimize_type select_optimize() const { return OPTIMIZE_NONE; }
const char *func_name() const { return "strcmp"; }
void print(String *str) { Item_func::print(str); }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -673,7 +669,6 @@ public:
const char *func_name() const { return "ifnull"; }
Field *tmp_table_field(TABLE *table);
uint decimal_precision() const;
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -714,7 +709,6 @@ public:
void print(String *str) { Item_func::print(str); }
table_map not_null_tables() const { return 0; }
bool is_null();
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
/* Functions to handle the optimized IN */
@@ -1141,7 +1135,6 @@ public:
void print(String *str);
Item *find_item(String *str);
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
- bool check_partition_func_processor(uchar *bool_arg) { return FALSE;}
void cleanup();
};
@@ -1211,7 +1204,6 @@ public:
bool nulls_in_row();
bool is_bool_func() { return 1; }
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class cmp_item_row :public cmp_item
@@ -1283,7 +1275,6 @@ public:
optimize_type select_optimize() const { return OPTIMIZE_NULL; }
Item *neg_transformer(THD *thd);
CHARSET_INFO *compare_collation() { return args[0]->collation.collation; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
/* Functions used by HAVING for rewriting IN subquery */
@@ -1310,7 +1301,6 @@ public:
*/
table_map used_tables() const
{ return used_tables_cache | RAND_TABLE_BIT; }
- bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
};
@@ -1333,7 +1323,6 @@ public:
void print(String *str);
CHARSET_INFO *compare_collation() { return args[0]->collation.collation; }
void top_level_item() { abort_on_null=1; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -1372,7 +1361,6 @@ public:
const char *func_name() const { return "like"; }
bool fix_fields(THD *thd, Item **ref);
void cleanup();
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
#ifdef USE_REGEX
@@ -1395,7 +1383,6 @@ public:
const char *func_name() const { return "regexp"; }
void print(String *str) { print_op(str); }
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
#else
@@ -1452,7 +1439,6 @@ public:
Item *transform(Item_transformer transformer, uchar *arg);
void traverse_cond(Cond_traverser, void *arg, traverse_order order);
void neg_arguments(THD *thd);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
bool subst_argument_checker(uchar **arg) { return TRUE; }
Item *compile(Item_analyzer analyzer, uchar **arg_p,
Item_transformer transformer, uchar *arg_t);
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 20041b1176a..e20926c564f 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -5039,6 +5039,18 @@ create_func_cast(THD *thd, Item *a, Cast_target cast_type,
my_error(ER_M_BIGGER_THAN_D, MYF(0), "");
return 0;
}
+ if (len > DECIMAL_MAX_PRECISION)
+ {
+ my_error(ER_TOO_BIG_PRECISION, MYF(0), len, a->name,
+ DECIMAL_MAX_PRECISION);
+ return 0;
+ }
+ if (dec > DECIMAL_MAX_SCALE)
+ {
+ my_error(ER_TOO_BIG_SCALE, MYF(0), dec, a->name,
+ DECIMAL_MAX_SCALE);
+ return 0;
+ }
res= new (thd->mem_root) Item_decimal_typecast(a, len, dec);
break;
}
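
create_func_cast() now rejects CAST(expr AS DECIMAL(M,D)) arguments that exceed the server limits before the typecast item is built, in addition to the existing M >= D check. A small standalone sketch of that validation; kMaxPrecision and kMaxScale are assumed stand-ins for DECIMAL_MAX_PRECISION and DECIMAL_MAX_SCALE:

    // Sketch only: validate DECIMAL(M,D) bounds before building the cast item.
    #include <optional>
    #include <string>

    constexpr unsigned kMaxPrecision= 65;   // assumed DECIMAL_MAX_PRECISION
    constexpr unsigned kMaxScale= 30;       // assumed DECIMAL_MAX_SCALE

    std::optional<std::string> check_decimal_cast(unsigned len, unsigned dec)
    {
      if (dec > len)
        return "ER_M_BIGGER_THAN_D: D cannot be larger than M";
      if (len > kMaxPrecision)
        return "ER_TOO_BIG_PRECISION: precision exceeds the maximum";
      if (dec > kMaxScale)
        return "ER_TOO_BIG_SCALE: scale exceeds the maximum";
      return std::nullopt;  // ok: safe to build Item_decimal_typecast(a, len, dec)
    }
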
diff --git a/sql/item_func.h b/sql/item_func.h
index 8fc68f93e12..568effb2f63 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -253,7 +253,6 @@ public:
void fix_num_length_and_dec();
void find_num_type();
String *str_op(String *str) { DBUG_ASSERT(0); return 0; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -311,7 +310,6 @@ public:
{ max_length=args[0]->max_length; unsigned_flag=0; }
void print(String *str);
uint decimal_precision() const { return args[0]->decimal_precision(); }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -345,7 +343,6 @@ public:
void fix_length_and_dec() {};
const char *func_name() const { return "decimal_typecast"; }
void print(String *);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -442,6 +439,7 @@ public:
void fix_length_and_dec();
void fix_num_length_and_dec();
uint decimal_precision() const { return args[0]->decimal_precision(); }
+ bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -454,6 +452,7 @@ public:
my_decimal *decimal_op(my_decimal *);
const char *func_name() const { return "abs"; }
void fix_length_and_dec();
+ bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
// A class to handle logarithmic and trigonometric functions
@@ -488,7 +487,6 @@ public:
Item_func_exp(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "exp"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -498,7 +496,6 @@ public:
Item_func_ln(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "ln"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -509,7 +506,6 @@ public:
Item_func_log(Item *a,Item *b) :Item_dec_func(a,b) {}
double val_real();
const char *func_name() const { return "log"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -519,7 +515,6 @@ public:
Item_func_log2(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "log2"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -529,7 +524,6 @@ public:
Item_func_log10(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "log10"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -539,7 +533,6 @@ public:
Item_func_sqrt(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "sqrt"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -549,7 +542,6 @@ public:
Item_func_pow(Item *a,Item *b) :Item_dec_func(a,b) {}
double val_real();
const char *func_name() const { return "pow"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -559,7 +551,6 @@ public:
Item_func_acos(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "acos"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_asin :public Item_dec_func
@@ -568,7 +559,6 @@ public:
Item_func_asin(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "asin"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_atan :public Item_dec_func
@@ -578,7 +568,6 @@ public:
Item_func_atan(Item *a,Item *b) :Item_dec_func(a,b) {}
double val_real();
const char *func_name() const { return "atan"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_cos :public Item_dec_func
@@ -587,7 +576,6 @@ public:
Item_func_cos(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "cos"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_sin :public Item_dec_func
@@ -596,7 +584,6 @@ public:
Item_func_sin(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "sin"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_tan :public Item_dec_func
@@ -605,7 +592,6 @@ public:
Item_func_tan(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "tan"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_integer :public Item_int_func
@@ -633,6 +619,7 @@ public:
longlong int_op();
double real_op();
my_decimal *decimal_op(my_decimal *);
+ bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -644,6 +631,7 @@ public:
longlong int_op();
double real_op();
my_decimal *decimal_op(my_decimal *);
+ bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
/* This handles round and truncate */
@@ -684,7 +672,6 @@ public:
Item_func_sign(Item *a) :Item_int_func(a) {}
const char *func_name() const { return "sign"; }
longlong val_int();
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -699,7 +686,6 @@ public:
const char *func_name() const { return name; }
void fix_length_and_dec()
{ decimals= NOT_FIXED_DEC; max_length= float_length(decimals); }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -725,7 +711,6 @@ public:
void fix_length_and_dec();
enum Item_result result_type () const { return cmp_type; }
bool result_as_longlong() { return compare_as_dates; };
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
uint cmp_datetimes(ulonglong *value);
};
@@ -781,7 +766,6 @@ public:
longlong val_int();
const char *func_name() const { return "length"; }
void fix_length_and_dec() { max_length=10; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_bit_length :public Item_func_length
@@ -801,7 +785,6 @@ public:
longlong val_int();
const char *func_name() const { return "char_length"; }
void fix_length_and_dec() { max_length=10; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_coercibility :public Item_int_func
@@ -825,7 +808,6 @@ public:
longlong val_int();
void fix_length_and_dec();
void print(String *str);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -850,7 +832,6 @@ public:
longlong val_int();
const char *func_name() const { return "ascii"; }
void fix_length_and_dec() { max_length=3; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_ord :public Item_int_func
@@ -860,7 +841,6 @@ public:
Item_func_ord(Item *a) :Item_int_func(a) {}
longlong val_int();
const char *func_name() const { return "ord"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_find_in_set :public Item_int_func
@@ -874,7 +854,6 @@ public:
longlong val_int();
const char *func_name() const { return "find_in_set"; }
void fix_length_and_dec();
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
/* Base class for all bit functions: '~', '|', '^', '&', '>>', '<<' */
@@ -886,7 +865,6 @@ public:
Item_func_bit(Item *a) :Item_int_func(a) {}
void fix_length_and_dec() { unsigned_flag= 1; }
void print(String *str) { print_op(str); }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_bit_or :public Item_func_bit
@@ -912,7 +890,6 @@ public:
longlong val_int();
const char *func_name() const { return "bit_count"; }
void fix_length_and_dec() { max_length=2; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_shift_left :public Item_func_bit
@@ -1363,7 +1340,6 @@ public:
longlong val_int();
const char *func_name() const { return "inet_aton"; }
void fix_length_and_dec() { decimals= 0; max_length= 21; maybe_null= 1; unsigned_flag= 1;}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index c036ec490db..6d2d9c199c9 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -50,7 +50,6 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "md5"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -94,7 +93,6 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "concat"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_concat_ws :public Item_str_func
@@ -116,7 +114,6 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "reverse"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -154,7 +151,6 @@ protected:
public:
Item_str_conv(Item *item) :Item_str_func(item) {}
String *val_str(String *);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -455,7 +451,6 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "soundex"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -549,7 +544,6 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "rpad"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -562,7 +556,6 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "lpad"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -577,7 +570,6 @@ public:
collation.set(default_charset());
max_length= 64;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -594,7 +586,6 @@ public:
decimals=0;
max_length=args[0]->max_length*2*collation.collation->mbmaxlen;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_unhex :public Item_str_func
@@ -614,7 +605,6 @@ public:
decimals=0;
max_length=(1+args[0]->max_length)/2;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -638,7 +628,6 @@ public:
}
void print(String *str);
const char *func_name() const { return "cast_as_binary"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -678,7 +667,6 @@ public:
String* val_str(String* str);
const char *func_name() const { return "inet_ntoa"; }
void fix_length_and_dec() { decimals = 0; max_length=3*8+7; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_quote :public Item_str_func
@@ -693,7 +681,6 @@ public:
collation.set(args[0]->collation);
max_length= args[0]->max_length * 2 + 2;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_conv_charset :public Item_str_func
@@ -794,7 +781,6 @@ public:
const char *func_name() const { return "crc32"; }
void fix_length_and_dec() { max_length=10; }
longlong val_int();
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_uncompressed_length : public Item_int_func
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index e2d4c02d986..0fa46d231a9 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -3230,6 +3230,27 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref)
null_value= 1;
max_length= thd->variables.group_concat_max_len;
+ uint32 offset;
+ if (separator->needs_conversion(separator->length(), separator->charset(),
+ collation.collation, &offset))
+ {
+ uint32 buflen= collation.collation->mbmaxlen * separator->length();
+ uint errors, conv_length;
+ char *buf;
+ String *new_separator;
+
+ if (!(buf= (char*) thd->stmt_arena->alloc(buflen)) ||
+ !(new_separator= new(thd->stmt_arena->mem_root)
+ String(buf, buflen, collation.collation)))
+ return TRUE;
+
+ conv_length= copy_and_convert(buf, buflen, collation.collation,
+ separator->ptr(), separator->length(),
+ separator->charset(), &errors);
+ new_separator->length(conv_length);
+ separator= new_separator;
+ }
+
if (check_sum_func(thd, ref))
return TRUE;
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index bd0954e6bdb..32f5bcf8e52 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -38,7 +38,6 @@ public:
{
max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -53,7 +52,6 @@ public:
decimals=0;
max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -129,6 +127,7 @@ public:
max_length=10*my_charset_bin.mbmaxlen;
maybe_null=1;
}
+ bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
};
@@ -224,7 +223,6 @@ public:
max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_yearweek :public Item_int_func
@@ -303,6 +301,7 @@ class Item_func_dayname :public Item_func_weekday
max_length=9*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
};
@@ -319,7 +318,6 @@ public:
decimals=0;
max_length=10*MY_CHARSET_BIN_MB_MAXLEN;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -603,7 +601,6 @@ public:
void fix_length_and_dec();
uint format_length(const String *format);
bool eq(const Item *item, bool binary_cmp) const;
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -617,7 +614,6 @@ class Item_func_from_unixtime :public Item_date_func
const char *func_name() const { return "from_unixtime"; }
void fix_length_and_dec();
bool get_date(MYSQL_TIME *res, uint fuzzy_date);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -676,7 +672,6 @@ public:
}
const char *func_name() const { return "sec_to_time"; }
bool result_as_longlong() { return TRUE; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -698,7 +693,6 @@ public:
bool get_date(MYSQL_TIME *res, uint fuzzy_date);
bool eq(const Item *item, bool binary_cmp) const;
void print(String *str);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -753,7 +747,6 @@ public:
max_length=args[0]->max_length;
maybe_null= 1;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -773,7 +766,6 @@ public:
String *val_str(String *a);
void fix_length_and_dec();
void print(String *str);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -884,7 +876,6 @@ public:
max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
}
longlong val_int();
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -907,7 +898,6 @@ public:
}
void print(String *str);
const char *func_name() const { return "add_time"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
double val_real() { return val_real_from_decimal(); }
my_decimal *val_decimal(my_decimal *decimal_value)
{
@@ -949,7 +939,6 @@ public:
:Item_str_timefunc(a, b ,c) {}
String *val_str(String *str);
const char *func_name() const { return "maketime"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
class Item_func_microsecond :public Item_int_func
@@ -981,7 +970,6 @@ public:
maybe_null=1;
}
void print(String *str);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -1028,7 +1016,6 @@ public:
{
return tmp_table_field_from_field_type(table, 1);
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
diff --git a/sql/item_xmlfunc.h b/sql/item_xmlfunc.h
index 650893fa7bd..278c98baf7c 100644
--- a/sql/item_xmlfunc.h
+++ b/sql/item_xmlfunc.h
@@ -41,7 +41,6 @@ public:
Item_func_xml_extractvalue(Item *a,Item *b) :Item_xml_str_func(a,b) {}
const char *func_name() const { return "extractvalue"; }
String *val_str(String *);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
diff --git a/sql/log.cc b/sql/log.cc
index 6dc204265b0..cd1acb35ece 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1445,7 +1445,7 @@ static int binlog_close_connection(handlerton *hton, THD *thd)
{
binlog_trx_data *const trx_data=
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
- DBUG_ASSERT(mysql_bin_log.is_open() && trx_data->empty());
+ DBUG_ASSERT(trx_data->empty());
thd->ha_data[binlog_hton->slot]= 0;
trx_data->~binlog_trx_data();
my_free((uchar*)trx_data, MYF(0));
@@ -1570,7 +1570,6 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all)
DBUG_ENTER("binlog_commit");
binlog_trx_data *const trx_data=
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
- DBUG_ASSERT(mysql_bin_log.is_open());
if (trx_data->empty())
{
@@ -1598,7 +1597,6 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all)
int error=0;
binlog_trx_data *const trx_data=
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
- DBUG_ASSERT(mysql_bin_log.is_open());
if (trx_data->empty()) {
trx_data->reset();
@@ -1659,7 +1657,6 @@ static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv)
static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv)
{
DBUG_ENTER("binlog_savepoint_rollback");
- DBUG_ASSERT(mysql_bin_log.is_open());
/*
Write ROLLBACK TO SAVEPOINT to the binlog cache if we have updated some
@@ -3945,13 +3942,118 @@ int MYSQL_BIN_LOG::write_cache(IO_CACHE *cache, bool lock_log, bool sync_log)
if (reinit_io_cache(cache, READ_CACHE, 0, 0, 0))
return ER_ERROR_ON_WRITE;
- uint bytes= my_b_bytes_in_cache(cache);
+ uint bytes= my_b_bytes_in_cache(cache), group, carry, hdr_offs;
+ long val;
+ uchar header[LOG_EVENT_HEADER_LEN];
+
+ /*
+ The events in the buffer have incorrect end_log_pos data
+ (relative to beginning of group rather than absolute),
+ so we'll recalculate them in situ so the binlog is always
+ correct, even in the middle of a group. This is possible
+ because we now know the start position of the group (the
+ offset of this cache in the log, if you will); all we need
+ to do is to find all event-headers, and add the position of
+ the group to the end_log_pos of each event. This is pretty
+ straight forward, except that we read the cache in segments,
+ so an event-header might end up on the cache-border and get
+ split.
+ */
+
+ group= (uint)my_b_tell(&log_file);
+ hdr_offs= carry= 0;
+
do
{
+
+ /*
+ if we only got a partial header in the last iteration,
+ get the other half now and process a full header.
+ */
+ if (unlikely(carry > 0))
+ {
+ DBUG_ASSERT(carry < LOG_EVENT_HEADER_LEN);
+
+ /* assemble both halves */
+ memcpy(&header[carry], (char *)cache->read_pos, LOG_EVENT_HEADER_LEN - carry);
+
+ /* fix end_log_pos */
+ val= uint4korr(&header[LOG_POS_OFFSET]) + group;
+ int4store(&header[LOG_POS_OFFSET], val);
+
+ /* write the first half of the split header */
+ if (my_b_write(&log_file, header, carry))
+ return ER_ERROR_ON_WRITE;
+
+ /*
+ copy fixed second half of header to cache so the correct
+ version will be written later.
+ */
+ memcpy((char *)cache->read_pos, &header[carry], LOG_EVENT_HEADER_LEN - carry);
+
+ /* next event header at ... */
+ hdr_offs = uint4korr(&header[EVENT_LEN_OFFSET]) - carry;
+
+ carry= 0;
+ }
+
+ /* if there is anything to write, process it. */
+
+ if (likely(bytes > 0))
+ {
+ /*
+ process all event-headers in this (partial) cache.
+ if next header is beyond current read-buffer,
+ we'll get it later (though not necessarily in the
+ very next iteration, just "eventually").
+ */
+
+ while (hdr_offs < bytes)
+ {
+ /*
+ partial header only? save what we can get, process once
+ we get the rest.
+ */
+
+ if (hdr_offs + LOG_EVENT_HEADER_LEN > bytes)
+ {
+ carry= bytes - hdr_offs;
+ memcpy(header, (char *)cache->read_pos + hdr_offs, carry);
+ bytes= hdr_offs;
+ }
+ else
+ {
+ /* we've got a full event-header, and it came in one piece */
+
+ uchar *log_pos= (uchar *)cache->read_pos + hdr_offs + LOG_POS_OFFSET;
+
+ /* fix end_log_pos */
+ val= uint4korr(log_pos) + group;
+ int4store(log_pos, val);
+
+ /* next event header at ... */
+ log_pos= (uchar *)cache->read_pos + hdr_offs + EVENT_LEN_OFFSET;
+ hdr_offs += uint4korr(log_pos);
+
+ }
+ }
+
+ /*
+ Adjust hdr_offs. Note that this doesn't mean it will necessarily
+ be valid in the next iteration; if the current event is very long,
+ it may take a couple of read-iterations (and subsequent fixings
+ of hdr_offs) for it to become valid again.
+ if we had a split header, hdr_offs was already fixed above.
+ */
+ if (carry == 0)
+ hdr_offs -= bytes;
+ }
+
+ /* Write data to the binary log file */
if (my_b_write(&log_file, cache->read_pos, bytes))
return ER_ERROR_ON_WRITE;
- cache->read_pos= cache->read_end;
- } while ((bytes= my_b_fill(cache)));
+ cache->read_pos=cache->read_end; // Mark buffer used up
+ } while ((bytes=my_b_fill(cache)));
if (sync_log)
flush_and_sync();
@@ -4028,7 +4130,7 @@ bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event)
if ((write_error= write_cache(cache, false, false)))
goto err;
-
+
if (commit_event && commit_event->write(&log_file))
goto err;
if (flush_and_sync())
@@ -5140,6 +5242,29 @@ err1:
return 1;
}
+
+#ifdef INNODB_COMPATIBILITY_HOOKS
+/**
+ Get the file name of the MySQL binlog.
+ @return the name of the binlog file
+*/
+extern "C"
+const char* mysql_bin_log_file_name(void)
+{
+ return mysql_bin_log.get_log_fname();
+}
+/**
+ Get the current position of the MySQL binlog.
+ @return byte offset from the beginning of the binlog
+*/
+extern "C"
+ulonglong mysql_bin_log_file_pos(void)
+{
+ return (ulonglong) mysql_bin_log.get_log_file()->pos_in_file;
+}
+#endif /* INNODB_COMPATIBILITY_HOOKS */
+
+
struct st_mysql_storage_engine binlog_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
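
Most of the write_cache() change above recomputes each event's end_log_pos while the transaction cache is streamed out in read segments, carrying a partially read event header from one segment to the next. Below is a self-contained sketch of that carry technique, appending to a std::vector instead of writing to the log file; the header constants are assumptions modelled on the 19-byte common event header (event length at offset 9, end_log_pos at offset 13), and PosFixer is an illustrative name:

    // Sketch only: add a fixed group offset to the 4-byte end_log_pos field of
    // every event in a stream that arrives in arbitrary chunks, mirroring the
    // carry logic of write_cache().
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    constexpr size_t kHeaderLen= 19;    // assumed LOG_EVENT_HEADER_LEN
    constexpr size_t kLenOffs= 9;       // assumed EVENT_LEN_OFFSET
    constexpr size_t kPosOffs= 13;      // assumed LOG_POS_OFFSET

    // Binlog integers are little-endian (uint4korr/int4store equivalents).
    static uint32_t get4(const uint8_t *p)
    {
      return uint32_t(p[0]) | uint32_t(p[1]) << 8 |
             uint32_t(p[2]) << 16 | uint32_t(p[3]) << 24;
    }
    static void put4(uint8_t *p, uint32_t v)
    {
      p[0]= uint8_t(v); p[1]= uint8_t(v >> 8);
      p[2]= uint8_t(v >> 16); p[3]= uint8_t(v >> 24);
    }

    struct PosFixer
    {
      uint32_t group= 0;                // absolute offset of the cache in the log
      size_t hdr_offs= 0;               // next header offset within the chunk
      size_t carry= 0;                  // bytes of a split header seen so far
      uint8_t header[kHeaderLen];

      // Patch one chunk and append the fixed bytes to 'out' (my_b_write() to
      // the log file in the real code).
      void fix_and_write(uint8_t *buf, size_t bytes, std::vector<uint8_t> &out)
      {
        if (carry > 0)                  // finish a header split across chunks
        {
          size_t rest= kHeaderLen - carry;
          memcpy(header + carry, buf, rest);
          put4(header + kPosOffs, get4(header + kPosOffs) + group);
          out.insert(out.end(), header, header + carry);  // fixed first half
          memcpy(buf, header + carry, rest);              // fixed second half
          hdr_offs= get4(header + kLenOffs) - carry;      // next header at ...
          carry= 0;
        }
        size_t emit= bytes;
        while (hdr_offs < emit)
        {
          if (hdr_offs + kHeaderLen > emit)               // partial header only
          {
            carry= emit - hdr_offs;
            memcpy(header, buf + hdr_offs, carry);
            emit= hdr_offs;             // hold the partial header back for now
          }
          else                          // full header: fix it in place
          {
            uint8_t *h= buf + hdr_offs;
            put4(h + kPosOffs, get4(h + kPosOffs) + group);
            hdr_offs+= get4(h + kLenOffs);
          }
        }
        if (carry == 0)
          hdr_offs-= emit;              // next header lies in some later chunk
        out.insert(out.end(), buf, buf + emit);           // flush this chunk
      }
    };

Each fix_and_write() call corresponds to one refill of the read cache (my_b_fill() in the server), and group plays the role of my_b_tell(&log_file), the absolute position at which the cache contents are being appended.
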
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 02d3a949cb3..c062aa99b4e 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -6104,8 +6104,9 @@ int Rows_log_event::do_apply_event(RELAY_LOG_INFO const *rli)
default:
rli->report(ERROR_LEVEL, thd->net.last_errno,
- "Error in %s event: row application failed",
- get_type_str());
+ "Error in %s event: row application failed. %s",
+ get_type_str(),
+ thd->net.last_error ? thd->net.last_error : "");
thd->query_error= 1;
break;
}
@@ -6126,9 +6127,10 @@ int Rows_log_event::do_apply_event(RELAY_LOG_INFO const *rli)
{ /* error has occured during the transaction */
rli->report(ERROR_LEVEL, thd->net.last_errno,
"Error in %s event: error during transaction execution "
- "on table %s.%s",
+ "on table %s.%s. %s",
get_type_str(), table->s->db.str,
- table->s->table_name.str);
+ table->s->table_name.str,
+ thd->net.last_error ? thd->net.last_error : "");
/*
If one day we honour --skip-slave-errors in row-based replication, and
@@ -7092,7 +7094,12 @@ replace_record(THD *thd, TABLE *table,
}
if ((keynum= table->file->get_dup_key(error)) < 0)
{
- /* We failed to retrieve the duplicate key */
+ table->file->print_error(error, MYF(0));
+ /*
+ We failed to retrieve the duplicate key
+ - either because the error was not a "duplicate key" error
+ - or because the information about which key it was is not available
+ */
DBUG_RETURN(error);
}
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 8b00cfb5d24..f3a987ea744 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -98,7 +98,7 @@ char* query_table_status(THD *thd,const char *db,const char *table_name);
DBUG_ASSERT(strncmp(Ver, MYSQL_SERVER_VERSION, sizeof(Ver)-1) > 0); \
if (((uchar*)Thd) != NULL) \
push_warning_printf(((THD *)Thd), MYSQL_ERROR::WARN_LEVEL_WARN, \
- ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX), \
+ ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX_WITH_VER), \
(Old), (Ver), (New)); \
else \
sql_print_warning("The syntax '%s' is deprecated and will be removed " \
@@ -574,6 +574,13 @@ enum enum_parsing_place
struct st_table;
class THD;
+enum enum_check_fields
+{
+ CHECK_FIELD_IGNORE,
+ CHECK_FIELD_WARN,
+ CHECK_FIELD_ERROR_FOR_NULL
+};
+
/* Struct to handle simple linked lists */
typedef struct st_sql_list {
@@ -1172,7 +1179,11 @@ bool mysqld_show_open_tables(THD *thd,const char *wild);
bool mysqld_show_logs(THD *thd);
void append_identifier(THD *thd, String *packet, const char *name,
uint length);
+#endif /* MYSQL_SERVER */
+#if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS
int get_quote_char_for_identifier(THD *thd, const char *name, uint length);
+#endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */
+#ifdef MYSQL_SERVER
void mysqld_list_fields(THD *thd,TABLE_LIST *table, const char *wild);
int mysqld_dump_create_info(THD *thd, TABLE_LIST *table_list, int fd);
bool mysqld_show_create(THD *thd, TABLE_LIST *table_list);
@@ -1201,9 +1212,6 @@ void reset_status_vars();
/* information schema */
extern LEX_STRING INFORMATION_SCHEMA_NAME;
extern const LEX_STRING partition_keywords[];
-LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str,
- const char* str, uint length,
- bool allocate_lex_string);
ST_SCHEMA_TABLE *find_schema_table(THD *thd, const char* table_name);
ST_SCHEMA_TABLE *get_schema_table(enum enum_schema_tables schema_table_idx);
int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
@@ -1681,9 +1689,14 @@ extern int creating_table; // How many mysql_create_table() are running
*/
extern time_t server_start_time;
+#endif /* MYSQL_SERVER */
+#if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS
extern uint mysql_data_home_len;
extern char *mysql_data_home,server_version[SERVER_VERSION_LENGTH],
- mysql_real_data_home[], *opt_mysql_tmpdir, mysql_charsets_dir[],
+ mysql_real_data_home[];
+#endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */
+#ifdef MYSQL_SERVER
+extern char *opt_mysql_tmpdir, mysql_charsets_dir[],
def_ft_boolean_syntax[sizeof(ft_boolean_syntax)];
#define mysql_tmpdir (my_tmpdir(&mysql_tmpdir_list))
extern MY_TMPDIR mysql_tmpdir_list;
@@ -1700,8 +1713,13 @@ extern Gt_creator gt_creator;
extern Lt_creator lt_creator;
extern Ge_creator ge_creator;
extern Le_creator le_creator;
-extern char language[FN_REFLEN], reg_ext[FN_EXTLEN];
+extern char language[FN_REFLEN];
+#endif /* MYSQL_SERVER */
+#if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS
+extern char reg_ext[FN_EXTLEN];
extern uint reg_ext_length;
+#endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */
+#ifdef MYSQL_SERVER
extern char glob_hostname[FN_REFLEN], mysql_home[FN_REFLEN];
extern char pidfile_name[FN_REFLEN], system_time_zone[30], *opt_init_file;
extern char log_error_file[FN_REFLEN], *opt_tc_log_file;
@@ -1730,17 +1748,32 @@ extern ulong max_binlog_size, max_relay_log_size;
extern ulong opt_binlog_rows_event_max_size;
extern ulong rpl_recovery_rank, thread_cache_size, thread_pool_size;
extern ulong back_log;
-extern ulong specialflag, current_pid;
+#endif /* MYSQL_SERVER */
+#if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS
+extern ulong specialflag;
+#endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */
+#ifdef MYSQL_SERVER
+extern ulong current_pid;
extern ulong expire_logs_days, sync_binlog_period, sync_binlog_counter;
extern ulong opt_tc_log_size, tc_log_max_pages_used, tc_log_page_size;
extern ulong tc_log_page_waits;
extern my_bool relay_log_purge, opt_innodb_safe_binlog, opt_innodb;
extern uint test_flags,select_errors,ha_open_options;
extern uint protocol_version, mysqld_port, dropping_tables;
-extern uint delay_key_write_options, lower_case_table_names;
+extern uint delay_key_write_options;
+#endif /* MYSQL_SERVER */
+#if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS
+extern uint lower_case_table_names;
+#endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */
+#ifdef MYSQL_SERVER
extern bool opt_endinfo, using_udf_functions;
extern my_bool locked_in_memory;
-extern bool opt_using_transactions, mysqld_embedded;
+extern bool opt_using_transactions;
+#endif /* MYSQL_SERVER */
+#if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS
+extern bool mysqld_embedded;
+#endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */
+#ifdef MYSQL_SERVER
extern bool using_update_log, opt_large_files, server_id_supplied;
extern bool opt_update_log, opt_bin_log, opt_error_log;
extern my_bool opt_log, opt_slow_log;
@@ -1764,8 +1797,12 @@ extern uint opt_crash_binlog_innodb;
extern char *shared_memory_base_name, *mysqld_unix_port;
extern my_bool opt_enable_shared_memory;
extern char *default_tz_name;
+#endif /* MYSQL_SERVER */
+#if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS
extern my_bool opt_large_pages;
extern uint opt_large_page_size;
+#endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */
+#ifdef MYSQL_SERVER
extern char *opt_logname, *opt_slow_logname;
extern const char *log_output_str;
@@ -1801,7 +1838,11 @@ extern MY_BITMAP temp_pool;
extern String my_empty_string;
extern const String my_null_string;
extern SHOW_VAR status_vars[];
+#endif /* MYSQL_SERVER */
+#if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS
extern struct system_variables global_system_variables;
+#endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */
+#ifdef MYSQL_SERVER
extern struct system_variables max_system_variables;
extern struct system_status_var global_status_var;
extern struct rand_struct sql_rand;
@@ -2004,10 +2045,14 @@ int wild_case_compare(CHARSET_INFO *cs, const char *str,const char *wildstr);
char *fn_rext(char *name);
/* Conversion functions */
+#endif /* MYSQL_SERVER */
+#if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS
uint strconvert(CHARSET_INFO *from_cs, const char *from,
CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors);
uint filename_to_tablename(const char *from, char *to, uint to_length);
uint tablename_to_filename(const char *from, char *to, uint to_length);
+#endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */
+#ifdef MYSQL_SERVER
uint build_table_filename(char *buff, size_t bufflen, const char *db,
const char *table, const char *ext, uint flags);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 39a7c4e9095..6d22047b9db 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -762,6 +762,7 @@ static void close_connections(void)
DBUG_PRINT("info",("Waiting for select thread"));
#ifndef DONT_USE_THR_ALARM
+ if (pthread_kill(select_thread, thr_client_alarm))
break; // allready dead
#endif
set_timespec(abstime, 2);
@@ -7819,8 +7820,6 @@ static void get_options(int *argc,char **argv)
if (opt_short_log_format)
opt_specialflag|= SPECIAL_SHORT_LOG_FORMAT;
- if (opt_log_queries_not_using_indexes)
- opt_specialflag|= SPECIAL_LOG_QUERIES_NOT_USING_INDEXES;
if (init_global_datetime_format(MYSQL_TIMESTAMP_DATE,
&global_system_variables.date_format) ||
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 23bc3c96e8f..308b96d84f8 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -905,7 +905,6 @@ bool partition_info::set_up_charset_field_preps()
Field *field, **ptr;
uchar **char_ptrs;
unsigned i;
- bool found;
size_t size;
uint tot_fields= 0;
uint tot_part_fields= 0;
@@ -918,7 +917,6 @@ bool partition_info::set_up_charset_field_preps()
{
ptr= part_field_array;
/* Set up arrays and buffers for those fields */
- i= 0;
while ((field= *(ptr++)))
{
if (field_is_partition_charset(field))
@@ -954,7 +952,7 @@ bool partition_info::set_up_charset_field_preps()
}
part_charset_field_array[i]= NULL;
}
- if (is_sub_partitioned() && list_of_subpart_fields &&
+ if (is_sub_partitioned() && !list_of_subpart_fields &&
check_part_func_fields(subpart_field_array, FALSE))
{
/* Set up arrays and buffers for those fields */
@@ -962,7 +960,10 @@ bool partition_info::set_up_charset_field_preps()
while ((field= *(ptr++)))
{
if (field_is_partition_charset(field))
+ {
tot_subpart_fields++;
+ tot_fields++;
+ }
}
size= tot_subpart_fields * sizeof(char*);
if (!(char_ptrs= (uchar**) sql_calloc(size)))
@@ -975,10 +976,10 @@ bool partition_info::set_up_charset_field_preps()
if (!(char_ptrs= (uchar**) sql_alloc(size)))
goto error;
subpart_charset_field_array= (Field**)char_ptrs;
+ ptr= subpart_field_array;
i= 0;
while ((field= *(ptr++)))
{
- unsigned j= 0;
CHARSET_INFO *cs;
uchar *field_buf;
LINT_INIT(field_buf);
@@ -987,28 +988,16 @@ bool partition_info::set_up_charset_field_preps()
continue;
cs= ((Field_str*)field)->charset();
size= field->pack_length();
- found= FALSE;
- for (j= 0; j < tot_part_fields; j++)
- {
- if (field == part_charset_field_array[i])
- found= TRUE;
- }
- if (!found)
- {
- tot_fields++;
- if (!(field_buf= (uchar*) sql_calloc(size)))
- goto error;
- }
+ if (!(field_buf= (uchar*) sql_calloc(size)))
+ goto error;
+ subpart_charset_field_array[i]= field;
subpart_field_buffers[i++]= field_buf;
}
- if (!(char_ptrs= (uchar**) sql_calloc(size)))
- goto error;
- restore_subpart_field_ptrs= char_ptrs;
+ subpart_charset_field_array[i]= NULL;
}
if (tot_fields)
{
- uint j,k,l;
-
+ uint k;
size= tot_fields*sizeof(char**);
if (!(char_ptrs= (uchar**)sql_calloc(size)))
goto error;
@@ -1026,11 +1015,12 @@ bool partition_info::set_up_charset_field_preps()
full_part_field_buffers[i]= part_field_buffers[i];
}
k= tot_part_fields;
- l= 0;
for (i= 0; i < tot_subpart_fields; i++)
{
+ uint j;
+ bool found= FALSE;
field= subpart_charset_field_array[i];
- found= FALSE;
+
for (j= 0; j < tot_part_fields; j++)
{
if (field == part_charset_field_array[i])
@@ -1038,12 +1028,12 @@ bool partition_info::set_up_charset_field_preps()
}
if (!found)
{
- full_part_charset_field_array[l]= subpart_charset_field_array[k];
- full_part_field_buffers[l]= subpart_field_buffers[k];
- k++; l++;
+ full_part_charset_field_array[k]= subpart_charset_field_array[i];
+ full_part_field_buffers[k]= subpart_field_buffers[i];
+ k++;
}
}
- full_part_charset_field_array[tot_fields]= NULL;
+ full_part_charset_field_array[k]= NULL;
}
DBUG_RETURN(FALSE);
error:
diff --git a/sql/partition_info.h b/sql/partition_info.h
index ce2f2a7b358..10edea074c0 100644
--- a/sql/partition_info.h
+++ b/sql/partition_info.h
@@ -81,6 +81,13 @@ public:
*/
Field **full_part_field_array;
Field **full_part_charset_field_array;
+ /*
+ Set of all fields used in partition and subpartition expression.
+ Required for testing of partition fields in write_set when
+ updating. We need to set all bits in read_set because the row may
+ need to be inserted in a different [sub]partition.
+ */
+ MY_BITMAP full_part_field_set;
/*
When we have a field that requires transformation before calling the
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 0b26fa52e32..682eee06e02 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -1478,6 +1478,8 @@ ER_DUP_KEYNAME 42000 S1009
spa "Nombre de clave duplicado '%-.192s'"
swe "Nyckelnamn '%-.192s' finns flera gånger"
ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ËÌÀÞÁ '%-.192s'"
+# When using this error code, please use ER(ER_DUP_ENTRY_WITH_KEY_NAME)
+# for the message string. See, for example, code in handler.cc.
ER_DUP_ENTRY 23000 S1009
cze "Zdvojen-Bý klíè '%-.192s' (èíslo klíèe %d)"
dan "Ens værdier '%-.192s' for indeks %d"
@@ -5022,7 +5024,9 @@ ER_UNKNOWN_STORAGE_ENGINE 42000
ger "Unbekannte Speicher-Engine '%s'"
por "Motor de tabela desconhecido '%s'"
spa "Desconocido motor de tabla '%s'"
-ER_UNUSED_1
+# When using this error code, use ER(ER_WARN_DEPRECATED_SYNTAX_WITH_VER)
+# for the message string. See, for example, code in mysql_priv.h.
+ER_WARN_DEPRECATED_SYNTAX
eng "'%s' is deprecated; use '%s' instead"
ger "'%s' ist veraltet. Bitte benutzen Sie '%s'"
por "'%s' é desatualizado. Use '%s' em seu lugar"
@@ -5606,8 +5610,6 @@ ER_SP_RECURSION_LIMIT
ER_SP_PROC_TABLE_CORRUPT
eng "Failed to load routine %-.192s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)"
ger "Routine %-.192s konnte nicht geladen werden. Die Tabelle mysql.proc fehlt, ist beschädigt, oder enthält fehlerhaften Daten (interner Code: %d)"
-ER_FOREIGN_SERVER_EXISTS
- eng "The foreign server, %s, you are trying to create already exists."
ER_SP_WRONG_NAME 42000
eng "Incorrect routine name '%-.192s'"
ger "Ungültiger Routinenname '%-.192s'"
@@ -5629,6 +5631,34 @@ ER_NON_GROUPING_FIELD_USED 42000
ER_TABLE_CANT_HANDLE_SPKEYS
eng "The used table type doesn't support SPATIAL indexes"
ger "Der verwendete Tabellentyp unterstützt keine SPATIAL-Indizes"
+ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
+ eng "Triggers can not be created on system tables"
+ ger "Trigger können nicht auf Systemtabellen erzeugt werden"
+ER_REMOVED_SPACES
+ eng "Leading spaces are removed from name '%s'"
+ ger "Führende Leerzeichen werden aus dem Namen '%s' entfernt"
+ER_AUTOINC_READ_FAILED
+ eng "Failed to read auto-increment value from storage engine"
+ ger "Lesen des Autoincrement-Werts von der Speicher-Engine fehlgeschlagen"
+ER_USERNAME
+ eng "user name"
+ ger "Benutzername"
+ER_HOSTNAME
+ eng "host name"
+ ger "Hostname"
+ER_WRONG_STRING_LENGTH
+ eng "String '%-.70s' is too long for %s (should be no longer than %d)"
+ ger "String '%-.70s' ist zu lang für %s (sollte nicht länger sein als %d)"
+ER_NON_INSERTABLE_TABLE
+ eng "The target table %-.100s of the %s is not insertable-into"
+ ger "Die Zieltabelle %-.100s von %s ist nicht einfügbar"
+ER_ADMIN_WRONG_MRG_TABLE
+ eng "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist"
+ER_FOREIGN_SERVER_EXISTS
+ eng "The foreign server, %s, you are trying to create already exists."
+ER_FOREIGN_SERVER_DOESNT_EXIST
+ eng "The foreign server name you are trying to reference does not exist. Data source error: %-.64s"
+ ger "Die externe Verbindung, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s"
ER_ILLEGAL_HA_CREATE_OPTION
eng "Table storage engine '%-.64s' does not support the create option '%.64s'"
ger "Speicher-Engine '%-.64s' der Tabelle unterstützt die Option '%.64s' nicht"
@@ -5848,9 +5878,6 @@ ER_BINLOG_ROW_WRONG_TABLE_DEF
ER_BINLOG_ROW_RBR_TO_SBR
eng "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events"
ger "Slave, die mit --log-slave-updates laufen, müssen zeilenbasiertes Loggen verwenden, um zeilenbasierte Binärlog-Ereignisse loggen zu können"
-ER_FOREIGN_SERVER_DOESNT_EXIST
- eng "The foreign server name you are trying to reference does not exist. Data source error: %-.64s"
- ger "Die externe Verbindung, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s"
ER_EVENT_ALREADY_EXISTS
eng "Event '%-.192s' already exists"
ger "Event '%-.192s' existiert bereits"
@@ -5901,7 +5928,9 @@ ER_EVENT_DATA_TOO_LONG
ER_DROP_INDEX_FK
eng "Cannot drop index '%-.192s': needed in a foreign key constraint"
ger "Kann Index '%-.192s' nicht löschen: wird für einen Fremdschlüssel benötigt"
-ER_WARN_DEPRECATED_SYNTAX
+# When using this error message, use the ER_WARN_DEPRECATED_SYNTAX error
+# code. See, for example, code in mysql_priv.h.
+ER_WARN_DEPRECATED_SYNTAX_WITH_VER
eng "The syntax '%s' is deprecated and will be removed in MySQL %s. Please use %s instead"
ger "Die Syntax '%s' ist veraltet und wird in MySQL %s entfernt. Bitte benutzen Sie statt dessen %s"
ER_CANT_WRITE_LOCK_LOG_TABLE
@@ -5916,9 +5945,6 @@ ER_FOREIGN_DUPLICATE_KEY 23000 S1009
ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use mysql_upgrade to fix this error."
ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MySQL %d, jetzt unter %d. Bitte benutzen Sie mysql_upgrade, um den Fehler zu beheben"
-ER_REMOVED_SPACES
- eng "Leading spaces are removed from name '%s'"
- ger "Führende Leerzeichen werden aus dem Namen '%s' entfernt"
ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR
eng "Cannot switch out of the row-based binary log format when the session has open temporary tables"
ger "Kann nicht aus dem zeilenbasierten Binärlog-Format herauswechseln, wenn die Sitzung offene temporäre Tabellen hat"
@@ -5975,9 +6001,6 @@ ER_BASE64_DECODE_ERROR
eng "Decoding of base64 string failed"
swe "Avkodning av base64 sträng misslyckades"
ger "Der Server hat keine zeilenbasierte Replikation"
-ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
- eng "Triggers can not be created on system tables"
- ger "Trigger können nicht auf Systemtabellen erzeugt werden"
ER_EVENT_RECURSION_FORBIDDEN
eng "Recursion of EVENT DDL statements is forbidden when body is present"
ger "Rekursivität von EVENT-DDL-Anweisungen ist unzulässig wenn ein Hauptteil (Body) existiert"
@@ -5987,27 +6010,12 @@ ER_EVENTS_DB_ERROR
ER_ONLY_INTEGERS_ALLOWED
eng "Only integers allowed as number here"
ger "An dieser Stelle sind nur Ganzzahlen zulässig"
-ER_AUTOINC_READ_FAILED
- eng "Failed to read auto-increment value from storage engine"
- ger "Lesen des Autoincrement-Werts von der Speicher-Engine fehlgeschlagen"
-ER_USERNAME
- eng "user name"
- ger "Benutzername"
-ER_HOSTNAME
- eng "host name"
- ger "Hostname"
-ER_WRONG_STRING_LENGTH
- eng "String '%-.70s' is too long for %s (should be no longer than %d)"
- ger "String '%-.70s' ist zu lang für %s (sollte nicht länger sein als %d)"
ER_UNSUPORTED_LOG_ENGINE
eng "This storage engine cannot be used for log tables""
ger "Diese Speicher-Engine kann für Logtabellen nicht verwendet werden"
ER_BAD_LOG_STATEMENT
eng "You cannot '%s' a log table if logging is enabled"
ger "Sie können eine Logtabelle nicht '%s', wenn Loggen angeschaltet ist"
-ER_NON_INSERTABLE_TABLE
- eng "The target table %-.100s of the %s is not insertable-into"
- ger "Die Zieltabelle %-.100s von %s ist nicht einfügbar"
ER_CANT_RENAME_LOG_TABLE
eng "Cannot rename '%s'. When logging enabled, rename to/from log table must rename two tables: the log table to an archive table and another table back to '%s'"
ger "Kann '%s' nicht umbenennen. Wenn Loggen angeschaltet ist, müssen beim Umbenennen zu/von einer Logtabelle zwei Tabellen angegeben werden: die Logtabelle zu einer Archivtabelle und eine weitere Tabelle zurück zu '%s'"
@@ -6023,6 +6031,8 @@ ER_WRONG_PARAMETERS_TO_STORED_FCT 42000
ER_NATIVE_FCT_NAME_COLLISION
eng "This function '%-.192s' has the same name as a native function"
ger "Die Funktion '%-.192s' hat denselben Namen wie eine native Funktion"
+# When using this error message, use the ER_DUP_ENTRY error code. See, for
+# example, code in handler.cc.
ER_DUP_ENTRY_WITH_KEY_NAME 23000 S1009
cze "Zvojen-Bý klíè '%-.64s' (èíslo klíèe '%-.192s')"
dan "Ens værdier '%-.64s' for indeks '%-.192s'"
@@ -6059,8 +6069,6 @@ ER_SLAVE_INCIDENT
eng "The incident %s occured on the master. Message: %-.64s"
ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT
eng "Table has no partition for some existing values"
-ER_ADMIN_WRONG_MRG_TABLE
- eng "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist"
ER_BINLOG_UNSAFE_STATEMENT
eng "Statement is not safe to log in statement format."
swe "Detta är inte säkert att logga i statement-format."
diff --git a/sql/slave.cc b/sql/slave.cc
index 6c7968c2b3f..d2c5b4fd254 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -59,6 +59,58 @@ ulonglong relay_log_space_limit = 0;
int disconnect_slave_event_count = 0, abort_slave_event_count = 0;
int events_till_abort = -1;
+enum enum_slave_reconnect_actions
+{
+ SLAVE_RECON_ACT_REG= 0,
+ SLAVE_RECON_ACT_DUMP= 1,
+ SLAVE_RECON_ACT_EVENT= 2,
+ SLAVE_RECON_ACT_MAX
+};
+
+enum enum_slave_reconnect_messages
+{
+ SLAVE_RECON_MSG_WAIT= 0,
+ SLAVE_RECON_MSG_KILLED_WAITING= 1,
+ SLAVE_RECON_MSG_AFTER= 2,
+ SLAVE_RECON_MSG_FAILED= 3,
+ SLAVE_RECON_MSG_COMMAND= 4,
+ SLAVE_RECON_MSG_KILLED_AFTER= 5,
+ SLAVE_RECON_MSG_MAX
+};
+
+static const char *reconnect_messages[SLAVE_RECON_ACT_MAX][SLAVE_RECON_MSG_MAX]=
+{
+ {
+ "Waiting to reconnect after a failed registration on master",
+ "Slave I/O thread killed while waitnig to reconnect after a failed \
+registration on master",
+ "Reconnecting after a failed registration on master",
+ "failed registering on master, reconnecting to try again, \
+log '%s' at postion %s",
+ "COM_REGISTER_SLAVE",
+ "Slave I/O thread killed during or after reconnect"
+ },
+ {
+ "Waiting to reconnect after a failed binlog dump request",
+ "Slave I/O thread killed while retrying master dump",
+ "Reconnecting after a failed binlog dump request",
+ "failed dump request, reconnecting to try again, log '%s' at postion %s",
+ "COM_BINLOG_DUMP",
+ "Slave I/O thread killed during or after reconnect"
+ },
+ {
+ "Waiting to reconnect after a failed master event read",
+ "Slave I/O thread killed while waiting to reconnect after a failed read",
+ "Reconnecting after a failed master event read",
+ "Slave I/O thread: Failed reading log event, reconnecting to retry, \
+log '%s' at postion %s",
+ "",
+ "Slave I/O thread killed during or after a reconnect done to recover from \
+failed read"
+ }
+};
+
+
typedef enum { SLAVE_THD_IO, SLAVE_THD_SQL} SLAVE_THD_TYPE;
static int process_io_rotate(MASTER_INFO* mi, Rotate_log_event* rev);
@@ -1098,12 +1150,14 @@ static void write_ignored_events_info_to_relay_log(THD *thd, MASTER_INFO *mi)
}
-int register_slave_on_master(MYSQL* mysql, MASTER_INFO *mi)
+int register_slave_on_master(MYSQL* mysql, MASTER_INFO *mi,
+ bool *suppress_warnings)
{
uchar buf[1024], *pos= buf;
uint report_host_len, report_user_len=0, report_password_len=0;
DBUG_ENTER("register_slave_on_master");
+ *suppress_warnings= FALSE;
if (!report_host)
DBUG_RETURN(0);
report_host_len= strlen(report_host);
@@ -1127,11 +1181,18 @@ int register_slave_on_master(MYSQL* mysql, MASTER_INFO *mi)
if (simple_command(mysql, COM_REGISTER_SLAVE, buf, (size_t) (pos- buf), 0))
{
- char buf[256];
- my_snprintf(buf, sizeof(buf),
- "%s (Errno: %d)", mysql_error(mysql), mysql_errno(mysql));
- mi->report(ERROR_LEVEL, ER_SLAVE_MASTER_COM_FAILURE,
- ER(ER_SLAVE_MASTER_COM_FAILURE), "COM_REGISTER_SLAVE", buf);
+ if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED)
+ {
+ *suppress_warnings= TRUE; // Suppress reconnect warning
+ }
+ else
+ {
+ char buf[256];
+ my_snprintf(buf, sizeof(buf), "%s (Errno: %d)", mysql_error(mysql),
+ mysql_errno(mysql));
+ mi->report(ERROR_LEVEL, ER_SLAVE_MASTER_COM_FAILURE,
+ ER(ER_SLAVE_MASTER_COM_FAILURE), "COM_REGISTER_SLAVE", buf);
+ }
DBUG_RETURN(1);
}
DBUG_RETURN(0);
@@ -1460,6 +1521,8 @@ static int request_dump(MYSQL* mysql, MASTER_INFO* mi,
int binlog_flags = 0; // for now
char* logname = mi->master_log_name;
DBUG_ENTER("request_dump");
+
+ *suppress_warnings= FALSE;
// TODO if big log files: Change next to int8store()
int4store(buf, (ulong) mi->master_log_pos);
@@ -1475,7 +1538,7 @@ static int request_dump(MYSQL* mysql, MASTER_INFO* mi,
now we just fill up the error log :-)
*/
if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED)
- *suppress_warnings= 1; // Suppress reconnect warning
+ *suppress_warnings= TRUE; // Suppress reconnect warning
else
sql_print_error("Error on COM_BINLOG_DUMP: %d %s, will retry in %d secs",
mysql_errno(mysql), mysql_error(mysql),
@@ -1539,7 +1602,7 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings)
ulong len;
DBUG_ENTER("read_event");
- *suppress_warnings= 0;
+ *suppress_warnings= FALSE;
/*
my_real_read() will time us out
We check if we were told to die, and if not, try reading again
@@ -1879,6 +1942,94 @@ on this slave.\
}
+static bool check_io_slave_killed(THD *thd, MASTER_INFO *mi, const char *info)
+{
+ if (io_slave_killed(thd, mi))
+ {
+ if (global_system_variables.log_warnings)
+ sql_print_information(info);
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+/**
+ @brief Try to reconnect slave IO thread.
+
+ @details Terminates current connection to master, sleeps for
+ @c mi->connect_retry msecs and initiates new connection with
+ @c safe_reconnect(). The variable pointed to by @c retry_count is
+ incremented; if it exceeds @c master_retry_count, the connection is not
+ re-established and the function signals an error.
+ Unless @c suppress_warnings is TRUE, a warning is put in the server error
+ log when reconnecting. The warning message and the messages used to report
+ errors are taken from the @c messages array. If @c master_retry_count is
+ exceeded, no messages are added to the log.
+
+ @param[in] thd Thread context.
+ @param[in] mysql MySQL connection.
+ @param[in] mi Master connection information.
+ @param[in,out] retry_count Number of attempts to reconnect.
+ @param[in] suppress_warnings TRUE when a normal net read timeout
+ has caused the reconnect.
+ @param[in] messages Messages to print/log, see
+ reconnect_messages[] array.
+
+ @retval 0 OK.
+ @retval 1 There was an error.
+*/
+
+static int try_to_reconnect(THD *thd, MYSQL *mysql, MASTER_INFO *mi,
+ uint *retry_count, bool suppress_warnings,
+ const char *messages[SLAVE_RECON_MSG_MAX])
+{
+ mi->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT;
+ thd->proc_info= messages[SLAVE_RECON_MSG_WAIT];
+#ifdef SIGNAL_WITH_VIO_CLOSE
+ thd->clear_active_vio();
+#endif
+ end_server(mysql);
+ if ((*retry_count)++)
+ {
+ if (*retry_count > master_retry_count)
+ return 1; // Don't retry forever
+ safe_sleep(thd, mi->connect_retry, (CHECK_KILLED_FUNC) io_slave_killed,
+ (void *) mi);
+ }
+ if (check_io_slave_killed(thd, mi, messages[SLAVE_RECON_MSG_KILLED_WAITING]))
+ return 1;
+ thd->proc_info = messages[SLAVE_RECON_MSG_AFTER];
+ if (!suppress_warnings)
+ {
+ char buf[256], llbuff[22];
+ my_snprintf(buf, sizeof(buf), messages[SLAVE_RECON_MSG_FAILED],
+ IO_RPL_LOG_NAME, llstr(mi->master_log_pos, llbuff));
+ /*
+ Raise a warning when registering on master or requesting a dump;
+ log an informational message when reading an event.
+ */
+ if (messages[SLAVE_RECON_MSG_COMMAND][0])
+ {
+ mi->report(WARNING_LEVEL, ER_SLAVE_MASTER_COM_FAILURE,
+ ER(ER_SLAVE_MASTER_COM_FAILURE),
+ messages[SLAVE_RECON_MSG_COMMAND], buf);
+ }
+ else
+ {
+ sql_print_information(buf);
+ }
+ }
+ if (safe_reconnect(thd, mysql, mi, 1) || io_slave_killed(thd, mi))
+ {
+ if (global_system_variables.log_warnings)
+ sql_print_information(messages[SLAVE_RECON_MSG_KILLED_AFTER]);
+ return 1;
+ }
+ return 0;
+}
+
+
/* Slave I/O Thread entry point */
pthread_handler_t handle_slave_io(void *arg)
@@ -1889,7 +2040,10 @@ pthread_handler_t handle_slave_io(void *arg)
RELAY_LOG_INFO *rli= &mi->rli;
char llbuff[22];
uint retry_count;
-
+ bool suppress_warnings;
+#ifndef DBUG_OFF
+ uint retry_count_reg= 0, retry_count_dump= 0, retry_count_event= 0;
+#endif
// needs to call my_thread_init(), otherwise we get a coredump in DBUG_ stuff
my_thread_init();
DBUG_ENTER("handle_slave_io");
@@ -1975,79 +2129,56 @@ connected:
Register ourselves with the master.
*/
thd->proc_info = "Registering slave on master";
- if (register_slave_on_master(mysql, mi))
+ if (register_slave_on_master(mysql, mi, &suppress_warnings))
{
sql_print_error("Slave I/O thread couldn't register on master");
- goto err;
+ if (check_io_slave_killed(thd, mi, "Slave I/O thread killed while \
+registering slave on master") ||
+ try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings,
+ reconnect_messages[SLAVE_RECON_ACT_REG]))
+ goto err;
+ goto connected;
}
+ DBUG_EXECUTE_IF("FORCE_SLAVE_TO_RECONNECT_REG",
+ if (!retry_count_reg)
+ {
+ retry_count_reg++;
+ sql_print_information("Forcing to reconnect slave I/O thread");
+ if (try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings,
+ reconnect_messages[SLAVE_RECON_ACT_REG]))
+ goto err;
+ goto connected;
+ });
}
DBUG_PRINT("info",("Starting reading binary log from master"));
while (!io_slave_killed(thd,mi))
{
- bool suppress_warnings= 0;
thd->proc_info = "Requesting binlog dump";
if (request_dump(mysql, mi, &suppress_warnings))
{
sql_print_error("Failed on request_dump()");
- if (io_slave_killed(thd,mi))
- {
- sql_print_information("Slave I/O thread killed while requesting master \
-dump");
- goto err;
- }
-
- mi->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT;
- thd->proc_info= "Waiting to reconnect after a failed binlog dump request";
-#ifdef SIGNAL_WITH_VIO_CLOSE
- thd->clear_active_vio();
-#endif
- end_server(mysql);
- /*
- First time retry immediately, assuming that we can recover
- right away - if first time fails, sleep between re-tries
- hopefuly the admin can fix the problem sometime
- */
- if (retry_count++)
- {
- if (retry_count > master_retry_count)
- goto err; // Don't retry forever
- safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed,
- (void*)mi);
- }
- if (io_slave_killed(thd,mi))
- {
- sql_print_information("Slave I/O thread killed while retrying master \
-dump");
- goto err;
- }
-
- thd->proc_info = "Reconnecting after a failed binlog dump request";
- if (!suppress_warnings) {
- char buf[256];
- my_snprintf(buf, sizeof(buf),
- "failed dump request, reconnecting to try again,"
- " log '%s' at postion %s",
- IO_RPL_LOG_NAME,
- llstr(mi->master_log_pos,llbuff));
- mi->report(WARNING_LEVEL, ER_SLAVE_MASTER_COM_FAILURE,
- ER(ER_SLAVE_MASTER_COM_FAILURE), "COM_BINLOG_DUMP", buf);
- }
- if (safe_reconnect(thd, mysql, mi, suppress_warnings) ||
- io_slave_killed(thd,mi))
- {
- sql_print_information("Slave I/O thread killed during or \
-after reconnect");
+ if (check_io_slave_killed(thd, mi, "Slave I/O thread killed while \
+requesting master dump") ||
+ try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings,
+ reconnect_messages[SLAVE_RECON_ACT_DUMP]))
goto err;
- }
-
goto connected;
}
+ DBUG_EXECUTE_IF("FORCE_SLAVE_TO_RECONNECT_DUMP",
+ if (!retry_count_dump)
+ {
+ retry_count_dump++;
+ sql_print_information("Forcing to reconnect slave I/O thread");
+ if (try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings,
+ reconnect_messages[SLAVE_RECON_ACT_DUMP]))
+ goto err;
+ goto connected;
+ });
while (!io_slave_killed(thd,mi))
{
ulong event_len;
- suppress_warnings= 0;
/*
We say "waiting" because read_event() will wait if there's nothing to
read. But if there's something to read, it will not wait. The
@@ -2056,12 +2187,19 @@ after reconnect");
*/
thd->proc_info= "Waiting for master to send event";
event_len= read_event(mysql, mi, &suppress_warnings);
- if (io_slave_killed(thd,mi))
- {
- if (global_system_variables.log_warnings)
- sql_print_information("Slave I/O thread killed while reading event");
+ if (check_io_slave_killed(thd, mi, "Slave I/O thread killed while \
+reading event"))
goto err;
- }
+ DBUG_EXECUTE_IF("FORCE_SLAVE_TO_RECONNECT_EVENT",
+ if (!retry_count_event)
+ {
+ retry_count_event++;
+ sql_print_information("Forcing to reconnect slave I/O thread");
+ if (try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings,
+ reconnect_messages[SLAVE_RECON_ACT_EVENT]))
+ goto err;
+ goto connected;
+ });
if (event_len == packet_error)
{
@@ -2081,39 +2219,9 @@ max_allowed_packet",
mysql_error(mysql));
goto err;
}
- mi->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT;
- thd->proc_info = "Waiting to reconnect after a failed master event read";
-#ifdef SIGNAL_WITH_VIO_CLOSE
- thd->clear_active_vio();
-#endif
- end_server(mysql);
- if (retry_count++)
- {
- if (retry_count > master_retry_count)
- goto err; // Don't retry forever
- safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed,
- (void*) mi);
- }
- if (io_slave_killed(thd,mi))
- {
- if (global_system_variables.log_warnings)
- sql_print_information("Slave I/O thread killed while waiting to \
-reconnect after a failed read");
- goto err;
- }
- thd->proc_info = "Reconnecting after a failed master event read";
- if (!suppress_warnings)
- sql_print_information("Slave I/O thread: Failed reading log event, \
-reconnecting to retry, log '%s' position %s", IO_RPL_LOG_NAME,
- llstr(mi->master_log_pos, llbuff));
- if (safe_reconnect(thd, mysql, mi, suppress_warnings) ||
- io_slave_killed(thd,mi))
- {
- if (global_system_variables.log_warnings)
- sql_print_information("Slave I/O thread killed during or after a \
-reconnect done to recover from failed read");
+ if (try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings,
+ reconnect_messages[SLAVE_RECON_ACT_EVENT]))
goto err;
- }
goto connected;
} // if (event_len == packet_error)
diff --git a/sql/spatial.cc b/sql/spatial.cc
index e0680ed182c..97e5fcfa27a 100644
--- a/sql/spatial.cc
+++ b/sql/spatial.cc
@@ -17,7 +17,28 @@
#ifdef HAVE_SPATIAL
-#define MAX_DIGITS_IN_DOUBLE 16
+/*
+ exponential notation :
+ 1 sign
+ 1 number before the decimal point
+ 1 decimal point
+ 14 number of significant digits (see String::qs_append(double))
+ 1 'e' sign
+ 1 exponent sign
+ 3 exponent digits
+ ==
+ 22
+
+ "f" notation :
+ 1 optional 0
+ 1 sign
+ 14 number significant digits (see String::qs_append(double) )
+ 1 decimal point
+ ==
+ 17
+*/
+
+#define MAX_DIGITS_IN_DOUBLE 22
/***************************** Gis_class_info *******************************/
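The digit budget above can be checked outside the server. A minimal standalone sketch, assuming a printf-style %g conversion with at most 15 significant digits as an upper bound on what String::qs_append() emits (the exact format it uses is not shown in this hunk):

  #include <assert.h>
  #include <stdio.h>

  int main(void)
  {
    /* extreme doubles: smallest/largest normals, a subnormal, ordinary values */
    const double samples[]= { -2.2250738585072014e-308, 4.9e-324,
                              -1.7976931348623157e+308, 0.00012345, -0.5 };
    char buf[32];
    size_t i;
    for (i= 0; i < sizeof(samples) / sizeof(samples[0]); i++)
    {
      int len= snprintf(buf, sizeof(buf), "%.15g", samples[i]);
      assert(len > 0 && len <= 22);     /* fits within MAX_DIGITS_IN_DOUBLE */
    }
    printf("all samples fit in 22 characters\n");
    return 0;
  }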
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 026e1c3b218..fb5fd2ec627 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -28,6 +28,8 @@
#include <io.h>
#endif
+#define FLAGSTR(S,F) ((S) & (F) ? #F " " : "")
+
/**
This internal handler is used to trap internally
errors that can occur when executing open table
@@ -3996,36 +3998,47 @@ int decide_logging_format(THD *thd, TABLE_LIST *tables)
{
if (mysql_bin_log.is_open() && (thd->options & OPTION_BIN_LOG))
{
- handler::Table_flags binlog_flags= ~handler::Table_flags();
+ handler::Table_flags flags_some_set= handler::Table_flags();
+ handler::Table_flags flags_all_set= ~handler::Table_flags();
+ my_bool multi_engine= FALSE;
+ void* prev_ht= NULL;
for (TABLE_LIST *table= tables; table; table= table->next_global)
+ {
if (!table->placeholder() && table->lock_type >= TL_WRITE_ALLOW_WRITE)
{
-#define FLAGSTR(S,F) ((S) & (F) ? #F " " : "")
-#ifndef DBUG_OFF
- ulonglong flags= table->table->file->ha_table_flags();
+ ulonglong const flags= table->table->file->ha_table_flags();
DBUG_PRINT("info", ("table: %s; ha_table_flags: %s%s",
table->table_name,
FLAGSTR(flags, HA_BINLOG_STMT_CAPABLE),
FLAGSTR(flags, HA_BINLOG_ROW_CAPABLE)));
-#endif
- binlog_flags &= table->table->file->ha_table_flags();
+ if (prev_ht && prev_ht != table->table->file->ht)
+ multi_engine= TRUE;
+ prev_ht= table->table->file->ht;
+ flags_all_set &= flags;
+ flags_some_set |= flags;
}
- binlog_flags&= HA_BINLOG_FLAGS;
- DBUG_PRINT("info", ("binlog_flags: %s%s",
- FLAGSTR(binlog_flags, HA_BINLOG_STMT_CAPABLE),
- FLAGSTR(binlog_flags, HA_BINLOG_ROW_CAPABLE)));
+ }
+
+ DBUG_PRINT("info", ("flags_all_set: %s%s",
+ FLAGSTR(flags_all_set, HA_BINLOG_STMT_CAPABLE),
+ FLAGSTR(flags_all_set, HA_BINLOG_ROW_CAPABLE)));
+ DBUG_PRINT("info", ("flags_some_set: %s%s",
+ FLAGSTR(flags_some_set, HA_BINLOG_STMT_CAPABLE),
+ FLAGSTR(flags_some_set, HA_BINLOG_ROW_CAPABLE)));
DBUG_PRINT("info", ("thd->variables.binlog_format: %ld",
thd->variables.binlog_format));
+ DBUG_PRINT("info", ("multi_engine: %s",
+ multi_engine ? "TRUE" : "FALSE"));
int error= 0;
- if (binlog_flags == 0)
+ if (flags_all_set == 0)
{
my_error((error= ER_BINLOG_LOGGING_IMPOSSIBLE), MYF(0),
"Statement cannot be logged to the binary log in"
" row-based nor statement-based format");
}
else if (thd->variables.binlog_format == BINLOG_FORMAT_STMT &&
- (binlog_flags & HA_BINLOG_STMT_CAPABLE) == 0)
+ (flags_all_set & HA_BINLOG_STMT_CAPABLE) == 0)
{
my_error((error= ER_BINLOG_LOGGING_IMPOSSIBLE), MYF(0),
"Statement-based format required for this statement,"
@@ -4033,13 +4046,29 @@ int decide_logging_format(THD *thd, TABLE_LIST *tables)
}
else if ((thd->variables.binlog_format == BINLOG_FORMAT_ROW ||
thd->lex->is_stmt_unsafe()) &&
- (binlog_flags & HA_BINLOG_ROW_CAPABLE) == 0)
+ (flags_all_set & HA_BINLOG_ROW_CAPABLE) == 0)
{
my_error((error= ER_BINLOG_LOGGING_IMPOSSIBLE), MYF(0),
"Row-based format required for this statement,"
" but not allowed by this combination of engines");
}
+ /*
+ If more than one engine is involved in the statement and at
+ least one is doing its own logging (is *self-logging*), the
+ statement cannot be logged atomically, so we generate an error
+ rather than allowing the binlog to become corrupt.
+ */
+ if (multi_engine &&
+ (flags_some_set & HA_HAS_OWN_BINLOGGING))
+ {
+ error= ER_BINLOG_LOGGING_IMPOSSIBLE;
+ my_error(error, MYF(0),
+ "Statement cannot be written atomically since more"
+ " than one engine involved and at least one engine"
+ " is self-logging");
+ }
+
DBUG_PRINT("info", ("error: %d", error));
if (error)
@@ -4061,7 +4090,7 @@ int decide_logging_format(THD *thd, TABLE_LIST *tables)
here.
*/
if (thd->lex->is_stmt_unsafe() ||
- (binlog_flags & HA_BINLOG_STMT_CAPABLE) == 0)
+ (flags_all_set & HA_BINLOG_STMT_CAPABLE) == 0)
{
thd->set_current_stmt_binlog_row_based_if_mixed();
}
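The capability bookkeeping introduced in this hunk reduces to an AND accumulator, an OR accumulator, and an engine-identity comparison. A standalone sketch of the same decision, using stand-in flag constants and dummy engine identities rather than the real handler.h values:

  #include <stdio.h>

  typedef unsigned long long Table_flags;
  static const Table_flags CAP_STMT= 1ULL << 0;  /* stand-in for HA_BINLOG_STMT_CAPABLE */
  static const Table_flags CAP_ROW=  1ULL << 1;  /* stand-in for HA_BINLOG_ROW_CAPABLE  */
  static const Table_flags CAP_OWN=  1ULL << 2;  /* stand-in for HA_HAS_OWN_BINLOGGING  */

  struct Tab { const void *engine; Table_flags flags; };

  int main()
  {
    int engine_a, engine_b;                      /* dummy engine identities */
    const Tab tables[]= { { &engine_a, CAP_STMT | CAP_ROW },
                          { &engine_b, CAP_ROW  | CAP_OWN } };

    Table_flags flags_all_set= ~Table_flags();   /* AND over all written tables */
    Table_flags flags_some_set= Table_flags();   /* OR over all written tables  */
    bool multi_engine= false;
    const void *prev_ht= NULL;
    for (size_t i= 0; i < sizeof(tables) / sizeof(tables[0]); i++)
    {
      if (prev_ht && prev_ht != tables[i].engine)
        multi_engine= true;
      prev_ht= tables[i].engine;
      flags_all_set&=  tables[i].flags;
      flags_some_set|= tables[i].flags;
    }
    /* Same rule as above: several engines, at least one self-logging => refuse. */
    if (multi_engine && (flags_some_set & CAP_OWN))
      printf("would raise ER_BINLOG_LOGGING_IMPOSSIBLE\n");
    return 0;
  }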
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 0e4404b87fc..f7ad024c143 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -820,6 +820,18 @@ void query_cache_invalidate_by_MyISAM_filename(const char *filename)
}
+/*
+ The following function forms part of the C plugin API
+*/
+extern "C"
+void mysql_query_cache_invalidate4(THD *thd,
+ const char *key, unsigned key_length,
+ int using_trx)
+{
+ query_cache.invalidate(thd, key, (uint32) key_length, (my_bool) using_trx);
+}
+
+
/*****************************************************************************
Query_cache methods
*****************************************************************************/
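No caller of the new entry point appears in this hunk, so the following is only a sketch of how a storage-engine plugin might use it; in particular, the "db\0table\0" key layout and its length are assumptions about the query-cache key format, not something this patch documents:

  /* Hypothetical caller; see the assumptions noted above. */
  static void invalidate_cached_queries_for_t1(THD *thd)
  {
    const char key[]= "test\0t1";          /* "<db>\0<table>" plus trailing '\0' */
    unsigned key_length= sizeof(key);      /* 8 bytes: both names and both NULs  */
    mysql_query_cache_invalidate4(thd, key, key_length,
                                  0 /* not inside a transaction */);
  }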
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 40b37ed7405..4945b805578 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -207,6 +207,31 @@ Open_tables_state::Open_tables_state(ulong version_arg)
The following functions form part of the C plugin API
*/
+extern "C" int mysql_tmpfile(const char *prefix)
+{
+ char filename[FN_REFLEN];
+ File fd = create_temp_file(filename, mysql_tmpdir, prefix,
+#ifdef __WIN__
+ O_BINARY | O_TRUNC | O_SEQUENTIAL |
+ O_SHORT_LIVED |
+#endif /* __WIN__ */
+ O_CREAT | O_EXCL | O_RDWR | O_TEMPORARY,
+ MYF(MY_WME));
+ if (fd >= 0) {
+#ifndef __WIN__
+ /*
+ This can be removed once the following bug is fixed:
+ Bug #28903 create_temp_file() doesn't honor O_TEMPORARY option
+ (file not removed) (Unix)
+ */
+ unlink(filename);
+#endif /* !__WIN__ */
+ }
+
+ return fd;
+}
+
+
extern "C"
int thd_in_lock_tables(const THD *thd)
{
@@ -253,6 +278,11 @@ int thd_tx_isolation(const THD *thd)
return (int) thd->variables.tx_isolation;
}
+extern "C"
+void thd_inc_row_count(THD *thd)
+{
+ thd->row_count++;
+}
/*
Dumps a text description of a thread, its security context
@@ -483,6 +513,49 @@ void THD::pop_internal_handler()
m_internal_handler= NULL;
}
+extern "C"
+void *thd_alloc(MYSQL_THD thd, unsigned int size)
+{
+ return thd->alloc(size);
+}
+
+extern "C"
+void *thd_calloc(MYSQL_THD thd, unsigned int size)
+{
+ return thd->calloc(size);
+}
+
+extern "C"
+char *thd_strdup(MYSQL_THD thd, const char *str)
+{
+ return thd->strdup(str);
+}
+
+extern "C"
+char *thd_strmake(MYSQL_THD thd, const char *str, unsigned int size)
+{
+ return thd->strmake(str, size);
+}
+
+extern "C"
+LEX_STRING *thd_make_lex_string(THD *thd, LEX_STRING *lex_str,
+ const char *str, unsigned int size,
+ int allocate_lex_string)
+{
+ return thd->make_lex_string(lex_str, str, size,
+ (bool) allocate_lex_string);
+}
+
+extern "C"
+void *thd_memdup(MYSQL_THD thd, const void* str, unsigned int size)
+{
+ return thd->memdup(str, size);
+}
+
+void thd_get_xid(const MYSQL_THD thd, MYSQL_XID *xid)
+{
+ *xid = *(MYSQL_XID *) &thd->transaction.xid_state.xid;
+}
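A hedged usage sketch for the wrappers above, assuming they are exported to plugins through the plugin header; the helper below and its name are illustrative only:

  /* Hypothetical plugin helper: copy a name into THD-owned memory.
     Requires <string.h> for strlen(). */
  static int example_copy_name(MYSQL_THD thd, const char *name, LEX_STRING *out)
  {
    /* Both allocations come from the connection's mem_root; the plugin
       never frees them explicitly, the server does. */
    char *copy= thd_strdup(thd, name);
    if (copy == NULL)
      return 1;
    if (thd_make_lex_string(thd, out, copy, (unsigned int) strlen(copy), 0) == NULL)
      return 1;
    return 0;
  }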
/*
Init common variables that has to be reset on start and on change_user
@@ -851,6 +924,30 @@ void THD::cleanup_after_query()
}
+/**
+ Create a LEX_STRING in this connection
+
+ @param lex_str pointer to LEX_STRING object to be initialized
+ @param str initializer to be copied into lex_str
+ @param length length of str, in bytes
+ @param allocate_lex_string if TRUE, allocate new LEX_STRING object,
+ instead of using lex_str value
+ @return NULL on failure, or pointer to the LEX_STRING object
+*/
+LEX_STRING *THD::make_lex_string(LEX_STRING *lex_str,
+ const char* str, uint length,
+ bool allocate_lex_string)
+{
+ if (allocate_lex_string)
+ if (!(lex_str= (LEX_STRING *)alloc(sizeof(LEX_STRING))))
+ return 0;
+ if (!(lex_str->str= strmake_root(mem_root, str, length)))
+ return 0;
+ lex_str->length= length;
+ return lex_str;
+}
+
+
/*
Convert a string to another character set
@@ -1445,6 +1542,8 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
field_sep_char= (exchange->enclosed->length() ? (*exchange->enclosed)[0] :
field_term_length ? (*exchange->field_term)[0] : INT_MAX);
escape_char= (exchange->escaped->length() ? (*exchange->escaped)[0] : -1);
+ is_ambiguous_field_sep= test(strchr(ESCAPE_CHARS, field_sep_char));
+ is_unsafe_field_sep= test(strchr(NUMERIC_CHARS, field_sep_char));
line_sep_char= (exchange->line_term->length() ?
(*exchange->line_term)[0] : INT_MAX);
if (!field_term_length)
@@ -1519,7 +1618,8 @@ bool select_export::send_data(List<Item> &items)
used_length=min(res->length(),item->max_length);
else
used_length=res->length();
- if (result_type == STRING_RESULT && escape_char != -1)
+ if ((result_type == STRING_RESULT || is_unsafe_field_sep) &&
+ escape_char != -1)
{
char *pos, *start, *end;
CHARSET_INFO *res_charset= res->charset();
@@ -1585,7 +1685,9 @@ bool select_export::send_data(List<Item> &items)
NEED_ESCAPING(pos[1])))
{
char tmp_buff[2];
- tmp_buff[0]= escape_char;
+ tmp_buff[0]= ((int) *pos == field_sep_char &&
+ is_ambiguous_field_sep) ?
+ field_sep_char : escape_char;
tmp_buff[1]= *pos ? *pos : '0';
if (my_b_write(&cache,(uchar*) start,(uint) (pos-start)) ||
my_b_write(&cache,(uchar*) tmp_buff,2))
@@ -2436,7 +2538,43 @@ void THD::restore_backup_open_tables_state(Open_tables_state *backup)
DBUG_VOID_RETURN;
}
+/**
+ Check the killed state of a user thread
+ @param thd user thread
+ @retval 0 the user thread is active
+ @retval 1 the user thread has been killed
+*/
+extern "C" int thd_killed(const MYSQL_THD thd)
+{
+ return(thd->killed);
+}
+
+#ifdef INNODB_COMPATIBILITY_HOOKS
+extern "C" struct charset_info_st *thd_charset(MYSQL_THD thd)
+{
+ return(thd->charset());
+}
+
+extern "C" char **thd_query(MYSQL_THD thd)
+{
+ return(&thd->query);
+}
+extern "C" int thd_slave_thread(const MYSQL_THD thd)
+{
+ return(thd->slave_thread);
+}
+
+extern "C" int thd_non_transactional_update(const MYSQL_THD thd)
+{
+ return(thd->no_trans_update.all);
+}
+
+extern "C" int thd_binlog_format(const THD *thd)
+{
+ return (int) thd->variables.binlog_format;
+}
+#endif // INNODB_COMPATIBILITY_HOOKS
/****************************************************************************
Handling of statement states in functions and triggers.
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 0dad4966623..7466ab69ed8 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -39,8 +39,6 @@ enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME };
enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };
enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON,
DELAY_KEY_WRITE_ALL };
-enum enum_check_fields
-{ CHECK_FIELD_IGNORE, CHECK_FIELD_WARN, CHECK_FIELD_ERROR_FOR_NULL };
enum enum_mark_columns
{ MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE};
@@ -492,13 +490,6 @@ public:
{ return strdup_root(mem_root,str); }
inline char *strmake(const char *str, size_t size)
{ return strmake_root(mem_root,str,size); }
- inline bool LEX_STRING_make(LEX_STRING *lex_str, const char *str,
- size_t size)
- {
- return ((lex_str->str=
- strmake_root(mem_root, str, (lex_str->length= size)))) == 0;
- }
-
inline void *memdup(const void *str, size_t size)
{ return memdup_root(mem_root,str,size); }
inline void *memdup_w_gap(const void *str, size_t size, uint gap)
@@ -1596,6 +1587,10 @@ public:
return alloc_root(&transaction.mem_root,size);
}
+ LEX_STRING *make_lex_string(LEX_STRING *lex_str,
+ const char* str, uint length,
+ bool allocate_lex_string);
+
bool convert_string(LEX_STRING *to, CHARSET_INFO *to_cs,
const char *from, uint from_length,
CHARSET_INFO *from_cs);
@@ -1949,9 +1944,30 @@ public:
};
+#define ESCAPE_CHARS "ntrb0ZN" // keep synchronous with READ_INFO::unescape
+
+
+/*
+ List of all possible characters of a numeric value text representation.
+*/
+#define NUMERIC_CHARS ".0123456789e+-"
+
+
class select_export :public select_to_file {
uint field_term_length;
int field_sep_char,escape_char,line_sep_char;
+ /*
+ The is_ambiguous_field_sep field is true if the value of the field_sep_char
+ field is one of the 'n', 't', 'r', etc. characters
+ (see the READ_INFO::unescape method and the ESCAPE_CHARS constant value).
+ */
+ bool is_ambiguous_field_sep;
+ /*
+ The is_unsafe_field_sep field is true if the value of the field_sep_char
+ field is one of the '0'..'9', '+', '-', '.' and 'e' characters
+ (see the NUMERIC_CHARS constant value).
+ */
+ bool is_unsafe_field_sep;
bool fixed_row_size;
public:
select_export(sql_exchange *ex) :select_to_file(ex) {}
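Both new flags come down to strchr() membership tests against the two macros, mirroring what select_export::prepare() now does; a standalone sketch of the same classification:

  #include <stdio.h>
  #include <string.h>

  #define ESCAPE_CHARS  "ntrb0ZN"         /* as defined above */
  #define NUMERIC_CHARS ".0123456789e+-"  /* as defined above */

  int main(void)
  {
    const char candidates[]= { ',', 'n', '-' };  /* possible FIELDS TERMINATED BY chars */
    size_t i;
    for (i= 0; i < sizeof(candidates); i++)
    {
      int c= candidates[i];
      int ambiguous= (strchr(ESCAPE_CHARS, c)  != NULL);  /* collides with an escape letter */
      int unsafe=    (strchr(NUMERIC_CHARS, c) != NULL);  /* may appear inside a number     */
      printf("'%c': ambiguous=%d unsafe=%d\n", c, ambiguous, unsafe);
    }
    return 0;
  }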
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index eeac5e7c4fe..11db88d8f5e 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -695,6 +695,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
if (duplic == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
+ if (duplic == DUP_UPDATE)
+ table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
/*
let's *try* to start bulk inserts. It won't necessary
start them as values_list.elements should be greater than
@@ -2546,6 +2548,8 @@ bool Delayed_insert::handle_inserts(void)
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
using_opt_replace= 1;
}
+ if (info.handle_duplicates == DUP_UPDATE)
+ table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
thd.clear_error(); // reset error for binlog
if (write_record(&thd, table, &info))
{
@@ -2890,6 +2894,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
if (info.handle_duplicates == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
+ if (info.handle_duplicates == DUP_UPDATE)
+ table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
thd->no_trans_update.stmt= FALSE;
thd->abort_on_warning= (!info.ignore &&
(thd->variables.sql_mode &
@@ -3481,6 +3487,8 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
if (info.handle_duplicates == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
+ if (info.handle_duplicates == DUP_UPDATE)
+ table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
if (!thd->prelocked_mode)
table->file->ha_start_bulk_insert((ha_rows) 0);
thd->no_trans_update.stmt= FALSE;
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 0138030487b..3ffbdf83815 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -851,6 +851,7 @@ continue_loop:;
char
READ_INFO::unescape(char chr)
{
+ /* keep this switch in sync with the ESCAPE_CHARS macro */
switch(chr) {
case 'n': return '\n';
case 't': return '\t';
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index cb523147ede..48177a5f350 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1025,8 +1025,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
HA_CREATE_INFO create_info;
status_var_increment(thd->status_var.com_stat[SQLCOM_CREATE_DB]);
- if (thd->LEX_STRING_make(&db, packet, packet_length -1) ||
- thd->LEX_STRING_make(&alias, db.str, db.length) ||
+ if (thd->make_lex_string(&db, packet, packet_length - 1, FALSE) ||
+ thd->make_lex_string(&alias, db.str, db.length, FALSE) ||
check_db_name(&db))
{
my_error(ER_WRONG_DB_NAME, MYF(0), db.str ? db.str : "NULL");
@@ -1046,7 +1046,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
status_var_increment(thd->status_var.com_stat[SQLCOM_DROP_DB]);
LEX_STRING db;
- if (thd->LEX_STRING_make(&db, packet, packet_length - 1) ||
+ if (thd->make_lex_string(&db, packet, packet_length - 1, FALSE) ||
check_db_name(&db))
{
my_error(ER_WRONG_DB_NAME, MYF(0), db.str ? db.str : "NULL");
@@ -1323,7 +1323,7 @@ void log_slow_statement(THD *thd)
thd->variables.long_query_time ||
((thd->server_status &
(SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED)) &&
- (specialflag & SPECIAL_LOG_QUERIES_NOT_USING_INDEXES)))
+ opt_log_queries_not_using_indexes))
{
thd->status_var.long_query_count++;
slow_log_print(thd, thd->query, thd->query_length, start_of_query);
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index f3253e5b086..0584bb12b9e 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -523,6 +523,7 @@ static bool set_up_field_array(TABLE *table,
SYNOPSIS
create_full_part_field_array()
+ thd Thread handle
table TABLE object for which partition fields are set-up
part_info Reference to partitioning data structure
@@ -537,11 +538,12 @@ static bool set_up_field_array(TABLE *table,
This function is called from fix_partition_func
*/
-static bool create_full_part_field_array(TABLE *table,
+static bool create_full_part_field_array(THD *thd, TABLE *table,
partition_info *part_info)
{
bool result= FALSE;
Field **ptr;
+ my_bitmap_map *bitmap_buf;
DBUG_ENTER("create_full_part_field_array");
if (!part_info->is_sub_partitioned())
@@ -578,6 +580,35 @@ static bool create_full_part_field_array(TABLE *table,
part_info->full_part_field_array= field_array;
part_info->no_full_part_fields= no_part_fields;
}
+
+ /*
+ Initialize the set of all fields used in partition and subpartition
+ expression. Required for testing of partition fields in write_set
+ when updating. We need to set all bits in read_set because the row
+ may need to be inserted in a different [sub]partition.
+ */
+ if (!(bitmap_buf= (my_bitmap_map*)
+ thd->alloc(bitmap_buffer_size(table->s->fields))))
+ {
+ mem_alloc_error(bitmap_buffer_size(table->s->fields));
+ result= TRUE;
+ goto end;
+ }
+ if (bitmap_init(&part_info->full_part_field_set, bitmap_buf,
+ table->s->fields, FALSE))
+ {
+ mem_alloc_error(table->s->fields);
+ result= TRUE;
+ goto end;
+ }
+ /*
+ full_part_field_array may be NULL if storage engine supports native
+ partitioning.
+ */
+ if ((ptr= part_info->full_part_field_array))
+ for (; *ptr; ptr++)
+ bitmap_set_bit(&part_info->full_part_field_set, (*ptr)->field_index);
+
end:
DBUG_RETURN(result);
}
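Only the construction of full_part_field_set appears in this hunk; the comment in partition_info.h says it exists so that UPDATE processing can test partitioning columns against write_set. A hedged sketch of that intended test, where bitmap_is_overlapping() is assumed to be the my_bitmap helper used and the function name is illustrative:

  /* Hypothetical consumer of full_part_field_set. */
  static bool update_touches_partition_fields(TABLE *table)
  {
    partition_info *part_info= table->part_info;
    return part_info != NULL &&
           bitmap_is_overlapping(&part_info->full_part_field_set,
                                 table->write_set);
  }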
@@ -1636,7 +1667,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
goto end;
}
- if (unlikely(create_full_part_field_array(table, part_info)))
+ if (unlikely(create_full_part_field_array(thd, table, part_info)))
goto end;
if (unlikely(check_primary_key(table)))
goto end;
@@ -2869,6 +2900,8 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
}
if (unsigned_flag)
part_func_value-= 0x8000000000000000ULL;
+ if (left_endpoint && !include_endpoint)
+ part_func_value++;
while (max_part_id > min_part_id)
{
loc_part_id= (max_part_id + min_part_id + 1) >> 1;
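The increment added above relies on the fact that, for integer partition-function values, a strict lower bound is the same as an inclusive bound one step higher: f(x) > V holds exactly when f(x) >= V + 1. A trivial standalone illustration of the adjustment:

  #include <assert.h>

  /* Mirrors the adjustment above, outside the server. */
  static long long endpoint_search_value(long long v, int left_endpoint,
                                         int include_endpoint)
  {
    if (left_endpoint && !include_endpoint)
      v++;
    return v;
  }

  int main(void)
  {
    assert(endpoint_search_value(9, 1, 0) == 10);  /* col >  9  ->  search as >= 10 */
    assert(endpoint_search_value(9, 1, 1) ==  9);  /* col >= 9  ->  search as >=  9 */
    assert(endpoint_search_value(9, 0, 1) ==  9);  /* right endpoints are unchanged */
    return 0;
  }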
@@ -3294,7 +3327,9 @@ static uint32 get_sub_part_id_from_key(const TABLE *table,uchar *buf,
key_restore(buf, (uchar*)key_spec->key, key_info, key_spec->length);
if (likely(rec0 == buf))
+ {
part_id= part_info->get_subpartition_id(part_info);
+ }
else
{
Field **part_field_array= part_info->subpart_field_array;
@@ -3337,8 +3372,10 @@ bool get_part_id_from_key(const TABLE *table, uchar *buf, KEY *key_info,
key_restore(buf, (uchar*)key_spec->key, key_info, key_spec->length);
if (likely(rec0 == buf))
+ {
result= part_info->get_part_partition_id(part_info, part_id,
&func_value);
+ }
else
{
Field **part_field_array= part_info->part_field_array;
@@ -3384,8 +3421,10 @@ void get_full_part_id_from_key(const TABLE *table, uchar *buf,
key_restore(buf, (uchar*)key_spec->key, key_info, key_spec->length);
if (likely(rec0 == buf))
+ {
result= part_info->get_partition_id(part_info, &part_spec->start_part,
&func_value);
+ }
else
{
Field **part_field_array= part_info->full_part_field_array;
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 1616e895107..5d9d30b6020 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -1442,13 +1442,11 @@ err:
}
if (errmsg)
- {
my_error(ER_ERROR_WHEN_EXECUTING_COMMAND, MYF(0),
"SHOW BINLOG EVENTS", errmsg);
- DBUG_RETURN(TRUE);
- }
+ else
+ send_eof(thd);
- send_eof(thd);
pthread_mutex_lock(&LOCK_thread_count);
thd->current_linfo = 0;
pthread_mutex_unlock(&LOCK_thread_count);
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 04374c1d1c1..b65515c9e01 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -2176,20 +2176,6 @@ void calc_sum_of_all_status(STATUS_VAR *to)
}
-LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str,
- const char* str, uint length,
- bool allocate_lex_string)
-{
- MEM_ROOT *mem= thd->mem_root;
- if (allocate_lex_string)
- if (!(lex_str= (LEX_STRING *)thd->alloc(sizeof(LEX_STRING))))
- return 0;
- lex_str->str= strmake_root(mem, str, length);
- lex_str->length= length;
- return lex_str;
-}
-
-
/* INFORMATION_SCHEMA name */
LEX_STRING INFORMATION_SCHEMA_NAME= { C_STRING_WITH_LEN("information_schema")};
@@ -2274,11 +2260,9 @@ bool uses_only_table_name_fields(Item *item, TABLE_LIST *table)
if (item->type() == Item::FUNC_ITEM)
{
Item_func *item_func= (Item_func*)item;
- Item **child;
- Item **item_end= (item_func->arguments()) + item_func->argument_count();
- for (child= item_func->arguments(); child != item_end; child++)
+ for (uint i=0; i<item_func->argument_count(); i++)
{
- if (!uses_only_table_name_fields(*child, table))
+ if (!uses_only_table_name_fields(item_func->arguments()[i], table))
return 0;
}
}
@@ -5195,10 +5179,10 @@ int make_schema_select(THD *thd, SELECT_LEX *sel,
We have to make non const db_name & table_name
because of lower_case_table_names
*/
- make_lex_string(thd, &db, INFORMATION_SCHEMA_NAME.str,
- INFORMATION_SCHEMA_NAME.length, 0);
- make_lex_string(thd, &table, schema_table->table_name,
- strlen(schema_table->table_name), 0);
+ thd->make_lex_string(&db, INFORMATION_SCHEMA_NAME.str,
+ INFORMATION_SCHEMA_NAME.length, 0);
+ thd->make_lex_string(&table, schema_table->table_name,
+ strlen(schema_table->table_name), 0);
if (schema_table->old_format(thd, schema_table) || /* Handle old syntax */
!sel->add_table_to_list(thd, new Table_ident(thd, db, table, 0),
0, 0, TL_READ))
diff --git a/sql/sql_sort.h b/sql/sql_sort.h
index da28ca07e2c..1e9322f7f5b 100644
--- a/sql/sql_sort.h
+++ b/sql/sql_sort.h
@@ -50,6 +50,12 @@ typedef struct st_buffpek { /* Struktur om sorteringsbuffrarna */
ulong max_keys; /* Max keys in buffert */
} BUFFPEK;
+struct BUFFPEK_COMPARE_CONTEXT
+{
+ qsort_cmp2 key_compare;
+ void *key_compare_arg;
+};
+
typedef struct st_sort_param {
uint rec_length; /* Length of sorted records */
uint sort_length; /* Length of sorted columns */
@@ -65,6 +71,9 @@ typedef struct st_sort_param {
uchar *unique_buff;
bool not_killable;
char* tmp_buffer;
+ /* The fields below are used only by Unique class */
+ qsort2_cmp compare;
+ BUFFPEK_COMPARE_CONTEXT cmp_context;
} SORTPARAM;
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 81dd8aebf65..df83e1d2e25 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -4107,8 +4107,7 @@ part_bit_expr:
}
Lex->part_info->curr_part_elem->has_null_value= TRUE;
}
- else if (part_expr->result_type() != INT_RESULT &&
- !part_expr->null_value)
+ else if (part_expr->result_type() != INT_RESULT)
{
my_parse_error(ER(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR));
MYSQL_YYABORT;
@@ -7139,8 +7138,11 @@ opt_distinct:
|DISTINCT { $$ = 1; };
opt_gconcat_separator:
- /* empty */ { $$ = new (YYTHD->mem_root) String(",",1,default_charset_info); }
- |SEPARATOR_SYM text_string { $$ = $2; };
+ /* empty */
+ {
+ $$= new (YYTHD->mem_root) String(",", 1, &my_charset_latin1);
+ }
+ | SEPARATOR_SYM text_string { $$ = $2; };
opt_gorder_clause:
diff --git a/sql/stacktrace.c b/sql/stacktrace.c
index 40507907120..b1267e20774 100644
--- a/sql/stacktrace.c
+++ b/sql/stacktrace.c
@@ -228,6 +228,15 @@ void write_core(int sig)
void write_core(int sig)
{
signal(sig, SIG_DFL);
+#ifdef HAVE_gcov
+ /*
+ For GCOV build, crashing will prevent the writing of code coverage
+ information from this process, causing gcov output to be incomplete.
+ So we force the writing of coverage information here before terminating.
+ */
+ extern void __gcov_flush(void);
+ __gcov_flush();
+#endif
pthread_kill(pthread_self(), sig);
#if defined(P_MYID) && !defined(SCO)
/* On Solaris, the above kill is not enough */
diff --git a/sql/uniques.cc b/sql/uniques.cc
index 7a05ceaddfc..0394eee9c6d 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -360,17 +360,12 @@ Unique::reset()
}
/*
- The comparison function, passed to queue_init() in merge_walk() must
+ The comparison function, passed to queue_init() in merge_walk() and in
+ merge_buffers() when the latter is called from Uniques::get() must
use comparison function of Uniques::tree, but compare members of struct
BUFFPEK.
*/
-struct BUFFPEK_COMPARE_CONTEXT
-{
- qsort_cmp2 key_compare;
- void *key_compare_arg;
-};
-
C_MODE_START
static int buffpek_compare(void *arg, uchar *key_ptr1, uchar *key_ptr2)
@@ -629,6 +624,10 @@ bool Unique::get(TABLE *table)
sort_param.unique_buff= sort_buffer+(sort_param.keys*
sort_param.sort_length);
+ sort_param.compare= (qsort2_cmp) buffpek_compare;
+ sort_param.cmp_context.key_compare= tree.compare;
+ sort_param.cmp_context.key_compare_arg= tree.custom_arg;
+
/* Merge the buffers to one file, removing duplicates */
if (merge_many_buff(&sort_param,sort_buffer,file_ptr,&maxbuffer,&file))
goto err;
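Moving BUFFPEK_COMPARE_CONTEXT into sql_sort.h lets merge code that only understands a generic (context, key, key) comparator carry the Unique tree's comparator and its argument inside SORTPARAM. A minimal standalone sketch of that wrapper pattern; everything except the two context members is illustrative:

  typedef int (*qsort_cmp2)(void *cmp_arg, const void *key1, const void *key2);

  struct BUFFPEK_COMPARE_CONTEXT
  {
    qsort_cmp2 key_compare;       /* the tree's comparator   */
    void *key_compare_arg;        /* its custom argument     */
  };

  /* Shape of buffpek_compare(): unpack the context and delegate. */
  static int buffpek_compare_sketch(void *arg, const void *key1, const void *key2)
  {
    struct BUFFPEK_COMPARE_CONTEXT *ctx= (struct BUFFPEK_COMPARE_CONTEXT *) arg;
    return ctx->key_compare(ctx->key_compare_arg, key1, key2);
  }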
diff --git a/sql/unireg.h b/sql/unireg.h
index 5b73c6e9caa..6c26f30f116 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -124,7 +124,7 @@
#define SPECIAL_NO_HOST_CACHE 512 /* Don't cache hosts */
#define SPECIAL_SHORT_LOG_FORMAT 1024
#define SPECIAL_SAFE_MODE 2048
-#define SPECIAL_LOG_QUERIES_NOT_USING_INDEXES 4096 /* Log q not using indexes */
+#define SPECIAL_LOG_QUERIES_NOT_USING_INDEXES 4096 /* Obsolete */
/* Extern defines */
#define store_record(A,B) bmove_align((A)->B,(A)->record[0],(size_t) (A)->s->reclength)