author     unknown <knielsen@knielsen-hq.org>    2010-04-28 14:52:24 +0200
committer  unknown <knielsen@knielsen-hq.org>    2010-04-28 14:52:24 +0200
commit     b1e00b6be81c80b09d11085d77d86978e26df988 (patch)
tree       bb1fdd7363fbf2580572ac9a56dbd4c933cc4c0d /sql
parent     1f683a7270e63abfadce20c6f51370621ff065e1 (diff)
parent     c9cfd2df5f2f58c2cdf716999ebea252c307333f (diff)
download   mariadb-git-b1e00b6be81c80b09d11085d77d86978e26df988.tar.gz
Merge MySQL 5.1.46 into MariaDB.
Still two test failures to be solved: main.myisam and main.subselect.
Diffstat (limited to 'sql')
-rw-r--r--  sql/debug_sync.cc        38
-rw-r--r--  sql/debug_sync.h          1
-rw-r--r--  sql/events.cc             4
-rw-r--r--  sql/field.cc             87
-rw-r--r--  sql/field.h              21
-rw-r--r--  sql/field_conv.cc        26
-rw-r--r--  sql/ha_partition.cc      49
-rw-r--r--  sql/item.cc              93
-rw-r--r--  sql/item.h               27
-rw-r--r--  sql/item_cmpfunc.cc      45
-rw-r--r--  sql/item_cmpfunc.h       22
-rw-r--r--  sql/item_create.cc       30
-rw-r--r--  sql/item_create.h         8
-rw-r--r--  sql/item_func.cc         74
-rw-r--r--  sql/item_row.cc          11
-rw-r--r--  sql/item_row.h            4
-rw-r--r--  sql/item_strfunc.cc       2
-rw-r--r--  sql/item_strfunc.h        3
-rw-r--r--  sql/item_subselect.cc     8
-rw-r--r--  sql/item_sum.cc          16
-rw-r--r--  sql/item_sum.h            6
-rw-r--r--  sql/item_timefunc.cc      2
-rw-r--r--  sql/log.cc               34
-rw-r--r--  sql/log_event.cc         31
-rw-r--r--  sql/log_event.h          32
-rw-r--r--  sql/log_event_old.cc     76
-rw-r--r--  sql/mysql_priv.h         29
-rw-r--r--  sql/mysqld.cc           247
-rw-r--r--  sql/opt_range.cc         15
-rw-r--r--  sql/opt_sum.cc          113
-rw-r--r--  sql/protocol.cc           4
-rw-r--r--  sql/rpl_utility.cc        2
-rw-r--r--  sql/rpl_utility.h        11
-rw-r--r--  sql/set_var.cc           11
-rw-r--r--  sql/share/errmsg.txt      2
-rw-r--r--  sql/slave.cc             32
-rw-r--r--  sql/sp.cc                 4
-rw-r--r--  sql/sp_cache.cc           5
-rw-r--r--  sql/sp_head.cc            4
-rw-r--r--  sql/sql_base.cc          17
-rw-r--r--  sql/sql_class.cc         10
-rw-r--r--  sql/sql_class.h           3
-rw-r--r--  sql/sql_delete.cc        18
-rw-r--r--  sql/sql_insert.cc         2
-rw-r--r--  sql/sql_lex.cc            1
-rw-r--r--  sql/sql_lex.h             3
-rw-r--r--  sql/sql_load.cc           8
-rw-r--r--  sql/sql_parse.cc          6
-rw-r--r--  sql/sql_partition.cc     20
-rw-r--r--  sql/sql_plugin.cc         2
-rw-r--r--  sql/sql_profile.cc       12
-rw-r--r--  sql/sql_repl.cc           1
-rw-r--r--  sql/sql_select.cc       383
-rw-r--r--  sql/sql_select.h         39
-rw-r--r--  sql/sql_show.cc           3
-rw-r--r--  sql/sql_table.cc        125
-rw-r--r--  sql/sql_trigger.cc       15
-rw-r--r--  sql/sql_update.cc        97
-rw-r--r--  sql/sql_view.cc          45
-rw-r--r--  sql/sql_yacc.yy           6
-rw-r--r--  sql/table.cc             11
-rw-r--r--  sql/table.h              14
62 files changed, 1362 insertions, 708 deletions
diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc
index 2580d526b52..23a649a89fa 100644
--- a/sql/debug_sync.cc
+++ b/sql/debug_sync.cc
@@ -1903,4 +1903,42 @@ void debug_sync(THD *thd, const char *sync_point_name, size_t name_len)
DBUG_VOID_RETURN;
}
+/**
+ Define debug sync action.
+
+ @param[in] thd thread handle
+ @param[in] action_str action string
+
+ @return status
+ @retval FALSE ok
+ @retval TRUE error
+
+ @description
+ The function is similar to @c debug_sync_eval_action but is
+ to be called immediately from the server code rather than
+ to be triggered by setting a value to DEBUG_SYNC system variable.
+
+ @note
+ The input string is copied prior to being fed to
+ @c debug_sync_eval_action to let the latter modify it.
+
+ Caution.
+ The function allocates in THD::mem_root and therefore
+ is not recommended to be deployed inside big loops.
+*/
+
+bool debug_sync_set_action(THD *thd, const char *action_str, size_t len)
+{
+ bool rc;
+ char *value;
+ DBUG_ENTER("debug_sync_set_action");
+ DBUG_ASSERT(thd);
+ DBUG_ASSERT(action_str);
+
+ value= strmake_root(thd->mem_root, action_str, len);
+ rc= debug_sync_eval_action(thd, value);
+ DBUG_RETURN(rc);
+}
+
+
#endif /* defined(ENABLED_DEBUG_SYNC) */
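
For illustration, a minimal sketch of how server code might invoke the new helper; the DBUG keyword and the action string below are hypothetical, and STRING_WITH_LEN is the usual server macro expanding to the literal and its length:

  #if defined(ENABLED_DEBUG_SYNC)
    /* Somewhere inside a server function, with a valid THD *thd: */
    DBUG_EXECUTE_IF("my_sync_test",
      {
        const char act[]= "now SIGNAL reached WAIT_FOR go";
        DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN(act)));
      };);
  #endif
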
diff --git a/sql/debug_sync.h b/sql/debug_sync.h
index f4cd0b364cf..9ac7da39d4d 100644
--- a/sql/debug_sync.h
+++ b/sql/debug_sync.h
@@ -50,6 +50,7 @@ extern void debug_sync_end(void);
extern void debug_sync_init_thread(THD *thd);
extern void debug_sync_end_thread(THD *thd);
extern void debug_sync(THD *thd, const char *sync_point_name, size_t name_len);
+extern bool debug_sync_set_action(THD *thd, const char *action_str, size_t len);
#else /* defined(ENABLED_DEBUG_SYNC) */
diff --git a/sql/events.cc b/sql/events.cc
index 31eddada282..0f3fc8eee4a 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -361,7 +361,9 @@ create_query_string(THD *thd, String *buf)
/* Append definer */
append_definer(thd, buf, &(thd->lex->definer->user), &(thd->lex->definer->host));
/* Append the left part of thd->query after "DEFINER" part */
- if (buf->append(thd->lex->stmt_definition_begin))
+ if (buf->append(thd->lex->stmt_definition_begin,
+ thd->lex->stmt_definition_end -
+ thd->lex->stmt_definition_begin))
return 1;
return 0;
diff --git a/sql/field.cc b/sql/field.cc
index ac095b07117..bc6da99166c 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1,4 +1,4 @@
-/* Copyright 2000-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1374,12 +1374,14 @@ bool Field::send_binary(Protocol *protocol)
to the size of this field (the slave or destination).
@param field_metadata Encoded size in field metadata
+ @param mflags Flags from the table map event for the table.
@retval 0 if this field's size is < the source field's size
@retval 1 if this field's size is >= the source field's size
*/
int Field::compatible_field_size(uint field_metadata,
- const Relay_log_info *rli_arg __attribute__((unused)))
+ const Relay_log_info *rli_arg __attribute__((unused)),
+ uint16 mflags __attribute__((unused)))
{
uint const source_size= pack_length_from_metadata(field_metadata);
uint const destination_size= row_pack_length();
@@ -1706,11 +1708,10 @@ uint Field::fill_cache_field(CACHE_FIELD *copy)
uint store_length;
copy->str=ptr;
copy->length=pack_length();
- copy->blob_field=0;
+ copy->field= this;
if (flags & BLOB_FLAG)
{
- copy->blob_field=(Field_blob*) this;
- copy->strip=0;
+ copy->type= CACHE_BLOB;
copy->length-= table->s->blob_ptr_size;
return copy->length;
}
@@ -1718,15 +1719,15 @@ uint Field::fill_cache_field(CACHE_FIELD *copy)
(type() == MYSQL_TYPE_STRING && copy->length >= 4 &&
copy->length < 256))
{
- copy->strip=1; /* Remove end space */
+ copy->type= CACHE_STRIPPED;
store_length= 2;
}
else
{
- copy->strip=0;
+ copy->type= 0;
store_length= 0;
}
- return copy->length+ store_length;
+ return copy->length + store_length;
}
@@ -2881,7 +2882,8 @@ uint Field_new_decimal::pack_length_from_metadata(uint field_metadata)
@retval 1 if this field's size is >= the source field's size
*/
int Field_new_decimal::compatible_field_size(uint field_metadata,
- const Relay_log_info * __attribute__((unused)))
+ const Relay_log_info * __attribute__((unused)),
+ uint16 mflags __attribute__((unused)))
{
int compatible= 0;
uint const source_precision= (field_metadata >> 8U) & 0x00ff;
@@ -2946,16 +2948,16 @@ Field_new_decimal::unpack(uchar* to,
a decimal and write that to the raw data buffer.
*/
decimal_digit_t dec_buf[DECIMAL_MAX_PRECISION];
- decimal_t dec;
- dec.len= from_precision;
- dec.buf= dec_buf;
+ decimal_t dec_val;
+ dec_val.len= from_precision;
+ dec_val.buf= dec_buf;
/*
Note: bin2decimal does not change the length of the field. So it is
just the first step of the resizing operation. The second step does the
resizing using the precision and decimals from the slave.
*/
- bin2decimal((uchar *)from, &dec, from_precision, from_decimal);
- decimal2bin(&dec, to, precision, decimals());
+ bin2decimal((uchar *)from, &dec_val, from_precision, from_decimal);
+ decimal2bin(&dec_val, to, precision, decimals());
}
else
memcpy(to, from, len); // Sizes are the same, just copy the data.
@@ -6334,7 +6336,7 @@ check_string_copy_error(Field_str *field,
SYNOPSIS
Field_longstr::report_if_important_data()
- ptr - Truncated rest of string
+ pstr - Truncated rest of string
end - End of truncated string
count_spaces - Treat trailing spaces as important data
@@ -6350,12 +6352,12 @@ check_string_copy_error(Field_str *field,
*/
int
-Field_longstr::report_if_important_data(const char *ptr, const char *end,
+Field_longstr::report_if_important_data(const char *pstr, const char *end,
bool count_spaces)
{
- if ((ptr < end) && table->in_use->count_cuted_fields)
+ if ((pstr < end) && table->in_use->count_cuted_fields)
{
- if (test_if_important_data(field_charset, ptr, end))
+ if (test_if_important_data(field_charset, pstr, end))
{
if (table->in_use->abort_on_warning)
set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1);
@@ -6661,7 +6663,8 @@ check_field_for_37426(const void *param_arg)
#endif
int Field_string::compatible_field_size(uint field_metadata,
- const Relay_log_info *rli_arg)
+ const Relay_log_info *rli_arg,
+ uint16 mflags __attribute__((unused)))
{
#ifdef HAVE_REPLICATION
const Check_field_param check_param = { this };
@@ -6669,7 +6672,7 @@ int Field_string::compatible_field_size(uint field_metadata,
check_field_for_37426, &check_param))
return FALSE; // Not compatible field sizes
#endif
- return Field::compatible_field_size(field_metadata, rli_arg);
+ return Field::compatible_field_size(field_metadata, rli_arg, mflags);
}
@@ -7014,9 +7017,8 @@ const uint Field_varstring::MAX_SIZE= UINT_MAX16;
*/
int Field_varstring::do_save_field_metadata(uchar *metadata_ptr)
{
- char *ptr= (char *)metadata_ptr;
DBUG_ASSERT(field_length <= 65535);
- int2store(ptr, field_length);
+ int2store((char*)metadata_ptr, field_length);
return 2;
}
@@ -9221,8 +9223,13 @@ uint Field_bit::get_key_image(uchar *buff, uint length, imagetype type_arg)
*/
int Field_bit::do_save_field_metadata(uchar *metadata_ptr)
{
- *metadata_ptr= bit_len;
- *(metadata_ptr + 1)= bytes_in_rec;
+ /*
+ Since this class and Field_bit_as_char have different ideas of
+ what should be stored here, we compute the values of the metadata
+ explicitly using the field_length.
+ */
+ metadata_ptr[0]= field_length % 8;
+ metadata_ptr[1]= field_length / 8;
return 2;
}
@@ -9262,20 +9269,26 @@ uint Field_bit::pack_length_from_metadata(uint field_metadata)
@retval 1 if this field's size is >= the source field's size
*/
int Field_bit::compatible_field_size(uint field_metadata,
- const Relay_log_info * __attribute__((unused)))
+ const Relay_log_info * __attribute__((unused)),
+ uint16 mflags)
{
- int compatible= 0;
- uint const source_size= pack_length_from_metadata(field_metadata);
- uint const destination_size= row_pack_length();
- uint const from_bit_len= field_metadata & 0x00ff;
- uint const from_len= (field_metadata >> 8U) & 0x00ff;
- if ((bit_len == 0) || (from_bit_len == 0))
- compatible= (source_size <= destination_size);
- else if (from_bit_len > bit_len)
- compatible= (from_len < bytes_in_rec);
- else
- compatible= ((from_bit_len <= bit_len) && (from_len <= bytes_in_rec));
- return (compatible);
+ uint from_bit_len= 8 * (field_metadata >> 8) + (field_metadata & 0xff);
+ uint to_bit_len= max_display_length();
+
+ /*
+ If the bit length exact flag is clear, we are dealing with an old
+ master, so we allow somewhat less strict behaviour when replicating by
+ moving both bit lengths to an even multiple of 8.
+
+ We do this by computing the number of bytes to store the field
+ instead, and then compare the result.
+ */
+ if (!(mflags & Table_map_log_event::TM_BIT_LEN_EXACT_F)) {
+ from_bit_len= (from_bit_len + 7) / 8;
+ to_bit_len= (to_bit_len + 7) / 8;
+ }
+
+ return from_bit_len <= to_bit_len;
}
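
A standalone sketch of the BIT compatibility arithmetic above, with made-up bit lengths; the real code derives from_bit_len from the table map metadata and checks TM_BIT_LEN_EXACT_F:

  #include <cassert>

  /* Mirrors Field_bit::compatible_field_size() above, using plain integers. */
  static bool bit_fields_compatible(unsigned from_bit_len, unsigned to_bit_len,
                                    bool bit_len_exact)
  {
    if (!bit_len_exact)
    {
      /* Old master: compare storage sizes in whole bytes instead of bits. */
      from_bit_len= (from_bit_len + 7) / 8;
      to_bit_len=   (to_bit_len + 7) / 8;
    }
    return from_bit_len <= to_bit_len;
  }

  int main()
  {
    assert(bit_fields_compatible(12, 14, true));   /* 12 bits fit in 14 bits      */
    assert(!bit_fields_compatible(16, 10, true));  /* exact mode: 16 > 10         */
    assert(bit_fields_compatible(16, 10, false));  /* both round up to 2 bytes    */
    return 0;
  }
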
diff --git a/sql/field.h b/sql/field.h
index 04d534114e6..1907ad803bc 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1,4 +1,4 @@
-/* Copyright 2000-2008 MySQL AB, 2008, 2009 Sun Microsystems, Inc.
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
/*
Because of the function new_field() all field classes that have static
@@ -55,7 +55,11 @@ public:
static void operator delete(void *ptr_arg, size_t size) { TRASH(ptr_arg, size); }
uchar *ptr; // Position to field in record
- uchar *null_ptr; // Byte where null_bit is
+ /**
+ Byte where the @c NULL bit is stored inside a record. If this Field is a
+ @c NOT @c NULL field, this member is @c NULL.
+ */
+ uchar *null_ptr;
/*
Note that you can use table->in_use as replacement for current_thd member
only inside of val_*() and store() members (e.g. you can't use it in cons)
@@ -165,7 +169,7 @@ public:
*/
virtual uint32 pack_length_in_rec() const { return pack_length(); }
virtual int compatible_field_size(uint field_metadata,
- const Relay_log_info *);
+ const Relay_log_info *, uint16 mflags);
virtual uint pack_length_from_metadata(uint field_metadata)
{ return field_metadata; }
/*
@@ -261,6 +265,9 @@ public:
inline void set_notnull(my_ptrdiff_t row_offset= 0)
{ if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; }
inline bool maybe_null(void) { return null_ptr != 0 || table->maybe_null; }
+ /**
+ Signals that this field is NULL-able.
+ */
inline bool real_maybe_null(void) { return null_ptr != 0; }
enum {
@@ -803,7 +810,7 @@ public:
uint pack_length_from_metadata(uint field_metadata);
uint row_pack_length() { return pack_length(); }
int compatible_field_size(uint field_metadata,
- const Relay_log_info *rli);
+ const Relay_log_info *rli, uint16 mflags);
uint is_equal(Create_field *new_field);
virtual const uchar *unpack(uchar* to, const uchar *from,
uint param_data, bool low_byte_first);
@@ -1499,7 +1506,7 @@ public:
return (((field_metadata >> 4) & 0x300) ^ 0x300) + (field_metadata & 0x00ff);
}
int compatible_field_size(uint field_metadata,
- const Relay_log_info *rli);
+ const Relay_log_info *rli, uint16 mflags);
uint row_pack_length() { return (field_length + 1); }
int pack_cmp(const uchar *a,const uchar *b,uint key_length,
my_bool insert_or_update);
@@ -1968,7 +1975,7 @@ public:
uint row_pack_length()
{ return (bytes_in_rec + ((bit_len > 0) ? 1 : 0)); }
int compatible_field_size(uint field_metadata,
- const Relay_log_info *rli);
+ const Relay_log_info *rli, uint16 mflags);
void sql_type(String &str) const;
virtual uchar *pack(uchar *to, const uchar *from,
uint max_length, bool low_byte_first);
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index 9a53f258754..67ef4f95368 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -122,13 +122,18 @@ set_field_to_null(Field *field)
return 0;
}
field->reset();
- if (field->table->in_use->count_cuted_fields == CHECK_FIELD_WARN)
- {
+ switch (field->table->in_use->count_cuted_fields) {
+ case CHECK_FIELD_WARN:
field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ /* fall through */
+ case CHECK_FIELD_IGNORE:
return 0;
+ case CHECK_FIELD_ERROR_FOR_NULL:
+ if (!field->table->in_use->no_errors)
+ my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name);
+ return -1;
}
- if (!field->table->in_use->no_errors)
- my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name);
+ DBUG_ASSERT(0); // impossible
return -1;
}
@@ -178,13 +183,18 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions)
field->table->auto_increment_field_not_null= FALSE;
return 0; // field is set in fill_record()
}
- if (field->table->in_use->count_cuted_fields == CHECK_FIELD_WARN)
- {
+ switch (field->table->in_use->count_cuted_fields) {
+ case CHECK_FIELD_WARN:
field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_BAD_NULL_ERROR, 1);
+ /* fall through */
+ case CHECK_FIELD_IGNORE:
return 0;
+ case CHECK_FIELD_ERROR_FOR_NULL:
+ if (!field->table->in_use->no_errors)
+ my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name);
+ return -1;
}
- if (!field->table->in_use->no_errors)
- my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name);
+ DBUG_ASSERT(0); // impossible
return -1;
}
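
The rewritten functions enumerate every CHECK_FIELD_* mode explicitly; a minimal standalone sketch of the resulting outcomes, with simplified names and the same return-value convention:

  #include <cassert>

  enum check_fields_mode { IGNORE_MODE, WARN_MODE, ERROR_FOR_NULL_MODE };

  static int null_assignment_result(check_fields_mode mode)
  {
    switch (mode) {
    case WARN_MODE:             /* would also push a truncation warning first */
      /* fall through */
    case IGNORE_MODE:
      return 0;                 /* field silently keeps its reset value */
    case ERROR_FOR_NULL_MODE:
      return -1;                /* statement fails with ER_BAD_NULL_ERROR */
    }
    return -1;                  /* unreachable, mirrors DBUG_ASSERT(0) */
  }

  int main()
  {
    assert(null_assignment_result(WARN_MODE) == 0);
    assert(null_assignment_result(ERROR_FOR_NULL_MODE) == -1);
    return 0;
  }
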
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 22b01fe38fa..2f16d730296 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1746,13 +1746,23 @@ void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
void ha_partition::change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
{
- handler **file_array= m_file;
+ handler **file_array;
table= table_arg;
table_share= share;
- do
+ /*
+ m_file can be NULL when using an old cached table in DROP TABLE, when the
+ table just has REMOVED PARTITIONING, see Bug#42438
+ */
+ if (m_file)
{
- (*file_array)->change_table_ptr(table_arg, share);
- } while (*(++file_array));
+ file_array= m_file;
+ DBUG_ASSERT(*file_array);
+ do
+ {
+ (*file_array)->change_table_ptr(table_arg, share);
+ } while (*(++file_array));
+ }
+
if (m_added_file && m_added_file[0])
{
/* if in middle of a drop/rename etc */
@@ -5081,6 +5091,7 @@ int ha_partition::info(uint flag)
file= m_file[handler_instance];
file->info(HA_STATUS_CONST);
+ stats.block_size= file->stats.block_size;
stats.create_time= file->stats.create_time;
ref_length= m_ref_length;
}
@@ -6054,7 +6065,13 @@ void ha_partition::print_error(int error, myf errflag)
if (error == HA_ERR_NO_PARTITION_FOUND)
m_part_info->print_no_partition_found(table);
else
- m_file[m_last_part]->print_error(error, errflag);
+ {
+ /* In case m_file has not been initialized, like in bug#42438 */
+ if (m_file)
+ m_file[m_last_part]->print_error(error, errflag);
+ else
+ handler::print_error(error, errflag);
+ }
DBUG_VOID_RETURN;
}
@@ -6064,7 +6081,12 @@ bool ha_partition::get_error_message(int error, String *buf)
DBUG_ENTER("ha_partition::get_error_message");
/* Should probably look for my own errors first */
- DBUG_RETURN(m_file[m_last_part]->get_error_message(error, buf));
+
+ /* In case m_file has not been initialized, like in bug#42438 */
+ if (m_file)
+ DBUG_RETURN(m_file[m_last_part]->get_error_message(error, buf));
+ DBUG_RETURN(handler::get_error_message(error, buf));
+
}
@@ -6432,9 +6454,22 @@ void ha_partition::release_auto_increment()
ulonglong next_auto_inc_val;
lock_auto_increment();
next_auto_inc_val= ha_data->next_auto_inc_val;
+ /*
+ If the current auto_increment value is lower than the reserved
+ value, and the reserved value was reserved by this thread,
+ we can lower the reserved value.
+ */
if (next_insert_id < next_auto_inc_val &&
auto_inc_interval_for_cur_row.maximum() >= next_auto_inc_val)
- ha_data->next_auto_inc_val= next_insert_id;
+ {
+ THD *thd= ha_thd();
+ /*
+ Check that we do not lower the value because of a failed insert
+ with SET INSERT_ID, i.e. forced/non generated values.
+ */
+ if (thd->auto_inc_intervals_forced.maximum() < next_insert_id)
+ ha_data->next_auto_inc_val= next_insert_id;
+ }
DBUG_PRINT("info", ("ha_data->next_auto_inc_val: %lu",
(ulong) ha_data->next_auto_inc_val));
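
The condition above amounts to three checks; a plain-integer sketch with hypothetical values, where the handler reserved the interval [100, 110) but the statement only consumed values up to 102:

  #include <cassert>

  /* Sketch of the decision only; all names and numbers are made up. */
  static bool can_lower_reserved_value(unsigned long long next_insert_id,
                                       unsigned long long reserved_next_val,
                                       unsigned long long cur_interval_max,
                                       unsigned long long forced_interval_max)
  {
    return next_insert_id < reserved_next_val &&     /* part of the range is unused */
           cur_interval_max >= reserved_next_val &&  /* reserved by this thread     */
           forced_interval_max < next_insert_id;     /* not a SET INSERT_ID value   */
  }

  int main()
  {
    assert(can_lower_reserved_value(103, 110, 110, 0));    /* lower 110 back to 103 */
    assert(!can_lower_reserved_value(103, 110, 110, 105)); /* forced by SET INSERT_ID */
    return 0;
  }
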
diff --git a/sql/item.cc b/sql/item.cc
index cf0d6615999..c2d1bb30d0a 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -554,6 +554,18 @@ Item_ident::Item_ident(Name_resolution_context *context_arg,
}
+Item_ident::Item_ident(TABLE_LIST *view_arg, const char *field_name_arg)
+ :orig_db_name(NullS), orig_table_name(view_arg->table_name),
+ orig_field_name(field_name_arg), context(&view_arg->view->select_lex.context),
+ db_name(NullS), table_name(view_arg->alias),
+ field_name(field_name_arg),
+ alias_name_used(FALSE), cached_field_index(NO_CACHED_FIELD_INDEX),
+ cached_table(NULL), depended_from(NULL)
+{
+ name = (char*) field_name_arg;
+}
+
+
/**
Constructor used by Item_field & Item_*_ref (see Item comment)
*/
@@ -2218,14 +2230,14 @@ String *Item_int::val_str(String *str)
{
// following assert is redundant, because fixed=1 assigned in constructor
DBUG_ASSERT(fixed == 1);
- str->set(value, &my_charset_bin);
+ str->set_int(value, unsigned_flag, &my_charset_bin);
return str;
}
void Item_int::print(String *str, enum_query_type query_type)
{
// my_charset_bin is good enough for numbers
- str_value.set(value, &my_charset_bin);
+ str_value.set_int(value, unsigned_flag, &my_charset_bin);
str->append(str_value);
}
@@ -3512,7 +3524,7 @@ void Item_copy_decimal::copy()
{
my_decimal *nr= item->val_decimal(&cached_value);
if (nr && nr != &cached_value)
- memcpy (&cached_value, nr, sizeof (my_decimal));
+ my_decimal2decimal (nr, &cached_value);
null_value= item->null_value;
}
@@ -4309,17 +4321,33 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
It's not an Item_field in the select list so we must make a new
Item_ref to point to the Item in the select list and replace the
Item_field created by the parser with the new Item_ref.
+
+ NOTE: If we are fixing an alias reference inside ORDER/GROUP BY
+ item tree, then we use the new Item_ref only as an intermediate value
+ to resolve the referenced item.
+ In this case the new Item_ref item is unused.
*/
Item_ref *rf= new Item_ref(context, db_name,table_name,field_name);
if (!rf)
return 1;
- thd->change_item_tree(reference, rf);
+
+ bool save_group_fix_field= thd->lex->current_select->group_fix_field;
/*
- Because Item_ref never substitutes itself with other items
- in Item_ref::fix_fields(), we can safely use the original
- pointer to it even after fix_fields()
- */
- return rf->fix_fields(thd, reference) || rf->check_cols(1);
+ No need for recursive resolving of aliases.
+ */
+ thd->lex->current_select->group_fix_field= 0;
+
+ bool ret= rf->fix_fields(thd, (Item **) &rf) || rf->check_cols(1);
+ thd->lex->current_select->group_fix_field= save_group_fix_field;
+ if (ret)
+ return TRUE;
+
+ if (save_group_fix_field && alias_name_used)
+ thd->change_item_tree(reference, *rf->ref);
+ else
+ thd->change_item_tree(reference, rf);
+
+ return FALSE;
}
}
}
@@ -4468,6 +4496,7 @@ void Item_field::cleanup()
I.e. we can drop 'field'.
*/
field= result_field= 0;
+ item_equal= NULL;
null_value= FALSE;
DBUG_VOID_RETURN;
}
@@ -5043,14 +5072,22 @@ int Item_field::save_in_field(Field *to, bool no_conversions)
if (result_field->is_null())
{
null_value=1;
- res= set_field_to_null_with_conversions(to, no_conversions);
+ return set_field_to_null_with_conversions(to, no_conversions);
}
- else
+ to->set_notnull();
+
+ /*
+ If we're setting the same field as the one we're reading from there's
+ nothing to do. This can happen in 'SET x = x' type of scenarios.
+ */
+ if (to == result_field)
{
- to->set_notnull();
- res= field_conv(to,result_field);
null_value=0;
+ return 0;
}
+
+ res= field_conv(to,result_field);
+ null_value=0;
return res;
}
@@ -5150,7 +5187,7 @@ int Item::save_in_field(Field *field, bool no_conversions)
field->set_notnull();
error=field->store(nr, unsigned_flag);
}
- return error ? error : (field->table->in_use->is_error() ? 2 : 0);
+ return error ? error : (field->table->in_use->is_error() ? 1 : 0);
}
@@ -5683,9 +5720,14 @@ void Item_field::print(String *str, enum_query_type query_type)
char buff[MAX_FIELD_WIDTH];
String tmp(buff,sizeof(buff),str->charset());
field->val_str(&tmp);
- str->append('\'');
- str->append(tmp);
- str->append('\'');
+ if (field->is_null())
+ str->append("NULL");
+ else
+ {
+ str->append('\'');
+ str->append(tmp);
+ str->append('\'');
+ }
return;
}
Item_ident::print(str, query_type);
@@ -5708,6 +5750,20 @@ Item_ref::Item_ref(Name_resolution_context *context_arg,
}
+Item_ref::Item_ref(TABLE_LIST *view_arg, Item **item,
+ const char *field_name_arg, bool alias_name_used_arg)
+ :Item_ident(view_arg, field_name_arg),
+ result_field(NULL), ref(item)
+{
+ alias_name_used= alias_name_used_arg;
+ /*
+ This constructor is used to create some internal references over fixed items
+ */
+ if (ref && *ref && (*ref)->fixed)
+ set_properties();
+}
+
+
/**
Resolve the name of a reference to a column reference.
@@ -6481,7 +6537,8 @@ int Item_default_value::save_in_field(Field *field_arg, bool no_conversions)
{
if (!arg)
{
- if (field_arg->flags & NO_DEFAULT_VALUE_FLAG)
+ if (field_arg->flags & NO_DEFAULT_VALUE_FLAG &&
+ field_arg->real_type() != MYSQL_TYPE_ENUM)
{
if (field_arg->reset())
{
diff --git a/sql/item.h b/sql/item.h
index 8bcfaa286c7..e99aac3a804 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -1,4 +1,4 @@
-/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -905,6 +905,7 @@ public:
virtual bool change_context_processor(uchar *context) { return 0; }
virtual bool reset_query_id_processor(uchar *query_id_arg) { return 0; }
virtual bool is_expensive_processor(uchar *arg) { return 0; }
+ virtual bool find_item_processor(uchar *arg) { return this == (void *) arg; }
virtual bool register_field_in_read_map(uchar *arg) { return 0; }
virtual bool enumerate_field_refs_processor(uchar *arg) { return 0; }
virtual bool mark_as_eliminated_processor(uchar *arg) { return 0; }
@@ -1431,6 +1432,7 @@ public:
const char *db_name_arg, const char *table_name_arg,
const char *field_name_arg);
Item_ident(THD *thd, Item_ident *item);
+ Item_ident(TABLE_LIST *view_arg, const char *field_name_arg);
const char *full_name() const;
void cleanup();
bool remove_dependence_processor(uchar * arg);
@@ -2252,6 +2254,8 @@ public:
Item_ref(Name_resolution_context *context_arg, Item **item,
const char *table_name_arg, const char *field_name_arg,
bool alias_name_used_arg= FALSE);
+ Item_ref(TABLE_LIST *view_arg, Item **item,
+ const char *field_name_arg, bool alias_name_used_arg= FALSE);
/* Constructor need to process subselect with temporary tables (see Item) */
Item_ref(THD *thd, Item_ref *item)
@@ -2310,7 +2314,10 @@ public:
return ref ? (*ref)->real_item() : this;
}
bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
- { return (*ref)->walk(processor, walk_subquery, arg); }
+ {
+ return (*ref)->walk(processor, walk_subquery, arg) ||
+ (this->*processor)(arg);
+ }
virtual void print(String *str, enum_query_type query_type);
bool result_as_longlong()
{
@@ -2348,6 +2355,11 @@ public:
if (ref && result_type() == ROW_RESULT)
(*ref)->bring_value();
}
+ bool get_time(MYSQL_TIME *ltime)
+ {
+ DBUG_ASSERT(fixed);
+ return (*ref)->get_time(ltime);
+ }
};
@@ -2368,6 +2380,12 @@ public:
{}
/* Constructor need to process subselect with temporary tables (see Item) */
Item_direct_ref(THD *thd, Item_direct_ref *item) : Item_ref(thd, item) {}
+ Item_direct_ref(TABLE_LIST *view_arg, Item **item,
+ const char *field_name_arg,
+ bool alias_name_used_arg= FALSE)
+ :Item_ref(view_arg, item, field_name_arg,
+ alias_name_used_arg)
+ {}
double val_real();
longlong val_int();
@@ -2393,6 +2411,10 @@ public:
/* Constructor need to process subselect with temporary tables (see Item) */
Item_direct_view_ref(THD *thd, Item_direct_ref *item)
:Item_direct_ref(thd, item) {}
+ Item_direct_view_ref(TABLE_LIST *view_arg, Item **item,
+ const char *field_name_arg)
+ :Item_direct_ref(view_arg, item, field_name_arg)
+ {}
bool fix_fields(THD *, Item **);
bool eq(const Item *item, bool binary_cmp) const;
@@ -3046,6 +3068,7 @@ public:
Item_cache_int(enum_field_types field_type_arg):
Item_cache(field_type_arg), value(0) {}
+ virtual void store(Item *item){ Item_cache::store(item); }
void store_longlong(Item *item, longlong val_arg);
double val_real();
longlong val_int();
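
Since Item_ref::walk() now also applies the processor to the wrapper itself, the new find_item_processor() can be combined with walk() to test whether a given item occurs anywhere in an expression tree, including behind references. A hedged sketch (the helper name is made up):

  static bool expression_contains(Item *root, Item *needle)
  {
    /* walk() visits sub-items first, then (for Item_ref) the wrapper itself. */
    return root->walk(&Item::find_item_processor, TRUE, (uchar *) needle);
  }
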
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 3431c84b6d1..01eb1ca506a 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000-2006 MySQL AB
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1021,12 +1021,12 @@ bool Arg_comparator::try_year_cmp_func(Item_result type)
@return cache item or original value.
*/
-Item** Arg_comparator::cache_converted_constant(THD *thd, Item **value,
+Item** Arg_comparator::cache_converted_constant(THD *thd_arg, Item **value,
Item **cache_item,
Item_result type)
{
/* Don't need cache if doing context analysis only. */
- if (!thd->is_context_analysis_only() &&
+ if (!thd_arg->is_context_analysis_only() &&
(*value)->const_item() && type != (*value)->result_type())
{
Item_cache *cache= Item_cache::get_cache(*value, type);
@@ -1189,12 +1189,21 @@ get_year_value(THD *thd, Item ***item_arg, Item **cache_arg,
/*
Coerce value to the 19XX form in order to correctly compare
YEAR(2) & YEAR(4) types.
+ Here we are converting all item values but YEAR(4) fields since
+ 1) YEAR(4) already has a regular YYYY form and
+ 2) we don't want to convert zero/bad YEAR(4) values to the
+ value of 2000.
*/
- if (value < 70)
- value+= 100;
- if (value <= 1900)
- value+= 1900;
-
+ Item *real_item= item->real_item();
+ if (!(real_item->type() == Item::FIELD_ITEM &&
+ ((Item_field *)real_item)->field->type() == MYSQL_TYPE_YEAR &&
+ ((Item_field *)real_item)->field->field_length == 4))
+ {
+ if (value < 70)
+ value+= 100;
+ if (value <= 1900)
+ value+= 1900;
+ }
/* Convert year to DATETIME of form YYYY-00-00 00:00:00 (YYYY0000000000). */
value*= 10000000000LL;
@@ -1367,12 +1376,12 @@ int Arg_comparator::compare_real()
int Arg_comparator::compare_decimal()
{
- my_decimal value1;
- my_decimal *val1= (*a)->val_decimal(&value1);
+ my_decimal decimal1;
+ my_decimal *val1= (*a)->val_decimal(&decimal1);
if (!(*a)->null_value)
{
- my_decimal value2;
- my_decimal *val2= (*b)->val_decimal(&value2);
+ my_decimal decimal2;
+ my_decimal *val2= (*b)->val_decimal(&decimal2);
if (!(*b)->null_value)
{
if (set_null)
@@ -1396,9 +1405,9 @@ int Arg_comparator::compare_e_real()
int Arg_comparator::compare_e_decimal()
{
- my_decimal value1, value2;
- my_decimal *val1= (*a)->val_decimal(&value1);
- my_decimal *val2= (*b)->val_decimal(&value2);
+ my_decimal decimal1, decimal2;
+ my_decimal *val1= (*a)->val_decimal(&decimal1);
+ my_decimal *val2= (*b)->val_decimal(&decimal2);
if ((*a)->null_value || (*b)->null_value)
return test((*a)->null_value && (*b)->null_value);
return test(my_decimal_cmp(val1, val2) == 0);
@@ -5410,13 +5419,13 @@ void Item_equal::merge(Item_equal *item)
members follow in a wrong order they are swapped. This is performed
again and again until we get all members in a right order.
- @param cmp function to compare field item
+ @param compare function to compare field item
@param arg context extra parameter for the cmp function
*/
-void Item_equal::sort(Item_field_cmpfunc cmp, void *arg)
+void Item_equal::sort(Item_field_cmpfunc compare, void *arg)
{
- exchange_sort<Item_field>(&fields, cmp, arg);
+ exchange_sort<Item_field>(&fields, compare, arg);
}
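
A standalone sketch of the year coercion performed above; values coming from YEAR(4) fields are deliberately excluded by the new check:

  #include <cassert>

  static long long coerce_year_to_datetime(long long value)
  {
    if (value < 70)
      value+= 100;                 /* 0..69 -> 100..169                            */
    if (value <= 1900)
      value+= 1900;                /* 100..169 -> 2000..2069, 70..99 -> 1970..1999 */
    return value * 10000000000LL;  /* packed YYYY-00-00 00:00:00 form              */
  }

  int main()
  {
    assert(coerce_year_to_datetime(3)    == 2003LL * 10000000000LL);
    assert(coerce_year_to_datetime(85)   == 1985LL * 10000000000LL);
    assert(coerce_year_to_datetime(2010) == 2010LL * 10000000000LL);
    return 0;
  }
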
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 01d4ae67a3f..44da45a11e2 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000-2003 MySQL AB
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1474,9 +1474,21 @@ public:
Item_cond(THD *thd, Item_cond *item);
Item_cond(List<Item> &nlist)
:Item_bool_func(), list(nlist), abort_on_null(0) {}
- bool add(Item *item) { return list.push_back(item); }
- bool add_at_head(Item *item) { return list.push_front(item); }
- void add_at_head(List<Item> *nlist) { list.prepand(nlist); }
+ bool add(Item *item)
+ {
+ DBUG_ASSERT(item);
+ return list.push_back(item);
+ }
+ bool add_at_head(Item *item)
+ {
+ DBUG_ASSERT(item);
+ return list.push_front(item);
+ }
+ void add_at_head(List<Item> *nlist)
+ {
+ DBUG_ASSERT(nlist->elements);
+ list.prepand(nlist);
+ }
bool fix_fields(THD *, Item **ref);
enum Type type() const { return COND_ITEM; }
@@ -1605,7 +1617,7 @@ public:
longlong val_int();
const char *func_name() const { return "multiple equal"; }
optimize_type select_optimize() const { return OPTIMIZE_EQUAL; }
- void sort(Item_field_cmpfunc cmp, void *arg);
+ void sort(Item_field_cmpfunc compare, void *arg);
friend class Item_equal_iterator;
void fix_length_and_dec();
bool fix_fields(THD *thd, Item **ref);
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 6799fbddd3f..8541557c011 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000-2003 MySQL AB
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -42,7 +42,7 @@
class Create_native_func : public Create_func
{
public:
- virtual Item *create(THD *thd, LEX_STRING name, List<Item> *item_list);
+ virtual Item *create_func(THD *thd, LEX_STRING name, List<Item> *item_list);
/**
Builder method, with no arguments.
@@ -69,7 +69,7 @@ protected:
class Create_func_arg0 : public Create_func
{
public:
- virtual Item *create(THD *thd, LEX_STRING name, List<Item> *item_list);
+ virtual Item *create_func(THD *thd, LEX_STRING name, List<Item> *item_list);
/**
Builder method, with no arguments.
@@ -93,7 +93,7 @@ protected:
class Create_func_arg1 : public Create_func
{
public:
- virtual Item *create(THD *thd, LEX_STRING name, List<Item> *item_list);
+ virtual Item *create_func(THD *thd, LEX_STRING name, List<Item> *item_list);
/**
Builder method, with one argument.
@@ -118,7 +118,7 @@ protected:
class Create_func_arg2 : public Create_func
{
public:
- virtual Item *create(THD *thd, LEX_STRING name, List<Item> *item_list);
+ virtual Item *create_func(THD *thd, LEX_STRING name, List<Item> *item_list);
/**
Builder method, with two arguments.
@@ -144,7 +144,7 @@ protected:
class Create_func_arg3 : public Create_func
{
public:
- virtual Item *create(THD *thd, LEX_STRING name, List<Item> *item_list);
+ virtual Item *create_func(THD *thd, LEX_STRING name, List<Item> *item_list);
/**
Builder method, with three arguments.
@@ -194,7 +194,7 @@ protected:
class Create_func_no_geom : public Create_func
{
public:
- virtual Item *create(THD *thd, LEX_STRING name, List<Item> *item_list);
+ virtual Item *create_func(THD *thd, LEX_STRING name, List<Item> *item_list);
/** Singleton. */
static Create_func_no_geom s_singleton;
@@ -2315,7 +2315,7 @@ static bool has_named_parameters(List<Item> *params)
Create_func_no_geom Create_func_no_geom::s_singleton;
Item*
-Create_func_no_geom::create(THD * /* unused */,
+Create_func_no_geom::create_func(THD * /* unused */,
LEX_STRING /* unused */,
List<Item> * /* unused */)
{
@@ -2328,7 +2328,7 @@ Create_func_no_geom::create(THD * /* unused */,
Item*
-Create_qfunc::create(THD *thd, LEX_STRING name, List<Item> *item_list)
+Create_qfunc::create_func(THD *thd, LEX_STRING name, List<Item> *item_list)
{
LEX_STRING db;
@@ -2361,7 +2361,7 @@ Create_qfunc::create(THD *thd, LEX_STRING name, List<Item> *item_list)
Create_udf_func Create_udf_func::s_singleton;
Item*
-Create_udf_func::create(THD *thd, LEX_STRING name, List<Item> *item_list)
+Create_udf_func::create_func(THD *thd, LEX_STRING name, List<Item> *item_list)
{
udf_func *udf= find_udf(name.str, name.length);
DBUG_ASSERT(udf);
@@ -2512,7 +2512,7 @@ Create_sp_func::create_with_db(THD *thd, LEX_STRING db, LEX_STRING name,
Item*
-Create_native_func::create(THD *thd, LEX_STRING name, List<Item> *item_list)
+Create_native_func::create_func(THD *thd, LEX_STRING name, List<Item> *item_list)
{
if (has_named_parameters(item_list))
{
@@ -2525,7 +2525,7 @@ Create_native_func::create(THD *thd, LEX_STRING name, List<Item> *item_list)
Item*
-Create_func_arg0::create(THD *thd, LEX_STRING name, List<Item> *item_list)
+Create_func_arg0::create_func(THD *thd, LEX_STRING name, List<Item> *item_list)
{
int arg_count= 0;
@@ -2543,7 +2543,7 @@ Create_func_arg0::create(THD *thd, LEX_STRING name, List<Item> *item_list)
Item*
-Create_func_arg1::create(THD *thd, LEX_STRING name, List<Item> *item_list)
+Create_func_arg1::create_func(THD *thd, LEX_STRING name, List<Item> *item_list)
{
int arg_count= 0;
@@ -2569,7 +2569,7 @@ Create_func_arg1::create(THD *thd, LEX_STRING name, List<Item> *item_list)
Item*
-Create_func_arg2::create(THD *thd, LEX_STRING name, List<Item> *item_list)
+Create_func_arg2::create_func(THD *thd, LEX_STRING name, List<Item> *item_list)
{
int arg_count= 0;
@@ -2597,7 +2597,7 @@ Create_func_arg2::create(THD *thd, LEX_STRING name, List<Item> *item_list)
Item*
-Create_func_arg3::create(THD *thd, LEX_STRING name, List<Item> *item_list)
+Create_func_arg3::create_func(THD *thd, LEX_STRING name, List<Item> *item_list)
{
int arg_count= 0;
diff --git a/sql/item_create.h b/sql/item_create.h
index 9f439a3aead..848f4793c83 100644
--- a/sql/item_create.h
+++ b/sql/item_create.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000-2006 MySQL AB
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -53,7 +53,7 @@ public:
@param item_list The list of arguments to the function, can be NULL
@return An item representing the parsed function call, or NULL
*/
- virtual Item *create(THD *thd, LEX_STRING name, List<Item> *item_list) = 0;
+ virtual Item *create_func(THD *thd, LEX_STRING name, List<Item> *item_list) = 0;
protected:
/** Constructor */
@@ -80,7 +80,7 @@ public:
@param item_list The list of arguments to the function, can be NULL
@return An item representing the parsed function call
*/
- virtual Item *create(THD *thd, LEX_STRING name, List<Item> *item_list);
+ virtual Item *create_func(THD *thd, LEX_STRING name, List<Item> *item_list);
/**
The builder create method, for qualified functions.
@@ -128,7 +128,7 @@ extern Create_qfunc * find_qualified_function_builder(THD *thd);
class Create_udf_func : public Create_func
{
public:
- virtual Item *create(THD *thd, LEX_STRING name, List<Item> *item_list);
+ virtual Item *create_func(THD *thd, LEX_STRING name, List<Item> *item_list);
/**
The builder create method, for User Defined Functions.
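
With the list-based builder entry point renamed to create_func(), a caller would look roughly like the sketch below; it is simplified and assumes the find_native_function_builder() lookup declared alongside these classes:

  static Item *make_native_function_call(THD *thd, LEX_STRING name,
                                         List<Item> *args)
  {
    Create_func *builder= find_native_function_builder(thd, name);
    if (!builder)
      return NULL;                                 /* not a known native function */
    return builder->create_func(thd, name, args);  /* was builder->create(...)    */
  }
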
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 6c295635d6c..dc7c75fb5d2 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -3360,80 +3360,6 @@ longlong Item_master_pos_wait::val_int()
return event_count;
}
-#ifdef EXTRA_DEBUG
-void debug_sync_point(const char* lock_name, uint lock_timeout)
-{
- THD* thd=current_thd;
- User_level_lock* ull;
- struct timespec abstime;
- size_t lock_name_len;
- lock_name_len= strlen(lock_name);
- pthread_mutex_lock(&LOCK_user_locks);
-
- if (thd->ull)
- {
- item_user_lock_release(thd->ull);
- thd->ull=0;
- }
-
- /*
- If the lock has not been aquired by some client, we do not want to
- create an entry for it, since we immediately release the lock. In
- this case, we will not be waiting, but rather, just waste CPU and
- memory on the whole deal
- */
- if (!(ull= ((User_level_lock*) hash_search(&hash_user_locks,
- (uchar*) lock_name,
- lock_name_len))))
- {
- pthread_mutex_unlock(&LOCK_user_locks);
- return;
- }
- ull->count++;
-
- /*
- Structure is now initialized. Try to get the lock.
- Set up control struct to allow others to abort locks
- */
- thd_proc_info(thd, "User lock");
- thd->mysys_var->current_mutex= &LOCK_user_locks;
- thd->mysys_var->current_cond= &ull->cond;
-
- set_timespec(abstime,lock_timeout);
- while (ull->locked && !thd->killed)
- {
- int error= pthread_cond_timedwait(&ull->cond, &LOCK_user_locks, &abstime);
- if (error == ETIMEDOUT || error == ETIME)
- break;
- }
-
- if (ull->locked)
- {
- if (!--ull->count)
- delete ull; // Should never happen
- }
- else
- {
- ull->locked=1;
- ull->set_thread(thd);
- thd->ull=ull;
- }
- pthread_mutex_unlock(&LOCK_user_locks);
- pthread_mutex_lock(&thd->mysys_var->mutex);
- thd_proc_info(thd, 0);
- thd->mysys_var->current_mutex= 0;
- thd->mysys_var->current_cond= 0;
- pthread_mutex_unlock(&thd->mysys_var->mutex);
- pthread_mutex_lock(&LOCK_user_locks);
- if (thd->ull)
- {
- item_user_lock_release(thd->ull);
- thd->ull=0;
- }
- pthread_mutex_unlock(&LOCK_user_locks);
-}
-
-#endif
/**
Get a user level lock. If the thread has an old lock this is first released.
diff --git a/sql/item_row.cc b/sql/item_row.cc
index 28de03bf049..7535c1fa80b 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -30,7 +30,8 @@
*/
Item_row::Item_row(List<Item> &arg):
- Item(), used_tables_cache(0), const_item_cache(1), with_null(0)
+ Item(), used_tables_cache(0), not_null_tables_cache(0),
+ const_item_cache(1), with_null(0)
{
//TODO: think placing 2-3 component items in item (as it done for function)
@@ -71,7 +72,13 @@ bool Item_row::fix_fields(THD *thd, Item **ref)
Item *item= *arg;
used_tables_cache |= item->used_tables();
const_item_cache&= item->const_item() && !with_null;
- if (const_item_cache)
+ not_null_tables_cache|= item->not_null_tables();
+ /*
+ Some subquery transformations aren't done in view_prepare_mode, thus
+ is_null() would fail. So we skip the is_null() calculation for CREATE VIEW
+ as it is not necessary.
+ */
+ if (const_item_cache && !thd->lex->view_prepare_mode)
{
if (item->cols() > 1)
with_null|= item->null_inside();
diff --git a/sql/item_row.h b/sql/item_row.h
index 67441f49603..76d1c875e7d 100644
--- a/sql/item_row.h
+++ b/sql/item_row.h
@@ -16,7 +16,7 @@
class Item_row: public Item
{
Item **items;
- table_map used_tables_cache;
+ table_map used_tables_cache, not_null_tables_cache;
uint arg_count;
bool const_item_cache;
bool with_null;
@@ -26,6 +26,7 @@ public:
Item(),
items(item->items),
used_tables_cache(item->used_tables_cache),
+ not_null_tables_cache(0),
arg_count(item->arg_count),
const_item_cache(item->const_item_cache),
with_null(0)
@@ -65,6 +66,7 @@ public:
bool const_item() const { return const_item_cache; };
enum Item_result result_type() const { return ROW_RESULT; }
void update_used_tables();
+ table_map not_null_tables() const { return not_null_tables_cache; }
virtual void print(String *str, enum_query_type query_type);
bool walk(Item_processor processor, bool walk_subquery, uchar *arg);
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index b4dd2905fa4..810c4993379 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -324,7 +324,7 @@ String *Item_func_concat::val_str(String *str)
}
else if (str->alloced_length() >= res->length()+res2->length())
{
- if (str == res2)
+ if (str->ptr() == res2->ptr())
str->replace(0,0,*res);
else
{
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 59e7c0df5b6..0a6375e80a5 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -694,8 +694,9 @@ public:
String *val_str(String *);
void fix_length_and_dec()
{
+ ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 + 2;
+ max_length= (uint32) min(max_result_length, MAX_BLOB_WIDTH);
collation.set(args[0]->collation);
- max_length= args[0]->max_length * 2 + 2;
}
};
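
The ulonglong intermediate exists to avoid 32-bit wrap-around before the result is clamped; a standalone sketch with a placeholder limit (the real MAX_BLOB_WIDTH constant comes from the server headers):

  #include <stdint.h>
  #include <assert.h>

  int main()
  {
    const uint64_t max_blob_width= 16777216ULL;   /* placeholder limit            */
    uint32_t arg_max_length= 4294967295U;         /* e.g. a LONGBLOB argument     */

    uint32_t wrapped= (uint32_t) (arg_max_length * 2U + 2U);  /* old: wraps to 0  */
    uint64_t wide= (uint64_t) arg_max_length * 2 + 2;         /* new: 8589934592  */
    uint32_t clamped= (uint32_t) (wide < max_blob_width ? wide : max_blob_width);

    assert(wrapped == 0);
    assert(clamped == max_blob_width);
    return 0;
  }
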
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index dad05981af4..f8e9bde282a 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -273,9 +273,13 @@ bool Item_subselect::exec()
{
int res;
- if (thd->is_error())
- /* Do not execute subselect in case of a fatal error */
+ /*
+ Do not execute subselect in case of a fatal error
+ or if the query has been killed.
+ */
+ if (thd->is_error() || thd->killed)
return 1;
+
/*
Simulate a failure in sub-query execution. Used to test e.g.
out of memory or query being killed conditions.
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 6f852e1016c..d87080a2fee 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000-2003 MySQL AB
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -642,7 +642,7 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref)
default:
DBUG_ASSERT(0);
};
- setup_item(args[0], NULL);
+ setup_hybrid(args[0], NULL);
/* MIN/MAX can return NULL for empty set independent of the used column */
maybe_null= 1;
unsigned_flag=item->unsigned_flag;
@@ -676,7 +676,7 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref)
of the original MIN/MAX object and it is saved in this object's cache.
*/
-void Item_sum_hybrid::setup_item(Item *item, Item *value_arg)
+void Item_sum_hybrid::setup_hybrid(Item *item, Item *value_arg)
{
value= Item_cache::get_cache(item);
value->setup(item);
@@ -1646,7 +1646,7 @@ void Item_sum_hybrid::no_rows_in_result()
Item *Item_sum_min::copy_or_same(THD* thd)
{
Item_sum_min *item= new (thd->mem_root) Item_sum_min(thd, this);
- item->setup_item(args[0], value);
+ item->setup_hybrid(args[0], value);
return item;
}
@@ -1669,7 +1669,7 @@ bool Item_sum_min::add()
Item *Item_sum_max::copy_or_same(THD* thd)
{
Item_sum_max *item= new (thd->mem_root) Item_sum_max(thd, this);
- item->setup_item(args[0], value);
+ item->setup_hybrid(args[0], value);
return item;
}
@@ -3397,6 +3397,8 @@ String* Item_func_group_concat::val_str(String* str)
void Item_func_group_concat::print(String *str, enum_query_type query_type)
{
+ /* orig_args is not filled with valid values until fix_fields() */
+ Item **pargs= fixed ? orig_args : args;
str->append(STRING_WITH_LEN("group_concat("));
if (distinct)
str->append(STRING_WITH_LEN("distinct "));
@@ -3404,7 +3406,7 @@ void Item_func_group_concat::print(String *str, enum_query_type query_type)
{
if (i)
str->append(',');
- args[i]->print(str, query_type);
+ pargs[i]->print(str, query_type);
}
if (arg_count_order)
{
@@ -3413,7 +3415,7 @@ void Item_func_group_concat::print(String *str, enum_query_type query_type)
{
if (i)
str->append(',');
- (*order[i]->item)->print(str, query_type);
+ pargs[i + arg_count_field]->print(str, query_type);
if (order[i]->asc)
str->append(STRING_WITH_LEN(" ASC"));
else
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 110265fd8b4..2705b9f6e3e 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000-2006 MySQL AB
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -368,7 +368,7 @@ public:
*/
void no_rows_in_result() { clear(); }
- virtual bool setup(THD *thd) {return 0;}
+ virtual bool setup(THD* thd) {return 0;}
virtual void make_unique() {}
Item *get_tmp_table_item(THD *thd);
virtual Field *create_tmp_field(bool group, TABLE *table,
@@ -858,7 +858,7 @@ protected:
was_values(item->was_values)
{ }
bool fix_fields(THD *, Item **);
- void setup_item(Item *item, Item *value_arg);
+ void setup_hybrid(Item *item, Item *value_arg);
void clear();
double val_real();
longlong val_int();
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index a244a674bbf..d91ccee1575 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -882,7 +882,7 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs,
value= value*LL(10) + (longlong) (*str - '0');
if (transform_msec && i == count - 1) // microseconds always last
{
- int msec_length= 6 - (int)(str - start);
+ int msec_length= 6 - (int) (str - start);
if (msec_length > 0)
value*= (long)log_10_int[msec_length];
}
diff --git a/sql/log.cc b/sql/log.cc
index f52e68dd1b9..cc236ef13d2 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -964,6 +964,7 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length,
uint user_host_len= 0;
ulonglong query_utime, lock_utime;
+ DBUG_ASSERT(thd->enable_slow_log);
/*
Print the message to the buffer if we have slow log enabled
*/
@@ -1727,11 +1728,14 @@ static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv)
binlog_trans_log_savepos(thd, (my_off_t*) sv);
/* Write it to the binary log */
+ String log_query;
+ if (log_query.append(STRING_WITH_LEN("SAVEPOINT ")) ||
+ log_query.append(thd->lex->ident.str, thd->lex->ident.length))
+ DBUG_RETURN(1);
int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
- int const error=
- thd->binlog_query(THD::STMT_QUERY_TYPE,
- thd->query(), thd->query_length(), TRUE, FALSE, errcode);
- DBUG_RETURN(error);
+ Query_log_event qinfo(thd, log_query.c_ptr_safe(), log_query.length(),
+ TRUE, TRUE, errcode);
+ DBUG_RETURN(mysql_bin_log.write(&qinfo));
}
static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv)
@@ -1746,11 +1750,14 @@ static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv)
if (unlikely(thd->transaction.all.modified_non_trans_table ||
(thd->options & OPTION_KEEP_LOG)))
{
+ String log_query;
+ if (log_query.append(STRING_WITH_LEN("ROLLBACK TO ")) ||
+ log_query.append(thd->lex->ident.str, thd->lex->ident.length))
+ DBUG_RETURN(1);
int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
- int error=
- thd->binlog_query(THD::STMT_QUERY_TYPE,
- thd->query(), thd->query_length(), TRUE, FALSE, errcode);
- DBUG_RETURN(error);
+ Query_log_event qinfo(thd, log_query.c_ptr_safe(), log_query.length(),
+ TRUE, TRUE, errcode);
+ DBUG_RETURN(mysql_bin_log.write(&qinfo));
}
binlog_trans_log_truncate(thd, *(my_off_t*)sv);
DBUG_RETURN(0);
@@ -4100,11 +4107,8 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans)
DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());
DBUG_ASSERT(table->s->table_map_id != ULONG_MAX);
- Table_map_log_event::flag_set const
- flags= Table_map_log_event::TM_NO_FLAGS;
-
Table_map_log_event
- the_event(this, table, table->s->table_map_id, is_trans, flags);
+ the_event(this, table, table->s->table_map_id, is_trans);
if (is_trans && binlog_table_maps == 0)
binlog_start_trans_and_stmt();
@@ -4313,7 +4317,9 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
*/
const char *local_db= event_info->get_db();
if ((!(thd->options & OPTION_BIN_LOG)) ||
- (!binlog_filter->db_ok(local_db)))
+ (thd->lex->sql_command != SQLCOM_ROLLBACK_TO_SAVEPOINT &&
+ thd->lex->sql_command != SQLCOM_SAVEPOINT &&
+ !binlog_filter->db_ok(local_db)))
{
VOID(pthread_mutex_unlock(&LOCK_log));
DBUG_RETURN(0);
@@ -4717,7 +4723,7 @@ int query_error_code(THD *thd, bool not_killed)
{
int error;
- if (not_killed)
+ if (not_killed || (thd->killed == THD::KILL_BAD_DATA))
{
error= thd->is_error() ? thd->main_da.sql_errno() : 0;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 94b6b35ec8c..f00b271e491 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -3062,10 +3062,7 @@ int Query_log_event::do_apply_event(Relay_log_info const *rli,
::do_apply_event(), then the companion SET also have so
we don't need to reset_one_shot_variables().
*/
- if (!strncmp(query_arg, "BEGIN", q_len_arg) ||
- !strncmp(query_arg, "COMMIT", q_len_arg) ||
- !strncmp(query_arg, "ROLLBACK", q_len_arg) ||
- rpl_filter->db_ok(thd->db))
+ if (is_trans_keyword() || rpl_filter->db_ok(thd->db))
{
thd->set_time((time_t)when);
thd->set_query((char*)query_arg, q_len_arg);
@@ -3169,6 +3166,18 @@ int Query_log_event::do_apply_event(Relay_log_info const *rli,
const char* found_semicolon= NULL;
mysql_parse(thd, thd->query(), thd->query_length(), &found_semicolon);
log_slow_statement(thd);
+
+ /*
+ Resetting the enable_slow_log thd variable.
+
+ We need to reset it back to the opt_log_slow_slave_statements
+ value after the statement execution (and slow logging
+ is done). It might have changed if the statement was an
+ admin statement (in which case, down in mysql_parse execution
+ thd->enable_slow_log is set to the value of
+ opt_log_slow_admin_statements).
+ */
+ thd->enable_slow_log= opt_log_slow_slave_statements;
}
else
{
@@ -7886,7 +7895,7 @@ int Table_map_log_event::save_field_metadata()
*/
#if !defined(MYSQL_CLIENT)
Table_map_log_event::Table_map_log_event(THD *thd, TABLE *tbl, ulong tid,
- bool is_transactional, uint16 flags)
+ bool is_transactional)
: Log_event(thd, 0, true),
m_table(tbl),
m_dbnam(tbl->s->db.str),
@@ -7896,7 +7905,7 @@ Table_map_log_event::Table_map_log_event(THD *thd, TABLE *tbl, ulong tid,
m_colcnt(tbl->s->fields),
m_memory(NULL),
m_table_id(tid),
- m_flags(flags),
+ m_flags(TM_BIT_LEN_EXACT_F),
m_data_size(0),
m_field_metadata(0),
m_field_metadata_size(0),
@@ -8154,8 +8163,10 @@ int Table_map_log_event::do_apply_event(Relay_log_info const *rli)
inside Relay_log_info::clear_tables_to_lock() by calling the
table_def destructor explicitly.
*/
- new (&table_list->m_tabledef) table_def(m_coltype, m_colcnt,
- m_field_metadata, m_field_metadata_size, m_null_bits);
+ new (&table_list->m_tabledef)
+ table_def(m_coltype, m_colcnt,
+ m_field_metadata, m_field_metadata_size,
+ m_null_bits, m_flags);
table_list->m_tabledef_valid= TRUE;
/*
@@ -8743,7 +8754,7 @@ static bool record_compare(TABLE *table)
DBUG_DUMP("record[1]", table->record[1], table->s->reclength);
bool result= FALSE;
- uchar saved_x[2], saved_filler[2];
+ uchar saved_x[2]= {0, 0}, saved_filler[2]= {0, 0};
if (table->s->null_bytes > 0)
{
@@ -9467,7 +9478,7 @@ Incident_log_event::write_data_body(IO_CACHE *file)
they will always be printed for the first event.
*/
st_print_event_info::st_print_event_info()
- :flags2_inited(0), sql_mode_inited(0),
+ :flags2_inited(0), sql_mode_inited(0), sql_mode(0),
auto_increment_increment(0),auto_increment_offset(0), charset_inited(0),
lc_time_names_number(~0),
charset_database_number(ILLEGAL_CHARSET_INFO_NUMBER),
diff --git a/sql/log_event.h b/sql/log_event.h
index 36715b1d151..6c41c906328 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -1682,6 +1682,28 @@ public: /* !!! Public in this patch to allow old usage */
const char *query_arg,
uint32 q_len_arg);
#endif /* HAVE_REPLICATION */
+ /*
+ If true, the event is always applied by the slave SQL thread or printed by
+ mysqlbinlog
+ */
+ bool is_trans_keyword()
+ {
+ /*
+ Before the patch for bug#50407, the SAVEPOINT and ROLLBACK TO
+ queries entered by the user were written into log events directly,
+ so the keywords could appear in either upper or lower case;
+ strncasecmp is used to check both cases. They could also be
+ binlogged with comments in front of these keywords, for example:
+ / * bla bla * / SAVEPOINT a;
+ / * bla bla * / ROLLBACK TO a;
+ We don't handle those cases; after the patch, both queries are
+ binlogged in upper case with no comments.
+ */
+ return !strncmp(query, "BEGIN", q_len) ||
+ !strncmp(query, "COMMIT", q_len) ||
+ !strncasecmp(query, "SAVEPOINT", 9) ||
+ !strncasecmp(query, "ROLLBACK", 8);
+ }
};
@@ -3298,16 +3320,14 @@ public:
/* Special constants representing sets of flags */
enum
{
- TM_NO_FLAGS = 0U
+ TM_NO_FLAGS = 0U,
+ TM_BIT_LEN_EXACT_F = (1U << 0)
};
- void set_flags(flag_set flag) { m_flags |= flag; }
- void clear_flags(flag_set flag) { m_flags &= ~flag; }
flag_set get_flags(flag_set flag) const { return m_flags & flag; }
#ifndef MYSQL_CLIENT
- Table_map_log_event(THD *thd, TABLE *tbl, ulong tid,
- bool is_transactional, uint16 flags);
+ Table_map_log_event(THD *thd, TABLE *tbl, ulong tid, bool is_transactional);
#endif
#ifdef HAVE_REPLICATION
Table_map_log_event(const char *buf, uint event_len,
@@ -3320,7 +3340,7 @@ public:
table_def *create_table_def()
{
return new table_def(m_coltype, m_colcnt, m_field_metadata,
- m_field_metadata_size, m_null_bits);
+ m_field_metadata_size, m_null_bits, m_flags);
}
ulong get_table_id() const { return m_table_id; }
const char *get_table_name() const { return m_tblnam; }
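
A standalone sketch showing how the is_trans_keyword() tests classify a few query strings; the helper merely repeats the same comparisons outside the class:

  #include <string.h>
  #include <strings.h>   /* strncasecmp */
  #include <assert.h>

  static bool looks_like_trans_keyword(const char *query, size_t q_len)
  {
    return !strncmp(query, "BEGIN", q_len) ||
           !strncmp(query, "COMMIT", q_len) ||
           !strncasecmp(query, "SAVEPOINT", 9) ||
           !strncasecmp(query, "ROLLBACK", 8);
  }

  int main()
  {
    assert(looks_like_trans_keyword("BEGIN", 5));
    assert(looks_like_trans_keyword("savepoint sp1", 13));
    assert(looks_like_trans_keyword("ROLLBACK TO sp1", 15));
    assert(!looks_like_trans_keyword("INSERT INTO t1 VALUES (1)", 25));
    return 0;
  }
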
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index cda550f3c92..87225ce55eb 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -15,7 +15,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
{
DBUG_ENTER("Old_rows_log_event::do_apply_event(st_relay_log_info*)");
int error= 0;
- THD *thd= ev->thd;
+ THD *ev_thd= ev->thd;
uchar const *row_start= ev->m_rows_buf;
/*
@@ -33,17 +33,17 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
DBUG_ASSERT(ev->get_flags(Old_rows_log_event::STMT_END_F));
const_cast<Relay_log_info*>(rli)->clear_tables_to_lock();
- close_thread_tables(thd);
- thd->clear_error();
+ close_thread_tables(ev_thd);
+ ev_thd->clear_error();
DBUG_RETURN(0);
}
/*
- 'thd' has been set by exec_relay_log_event(), just before calling
+ 'ev_thd' has been set by exec_relay_log_event(), just before calling
do_apply_event(). We still check here to prevent future coding
errors.
*/
- DBUG_ASSERT(rli->sql_thd == thd);
+ DBUG_ASSERT(rli->sql_thd == ev_thd);
/*
If there is no locks taken, this is the first binrow event seen
@@ -51,10 +51,10 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
used in the transaction and proceed with execution of the actual
event.
*/
- if (!thd->lock)
+ if (!ev_thd->lock)
{
/*
- Lock_tables() reads the contents of thd->lex, so they must be
+ Lock_tables() reads the contents of ev_thd->lex, so they must be
initialized.
We also call the mysql_reset_thd_for_next_command(), since this
@@ -62,24 +62,24 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
call might reset the value of current_stmt_binlog_row_based, so
we need to do any changes to that value after this function.
*/
- lex_start(thd);
- mysql_reset_thd_for_next_command(thd);
+ lex_start(ev_thd);
+ mysql_reset_thd_for_next_command(ev_thd);
/*
Check if the slave is set to use SBR. If so, it should switch
to using RBR until the end of the "statement", i.e., next
STMT_END_F or next error.
*/
- if (!thd->current_stmt_binlog_row_based &&
- mysql_bin_log.is_open() && (thd->options & OPTION_BIN_LOG))
+ if (!ev_thd->current_stmt_binlog_row_based &&
+ mysql_bin_log.is_open() && (ev_thd->options & OPTION_BIN_LOG))
{
- thd->set_current_stmt_binlog_row_based();
+ ev_thd->set_current_stmt_binlog_row_based();
}
- if (simple_open_n_lock_tables(thd, rli->tables_to_lock))
+ if (simple_open_n_lock_tables(ev_thd, rli->tables_to_lock))
{
- uint actual_error= thd->main_da.sql_errno();
- if (thd->is_slave_error || thd->is_fatal_error)
+ uint actual_error= ev_thd->main_da.sql_errno();
+ if (ev_thd->is_slave_error || ev_thd->is_fatal_error)
{
/*
Error reporting borrowed from Query_log_event with many excessive
@@ -87,9 +87,9 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
*/
rli->report(ERROR_LEVEL, actual_error,
"Error '%s' on opening tables",
- (actual_error ? thd->main_da.message() :
+ (actual_error ? ev_thd->main_da.message() :
"unexpected success or fatal error"));
- thd->is_slave_error= 1;
+ ev_thd->is_slave_error= 1;
}
const_cast<Relay_log_info*>(rli)->clear_tables_to_lock();
DBUG_RETURN(actual_error);
@@ -109,9 +109,9 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
{
if (ptr->m_tabledef.compatible_with(rli, ptr->table))
{
- mysql_unlock_tables(thd, thd->lock);
- thd->lock= 0;
- thd->is_slave_error= 1;
+ mysql_unlock_tables(ev_thd, ev_thd->lock);
+ ev_thd->lock= 0;
+ ev_thd->is_slave_error= 1;
const_cast<Relay_log_info*>(rli)->clear_tables_to_lock();
DBUG_RETURN(Old_rows_log_event::ERR_BAD_TABLE_DEF);
}
@@ -159,23 +159,23 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
TIMESTAMP column to a table with one.
So we call set_time(), like in SBR. Presently it changes nothing.
*/
- thd->set_time((time_t)ev->when);
+ ev_thd->set_time((time_t)ev->when);
/*
There are a few flags that are replicated with each row event.
Make sure to set/clear them before executing the main body of
the event.
*/
if (ev->get_flags(Old_rows_log_event::NO_FOREIGN_KEY_CHECKS_F))
- thd->options|= OPTION_NO_FOREIGN_KEY_CHECKS;
+ ev_thd->options|= OPTION_NO_FOREIGN_KEY_CHECKS;
else
- thd->options&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
+ ev_thd->options&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
if (ev->get_flags(Old_rows_log_event::RELAXED_UNIQUE_CHECKS_F))
- thd->options|= OPTION_RELAXED_UNIQUE_CHECKS;
+ ev_thd->options|= OPTION_RELAXED_UNIQUE_CHECKS;
else
- thd->options&= ~OPTION_RELAXED_UNIQUE_CHECKS;
+ ev_thd->options&= ~OPTION_RELAXED_UNIQUE_CHECKS;
/* A small test to verify that objects have consistent types */
- DBUG_ASSERT(sizeof(thd->options) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));
+ DBUG_ASSERT(sizeof(ev_thd->options) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));
/*
Now we are in a statement and will stay in a statement until we
@@ -192,7 +192,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
while (error == 0 && row_start < ev->m_rows_end)
{
uchar const *row_end= NULL;
- if ((error= do_prepare_row(thd, rli, table, row_start, &row_end)))
+ if ((error= do_prepare_row(ev_thd, rli, table, row_start, &row_end)))
break; // We should perform the after-row operation even in
// the case of error
@@ -202,7 +202,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
/* in_use can have been set to NULL in close_tables_for_reopen */
THD* old_thd= table->in_use;
if (!table->in_use)
- table->in_use= thd;
+ table->in_use= ev_thd;
error= do_exec_row(table);
table->in_use = old_thd;
switch (error)
@@ -216,11 +216,11 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
break;
default:
- rli->report(ERROR_LEVEL, thd->main_da.sql_errno(),
+ rli->report(ERROR_LEVEL, ev_thd->main_da.sql_errno(),
"Error in %s event: row application failed. %s",
ev->get_type_str(),
- thd->is_error() ? thd->main_da.message() : "");
- thd->is_slave_error= 1;
+ ev_thd->is_error() ? ev_thd->main_da.message() : "");
+ ev_thd->is_slave_error= 1;
break;
}
@@ -232,7 +232,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
if (!ev->cache_stmt)
{
DBUG_PRINT("info", ("Marked that we need to keep log"));
- thd->options|= OPTION_KEEP_LOG;
+ ev_thd->options|= OPTION_KEEP_LOG;
}
}
@@ -245,12 +245,12 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
if (error)
{ /* error has occured during the transaction */
- rli->report(ERROR_LEVEL, thd->main_da.sql_errno(),
+ rli->report(ERROR_LEVEL, ev_thd->main_da.sql_errno(),
"Error in %s event: error during transaction execution "
"on table %s.%s. %s",
ev->get_type_str(), table->s->db.str,
table->s->table_name.str,
- thd->is_error() ? thd->main_da.message() : "");
+ ev_thd->is_error() ? ev_thd->main_da.message() : "");
/*
If one day we honour --skip-slave-errors in row-based replication, and
@@ -263,9 +263,9 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
thread is certainly going to stop.
rollback at the caller along with sbr.
*/
- thd->reset_current_stmt_binlog_row_based();
- const_cast<Relay_log_info*>(rli)->cleanup_context(thd, error);
- thd->is_slave_error= 1;
+ ev_thd->reset_current_stmt_binlog_row_based();
+ const_cast<Relay_log_info*>(rli)->cleanup_context(ev_thd, error);
+ ev_thd->is_slave_error= 1;
DBUG_RETURN(error);
}
@@ -337,7 +337,7 @@ static bool record_compare(TABLE *table)
*/
bool result= FALSE;
- uchar saved_x[2], saved_filler[2];
+ uchar saved_x[2]= {0, 0}, saved_filler[2]= {0, 0};
if (table->s->null_bytes > 0)
{
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 499d7d2fc24..6f5d0a82d6f 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -119,13 +119,15 @@ char* query_table_status(THD *thd,const char *db,const char *table_name);
#define WARN_DEPRECATED(Thd,Ver,Old,New) \
do { \
DBUG_ASSERT(strncmp(Ver, MYSQL_SERVER_VERSION, sizeof(Ver)-1) > 0); \
- if (((uchar*)Thd) != NULL) \
+ if (((uchar*)Thd) != NULL) \
push_warning_printf(((THD *)Thd), MYSQL_ERROR::WARN_LEVEL_WARN, \
- ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX_WITH_VER), \
- (Old), (Ver), (New)); \
+ ER_WARN_DEPRECATED_SYNTAX, \
+ ER(ER_WARN_DEPRECATED_SYNTAX), \
+ (Old), (New)); \
else \
- sql_print_warning("The syntax '%s' is deprecated and will be removed " \
- "in a future release. Please use %s instead.", (Old), (New)); \
+ sql_print_warning("'%s' is deprecated and will be removed " \
+ "in a future release. Please use '%s' instead.", \
+ (Old), (New)); \
} while(0)
extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *system_charset_info;
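As a usage illustration only (the call site below is hypothetical and not part
of this hunk), the macro takes a thread handle, the version in which the
feature is expected to be removed, the old syntax and its replacement; after
this change the version is still checked by the assertion but is no longer
interpolated into the warning text:

  WARN_DEPRECATED(thd, "7.0", "--skip-symlink", "--skip-symbolic-links");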
@@ -592,20 +594,6 @@ protected:
/* Used to check GROUP BY list in the MODE_ONLY_FULL_GROUP_BY mode */
#define UNDEF_POS (-1)
-#ifdef EXTRA_DEBUG
-/**
- Sync points allow us to force the server to reach a certain line of code
- and block there until the client tells the server it is ok to go on.
- The client tells the server to block with SELECT GET_LOCK()
- and unblocks it with SELECT RELEASE_LOCK(). Used for debugging difficult
- concurrency problems
-*/
-#define DBUG_SYNC_POINT(lock_name,lock_timeout) \
- debug_sync_point(lock_name,lock_timeout)
-void debug_sync_point(const char* lock_name, uint lock_timeout);
-#else
-#define DBUG_SYNC_POINT(lock_name,lock_timeout)
-#endif /* EXTRA_DEBUG */
/* BINLOG_DUMP options */
@@ -1208,7 +1196,7 @@ int setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
List<Item> &fields, List<Item> &all_fields, ORDER *order,
bool *hidden_group_fields);
bool fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
- Item **ref_pointer_array);
+ Item **ref_pointer_array, ORDER *group_list= NULL);
bool handle_select(THD *thd, LEX *lex, select_result *result,
ulong setup_tables_done_option);
@@ -2022,6 +2010,7 @@ extern my_bool opt_log, opt_slow_log;
extern ulong log_output_options;
extern my_bool opt_log_queries_not_using_indexes;
extern bool opt_disable_networking, opt_skip_show_db;
+extern bool opt_skip_name_resolve;
extern bool opt_ignore_builtin_innodb;
extern my_bool opt_character_set_client_handshake;
extern bool volatile abort_loop, shutdown_in_progress;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 645b7498042..3b99b06a8d1 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -422,6 +422,7 @@ ulong log_output_options;
my_bool opt_log_queries_not_using_indexes= 0;
bool opt_error_log= IF_WIN(1,0);
bool opt_disable_networking=0, opt_skip_show_db=0;
+bool opt_skip_name_resolve=0;
my_bool opt_character_set_client_handshake= 1;
bool server_id_supplied = 0;
bool opt_endinfo, using_udf_functions;
@@ -1323,6 +1324,7 @@ void clean_up(bool print_message)
lex_free(); /* Free some memory */
item_create_cleanup();
set_var_free();
+ free_charsets();
if (!opt_noacl)
{
#ifdef HAVE_DLOPEN
@@ -5380,9 +5382,9 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
}
create_new_thread(thd);
}
-
+ DBUG_LEAVE;
decrement_handler_count();
- DBUG_RETURN(0);
+ return 0;
}
@@ -5478,8 +5480,9 @@ pthread_handler_t handle_connections_namedpipes(void *arg)
create_new_thread(thd);
}
CloseHandle(connectOverlapped.hEvent);
+ DBUG_LEAVE;
decrement_handler_count();
- DBUG_RETURN(0);
+ return 0;
}
#endif /* __NT__ */
@@ -5715,9 +5718,9 @@ error:
if (handle_connect_file_map) CloseHandle(handle_connect_file_map);
if (event_connect_answer) CloseHandle(event_connect_answer);
if (smem_event_connect_request) CloseHandle(smem_event_connect_request);
-
+ DBUG_LEAVE;
decrement_handler_count();
- DBUG_RETURN(0);
+ return 0;
}
#endif /* HAVE_SMEM */
#endif /* EMBEDDED_LIBRARY */
@@ -5942,12 +5945,12 @@ struct my_option my_long_options[] =
{"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax. This mode will also set transaction isolation level 'serializable'.", 0, 0, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"auto-increment-increment", OPT_AUTO_INCREMENT,
- "Auto-increment columns are incremented by this",
+ "Auto-increment columns are incremented by this.",
(uchar**) &global_system_variables.auto_increment_increment,
(uchar**) &max_system_variables.auto_increment_increment, 0, GET_ULONG,
OPT_ARG, 1, 1, 65535, 0, 1, 0 },
{"auto-increment-offset", OPT_AUTO_INCREMENT_OFFSET,
- "Offset added to Auto-increment columns. Used when auto-increment-increment != 1",
+ "Offset added to Auto-increment columns. Used when auto-increment-increment != 1.",
(uchar**) &global_system_variables.auto_increment_offset,
(uchar**) &max_system_variables.auto_increment_offset, 0, GET_ULONG, OPT_ARG,
1, 1, 65535, 0, 1, 0 },
@@ -5960,7 +5963,7 @@ struct my_option my_long_options[] =
(uchar**) &mysql_home_ptr, (uchar**) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"big-tables", OPT_BIG_TABLES,
- "Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors).",
+ "Allow big result sets by saving all temporary sets on file (solves most 'table full' errors).",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"bind-address", OPT_BIND_ADDRESS, "IP address to bind to.",
(uchar**) &my_bind_addr_str, (uchar**) &my_bind_addr_str, 0, GET_STR,
@@ -5968,11 +5971,10 @@ struct my_option my_long_options[] =
{"binlog_format", OPT_BINLOG_FORMAT,
"Does not have any effect without '--log-bin'. "
"Tell the master the form of binary logging to use: either 'row' for "
- "row-based binary logging, or 'statement' for statement-based binary "
+ "row-based binary logging, 'statement' for statement-based binary "
"logging, or 'mixed'. 'mixed' is statement-based binary logging except "
- "for those statements where only row-based is correct: those which "
- "involve user-defined functions (i.e. UDFs) or the UUID() function; for "
- "those, row-based binary logging is automatically used. "
+ "for statements where only row-based is correct: statements that involve "
+ "user-defined functions (i.e., UDFs) or the UUID() function. "
#ifdef HAVE_NDB_BINLOG
"If ndbcluster is enabled and binlog_format is `mixed', the format switches"
" to 'row' and back implicitly per each query accessing a NDB table."
@@ -5983,7 +5985,7 @@ struct my_option my_long_options[] =
"Tells the master it should log updates for the specified database, and exclude all others not explicitly mentioned.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"binlog-ignore-db", OPT_BINLOG_IGNORE_DB,
- "Tells the master that updates to the given database should not be logged tothe binary log.",
+ "Tells the master that updates to the given database should not be logged to the binary log.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"binlog-row-event-max-size", OPT_BINLOG_ROWS_EVENT_MAX_SIZE,
"The maximum size of a row-based binary log event in bytes. Rows will be "
@@ -6028,10 +6030,10 @@ struct my_option my_long_options[] =
(uchar**) &max_system_variables.completion_type, 0, GET_ULONG,
REQUIRED_ARG, 0, 0, 2, 0, 1, 0},
{"concurrent-insert", OPT_CONCURRENT_INSERT,
- "Use concurrent insert with MyISAM. Disable with --concurrent-insert=0",
+ "Use concurrent insert with MyISAM. Disable with --concurrent-insert=0.",
(uchar**) &myisam_concurrent_insert, (uchar**) &myisam_concurrent_insert,
0, GET_ULONG, OPT_ARG, 1, 0, 2, 0, 0, 0},
- {"console", OPT_CONSOLE, "Write error output on screen; Don't remove the console window on windows.",
+ {"console", OPT_CONSOLE, "Write error output on screen; don't remove the console window on windows.",
(uchar**) &opt_console, (uchar**) &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0,
0, 0, 0},
{"core-file", OPT_WANT_CORE, "Write core on errors.", 0, 0, 0, GET_NO_ARG,
@@ -6089,7 +6091,7 @@ struct my_option my_long_options[] =
{"delay-key-write", OPT_DELAY_KEY_WRITE, "Type of DELAY_KEY_WRITE.",
0,0,0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"delay-key-write-for-all-tables", OPT_DELAY_KEY_WRITE_ALL,
- "Don't flush key buffers between writes for any MyISAM table (Deprecated option, use --delay-key-write=all instead).",
+ "Don't flush key buffers between writes for any MyISAM table. (Deprecated option, use --delay-key-write=all instead.)",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_OPENSSL
{"des-key-file", OPT_DES_KEY_FILE,
@@ -6127,7 +6129,7 @@ struct my_option my_long_options[] =
/* See how it's handled in get_one_option() */
{"event-scheduler", OPT_EVENT_SCHEDULER, "Enable/disable the event scheduler.",
NULL, NULL, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
- {"exit-info", 'T', "Used for debugging; Use at your own risk!", 0, 0, 0,
+ {"exit-info", 'T', "Used for debugging. Use at your own risk.", 0, 0, 0,
GET_LONG, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"external-locking", OPT_USE_LOCKING, "Use system (external) locking (disabled by default). With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running. Disable with --skip-external-locking.",
(uchar**) &opt_external_locking, (uchar**) &opt_external_locking,
@@ -6146,11 +6148,11 @@ struct my_option my_long_options[] =
(uchar**) &extra_max_connections, 0, GET_ULONG, REQUIRED_ARG, 1, 1, 100000,
0, 1, 0},
{"gdb", OPT_DEBUGGING,
- "Set up signals usable for debugging",
+ "Set up signals usable for debugging.",
(uchar**) &opt_debugging, (uchar**) &opt_debugging,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"general_log", OPT_GENERAL_LOG,
- "Enable|disable general log", (uchar**) &opt_log,
+ "Enable/disable general log.", (uchar**) &opt_log,
(uchar**) &opt_log, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_LARGE_PAGES
{"large-pages", OPT_ENABLE_LARGE_PAGES, "Enable support for large pages. \
@@ -6159,9 +6161,10 @@ Disable with --skip-large-pages.",
0, 0, 0},
#endif
{"ignore-builtin-innodb", OPT_IGNORE_BUILTIN_INNODB ,
- "Disable initialization of builtin InnoDB plugin",
+ "Disable initialization of builtin InnoDB plugin.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"init-connect", OPT_INIT_CONNECT, "Command(s) that are executed for each new connection",
+ {"init-connect", OPT_INIT_CONNECT,
+ "Command(s) that are executed for each new connection.",
(uchar**) &opt_init_connect, (uchar**) &opt_init_connect, 0, GET_STR_ALLOC,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifndef DISABLE_GRANT_OPTIONS
@@ -6185,7 +6188,7 @@ each time the SQL thread starts.",
(uchar**) &lc_time_names_name,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{"local-infile", OPT_LOCAL_INFILE,
- "Enable/disable LOAD DATA LOCAL INFILE (takes values 1|0).",
+ "Enable/disable LOAD DATA LOCAL INFILE (takes values 1 or 0).",
(uchar**) &opt_local_infile,
(uchar**) &opt_local_infile, 0, GET_BOOL, OPT_ARG,
1, 0, 0, 0, 0, 0},
@@ -6223,8 +6226,9 @@ each time the SQL thread starts.",
*/
{"log-bin-trust-function-creators", OPT_LOG_BIN_TRUST_FUNCTION_CREATORS,
"If equal to 0 (the default), then when --log-bin is used, creation of "
- "a stored function (or trigger) is allowed only to users having the SUPER privilege "
- "and only if this stored function (trigger) may not break binary logging."
+ "a stored function (or trigger) is allowed only to users having the SUPER "
+ "privilege, and only if this stored function (trigger) may not break "
+ "binary logging. "
"Note that if ALL connections to this server ALWAYS use row-based binary "
"logging, the security issues do not exist and the binary logging cannot "
"break, so you can safely set this to 1."
@@ -6281,7 +6285,7 @@ each time the SQL thread starts.",
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"log-tc", OPT_LOG_TC,
"Path to transaction coordinator log (used for transactions that affect "
- "more than one storage engine, when binary log is disabled)",
+ "more than one storage engine, when binary log is disabled).",
(uchar**) &opt_tc_log_file, (uchar**) &opt_tc_log_file, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_MMAP
@@ -6291,8 +6295,8 @@ each time the SQL thread starts.",
TC_LOG_PAGE_SIZE, 0},
#endif
{"log-update", OPT_UPDATE_LOG,
- "The update log is deprecated since version 5.0, is replaced by the binary \
-log and this option justs turns on --log-bin instead.",
+ "The update log is deprecated since version 5.0, is replaced by the binary "
+ "log and this option just turns on --log-bin instead.",
(uchar**) &opt_update_logname, (uchar**) &opt_update_logname, 0, GET_STR,
OPT_ARG, 0, 0, 0, 0, 0, 0},
{"log-warnings", 'W', "Log some not critical warnings to the log file.",
@@ -6305,7 +6309,9 @@ log and this option justs turns on --log-bin instead.",
(uchar**) &max_system_variables.low_priority_updates,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"master-connect-retry", OPT_MASTER_CONNECT_RETRY,
- "The number of seconds the slave thread will sleep before retrying to connect to the master in case the master goes down or the connection is lost.",
+ "The number of seconds the slave thread will sleep before retrying to "
+ "connect to the master, in case the master goes down or the connection "
+ "is lost.",
(uchar**) &master_connect_retry, (uchar**) &master_connect_retry, 0, GET_UINT,
REQUIRED_ARG, 60, 0, 0, 0, 0, 0},
{"master-host", OPT_MASTER_HOST,
@@ -6318,7 +6324,9 @@ thread is in the master's binlogs.",
(uchar**) &master_info_file, (uchar**) &master_info_file, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"master-password", OPT_MASTER_PASSWORD,
- "The password the slave thread will authenticate with when connecting to the master. If not set, an empty password is assumed.The value in master.info will take precedence if it can be read.",
+ "The password the slave thread will authenticate with when connecting to "
+ "the master. If not set, an empty password is assumed. The value in "
+ "master.info will take precedence if it can be read.",
(uchar**)&master_password, (uchar**)&master_password, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"master-port", OPT_MASTER_PORT,
@@ -6342,8 +6350,8 @@ thread is in the master's binlogs.",
(uchar**) &master_ssl_capath, (uchar**) &master_ssl_capath, 0, GET_STR, OPT_ARG,
0, 0, 0, 0, 0, 0},
{"master-ssl-cert", OPT_MASTER_SSL_CERT,
- "Master SSL certificate file name. Only applies if you have enabled \
-master-ssl",
+ "Master SSL certificate file name. Only applies if you have enabled "
+ "master-ssl.",
(uchar**) &master_ssl_cert, (uchar**) &master_ssl_cert, 0, GET_STR, OPT_ARG,
0, 0, 0, 0, 0, 0},
{"master-ssl-cipher", OPT_MASTER_SSL_CIPHER,
@@ -6417,14 +6425,14 @@ master-ssl",
#ifdef HAVE_NDB_BINLOG
{"ndb-report-thresh-binlog-epoch-slip", OPT_NDB_REPORT_THRESH_BINLOG_EPOCH_SLIP,
"Threshold on number of epochs to be behind before reporting binlog status. "
- "E.g. 3 means that if the difference between what epoch has been received "
+ "E.g., 3 means that if the difference between what epoch has been received "
"from the storage nodes and what has been applied to the binlog is 3 or more, "
"a status message will be sent to the cluster log.",
(uchar**) &ndb_report_thresh_binlog_epoch_slip,
(uchar**) &ndb_report_thresh_binlog_epoch_slip,
0, GET_ULONG, REQUIRED_ARG, 3, 0, 256, 0, 0, 0},
{"ndb-report-thresh-binlog-mem-usage", OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE,
- "Threshold on percentage of free memory before reporting binlog status. E.g. "
+ "Threshold on percentage of free memory before reporting binlog status. E.g., "
"10 means that if amount of available memory for receiving binlog data from "
"the storage nodes goes below 10%, "
"a status message will be sent to the cluster log.",
@@ -6439,7 +6447,7 @@ master-ssl",
(uchar**) &global_system_variables.ndb_use_exact_count,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
{"ndb_use_exact_count", OPT_NDB_USE_EXACT_COUNT,
- "same as --ndb-use-exact-count.",
+ "Same as --ndb-use-exact-count.",
(uchar**) &global_system_variables.ndb_use_exact_count,
(uchar**) &global_system_variables.ndb_use_exact_count,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
@@ -6450,7 +6458,7 @@ master-ssl",
(uchar**) &global_system_variables.ndb_use_transactions,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
{"ndb_use_transactions", OPT_NDB_USE_TRANSACTIONS,
- "same as --ndb-use-transactions.",
+ "Same as --ndb-use-transactions.",
(uchar**) &global_system_variables.ndb_use_transactions,
(uchar**) &global_system_variables.ndb_use_transactions,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
@@ -6465,7 +6473,9 @@ master-ssl",
(uchar**) &opt_ndb_optimized_node_selection,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
{ "ndb-cache-check-time", OPT_NDB_CACHE_CHECK_TIME,
- "A dedicated thread is created to, at the given millisecons interval, invalidate the query cache if another MySQL server in the cluster has changed the data in the database.",
+ "A dedicated thread is created to, at the given milliseconds interval, "
+ "invalidate the query cache if another MySQL server in the cluster has "
+ "changed the data in the database.",
(uchar**) &opt_ndb_cache_check_time, (uchar**) &opt_ndb_cache_check_time, 0, GET_ULONG, REQUIRED_ARG,
0, 0, LONG_TIMEOUT, 0, 1, 0},
{"ndb-index-stat-enable", OPT_NDB_INDEX_STAT_ENABLE,
@@ -6480,12 +6490,13 @@ master-ssl",
(uchar**) &global_system_variables.ndb_use_copying_alter_table,
(uchar**) &global_system_variables.ndb_use_copying_alter_table,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"new", 'n', "Use very new possible 'unsafe' functions.",
+ {"new", 'n', "Use very new, possibly 'unsafe', functions.",
(uchar**) &global_system_variables.new_mode,
(uchar**) &max_system_variables.new_mode,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifdef NOT_YET
- {"no-mix-table-types", OPT_NO_MIX_TYPE, "Don't allow commands with uses two different table types.",
+ {"no-mix-table-types", OPT_NO_MIX_TYPE,
+ "Don't allow commands that use two different table types.",
(uchar**) &opt_no_mix_types, (uchar**) &opt_no_mix_types, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
#endif
@@ -6499,10 +6510,12 @@ master-ssl",
(uchar**) &max_system_variables.old_passwords, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
{"one-thread", OPT_ONE_THREAD,
- "(deprecated): Only use one thread (for debugging under Linux). Use thread-handling=no-threads instead",
+ "(Deprecated): Only use one thread (for debugging under Linux). Use "
+ "thread-handling=no-threads instead.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"old-style-user-limits", OPT_OLD_STYLE_USER_LIMITS,
- "Enable old-style user limits (before 5.0.3 user resources were counted per each user+host vs. per account)",
+ "Enable old-style user limits (before 5.0.3, user resources were counted "
+ "per each user+host vs. per account).",
(uchar**) &opt_old_style_user_limits, (uchar**) &opt_old_style_user_limits,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"pid-file", OPT_PID_FILE, "Pid file used by safe_mysqld.",
@@ -6518,10 +6531,10 @@ master-ssl",
(uchar**) &mysqld_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"port-open-timeout", OPT_PORT_OPEN_TIMEOUT,
"Maximum time in seconds to wait for the port to become free. "
- "(Default: no wait)", (uchar**) &mysqld_port_timeout,
+ "(Default: No wait).", (uchar**) &mysqld_port_timeout,
(uchar**) &mysqld_port_timeout, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
- {"profiling_history_size", OPT_PROFILING, "Limit of query profiling memory",
+ {"profiling_history_size", OPT_PROFILING, "Limit of query profiling memory.",
(uchar**) &global_system_variables.profiling_history_size,
(uchar**) &max_system_variables.profiling_history_size,
0, GET_ULONG, REQUIRED_ARG, 15, 0, 100, 0, 0, 0},
@@ -6550,7 +6563,10 @@ thread is in the relay logs.",
"Tells the slave thread to not replicate to the specified database. To specify more than one database to ignore, use the directive multiple times, once for each database. This option will not work if you use cross database updates. If you need cross database updates to work, make sure you have 3.23.28 or later, and use replicate-wild-ignore-table=db_name.%. ",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-ignore-table", OPT_REPLICATE_IGNORE_TABLE,
- "Tells the slave thread to not replicate to the specified table. To specify more than one table to ignore, use the directive multiple times, once for each table. This will work for cross-datbase updates, in contrast to replicate-ignore-db.",
+ "Tells the slave thread to not replicate to the specified table. To specify "
+ "more than one table to ignore, use the directive multiple times, once for "
+ "each table. This will work for cross-database updates, in contrast to "
+ "replicate-ignore-db.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-rewrite-db", OPT_REPLICATE_REWRITE_DB,
"Updates to a database with a different name than the original. Example: replicate-rewrite-db=master_db_name->slave_db_name.",
@@ -6572,7 +6588,13 @@ Can't be set to 1 if --log-slave-updates is used.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
// In replication, we may need to tell the other servers how to connect
{"report-host", OPT_REPORT_HOST,
- "Hostname or IP of the slave to be reported to to the master during slave registration. Will appear in the output of SHOW SLAVE HOSTS. Leave unset if you do not want the slave to register itself with the master. Note that it is not sufficient for the master to simply read the IP of the slave off the socket once the slave connects. Due to NAT and other routing issues, that IP may not be valid for connecting to the slave from the master or other hosts.",
+ "Hostname or IP of the slave to be reported to the master during slave "
+ "registration. Will appear in the output of SHOW SLAVE HOSTS. Leave unset "
+ "if you do not want the slave to register itself with the master. Note that "
+ "it is not sufficient for the master to simply read the IP of the slave "
+ "from the socket once the slave connects. Due to NAT and other routing "
+ "issues, that IP may not be valid for connecting to the slave from the "
+ "master or other hosts.",
(uchar**) &report_host, (uchar**) &report_host, 0, GET_STR, REQUIRED_ARG, 0, 0,
0, 0, 0, 0},
{"report-password", OPT_REPORT_PASSWORD, "Undocumented.",
@@ -6591,7 +6613,7 @@ Can't be set to 1 if --log-slave-updates is used.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifndef TO_BE_DELETED
{"safe-show-database", OPT_SAFE_SHOW_DB,
- "Deprecated option; use GRANT SHOW DATABASES instead...",
+ "Deprecated option; use GRANT SHOW DATABASES instead.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#endif
{"safe-user-create", OPT_SAFE_USER_CREATE,
@@ -6605,7 +6627,7 @@ Can't be set to 1 if --log-slave-updates is used.",
(uchar**) &opt_secure_auth, (uchar**) &opt_secure_auth, 0, GET_BOOL, NO_ARG,
my_bool(0), 0, 0, 0, 0, 0},
{"secure-file-priv", OPT_SECURE_FILE_PRIV,
- "Limit LOAD DATA, SELECT ... OUTFILE, and LOAD_FILE() to files within specified directory",
+ "Limit LOAD DATA, SELECT ... OUTFILE, and LOAD_FILE() to files within specified directory.",
(uchar**) &opt_secure_file_priv, (uchar**) &opt_secure_file_priv, 0,
GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"server-id", OPT_SERVER_ID,
@@ -6613,7 +6635,8 @@ Can't be set to 1 if --log-slave-updates is used.",
(uchar**) &server_id, (uchar**) &server_id, 0, GET_ULONG, REQUIRED_ARG, 0, 0, UINT_MAX32,
0, 0, 0},
{"set-variable", 'O',
- "Change the value of a variable. Please note that this option is deprecated;you can set variables directly with --variable-name=value.",
+ "Change the value of a variable. Please note that this option is deprecated; "
+ "you can set variables directly with --variable-name=value.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_SMEM
{"shared-memory", OPT_ENABLE_SHARED_MEMORY,
@@ -6626,12 +6649,12 @@ Can't be set to 1 if --log-slave-updates is used.",
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
{"show-slave-auth-info", OPT_SHOW_SLAVE_AUTH_INFO,
- "Show user and password in SHOW SLAVE HOSTS on this master",
+ "Show user and password in SHOW SLAVE HOSTS on this master.",
(uchar**) &opt_show_slave_auth_info, (uchar**) &opt_show_slave_auth_info, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifndef DISABLE_GRANT_OPTIONS
{"skip-grant-tables", OPT_SKIP_GRANT,
- "Start without grant tables. This gives all users FULL ACCESS to all tables!",
+ "Start without grant tables. This gives all users FULL ACCESS to all tables.",
(uchar**) &opt_noacl, (uchar**) &opt_noacl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
0},
#endif
@@ -6646,7 +6669,7 @@ Can't be set to 1 if --log-slave-updates is used.",
{"skip-networking", OPT_SKIP_NETWORKING,
"Don't allow connection with TCP/IP.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0,
0, 0, 0},
- {"skip-new", OPT_SKIP_NEW, "Don't use new, possible wrong routines.",
+ {"skip-new", OPT_SKIP_NEW, "Don't use new, possibly wrong routines.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifndef DBUG_OFF
#ifdef SAFEMALLOC
@@ -6664,7 +6687,7 @@ Can't be set to 1 if --log-slave-updates is used.",
{"skip-stack-trace", OPT_SKIP_STACK_TRACE,
"Don't print a stack trace on failure.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0,
0, 0, 0, 0},
- {"skip-symlink", OPT_SKIP_SYMLINKS, "Don't allow symlinking of tables. Deprecated option. Use --skip-symbolic-links instead.",
+ {"skip-symlink", OPT_SKIP_SYMLINKS, "Don't allow symlinking of tables. Deprecated option. Use --skip-symbolic-links instead.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"skip-thread-priority", OPT_SKIP_PRIOR,
"Don't give threads different priorities. Deprecated option.", 0, 0, 0, GET_NO_ARG, NO_ARG,
@@ -6679,11 +6702,11 @@ replicating a LOAD DATA INFILE command.",
"Tells the slave thread to continue replication when a query event returns an error from the provided list.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"slave-exec-mode", OPT_SLAVE_EXEC_MODE,
- "Modes for how replication events should be executed. Legal values are STRICT (default) and IDEMPOTENT. In IDEMPOTENT mode, replication will not stop for operations that are idempotent. In STRICT mode, replication will stop on any unexpected difference between the master and the slave.",
+ "Modes for how replication events should be executed. Legal values are STRICT (default) and IDEMPOTENT. In IDEMPOTENT mode, replication will not stop for operations that are idempotent. In STRICT mode, replication will stop on any unexpected difference between the master and the slave.",
(uchar**) &slave_exec_mode_str, (uchar**) &slave_exec_mode_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
{"slow-query-log", OPT_SLOW_LOG,
- "Enable|disable slow query log", (uchar**) &opt_slow_log,
+ "Enable/disable slow query log.", (uchar**) &opt_slow_log,
(uchar**) &opt_slow_log, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"socket", OPT_SOCKET, "Socket file to use for connection.",
(uchar**) &mysqld_unix_port, (uchar**) &mysqld_unix_port, 0, GET_STR,
@@ -6753,8 +6776,8 @@ log and this option does nothing anymore.",
(uchar**) &opt_expect_abort, (uchar**) &opt_expect_abort,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"timed_mutexes", OPT_TIMED_MUTEXES,
- "Specify whether to time mutexes (only InnoDB mutexes are currently supported)",
- (uchar**) &timed_mutexes, (uchar**) &timed_mutexes, 0, GET_BOOL, NO_ARG, 0,
+ "Specify whether to time mutexes (only InnoDB mutexes are currently supported).",
+ (uchar**) &timed_mutexes, (uchar**) &timed_mutexes, 0, GET_BOOL, NO_ARG, 0,
0, 0, 0, 0, 0},
{"tmpdir", 't',
"Path for temporary files. Several paths may be specified, separated by a "
@@ -6774,7 +6797,7 @@ log and this option does nothing anymore.",
IF_VALGRIND(0,1), 0, 0, 0, 0, 0},
{"user", 'u', "Run mysqld daemon as user.", 0, 0, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
- {"verbose", 'v', "Used with --help option for detailed help",
+ {"verbose", 'v', "Used with --help option for detailed help.",
(uchar**) &opt_verbose, (uchar**) &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
0, 0},
{"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG,
@@ -6792,7 +6815,7 @@ log and this option does nothing anymore.",
(uchar**) &binlog_cache_size, (uchar**) &binlog_cache_size, 0, GET_ULONG,
REQUIRED_ARG, 32*1024L, IO_SIZE, (longlong) ULONG_MAX, 0, IO_SIZE, 0},
{"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE,
- "Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread!",
+ "Size of tree cache used in bulk insert optimization. Note that this is a limit per thread.",
(uchar**) &global_system_variables.bulk_insert_buff_size,
(uchar**) &max_system_variables.bulk_insert_buff_size,
0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, (longlong) ULONG_MAX, 0, 1, 0},
@@ -6801,7 +6824,7 @@ log and this option does nothing anymore.",
(uchar**) &connect_timeout, (uchar**) &connect_timeout,
0, GET_ULONG, REQUIRED_ARG, CONNECT_TIMEOUT, 2, LONG_TIMEOUT, 0, 1, 0 },
{ "date_format", OPT_DATE_FORMAT,
- "The DATE format (For future).",
+ "The DATE format (for future).",
(uchar**) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
(uchar**) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -6843,7 +6866,7 @@ log and this option does nothing anymore.",
(uchar**) &flush_time, (uchar**) &flush_time, 0, GET_ULONG, REQUIRED_ARG,
FLUSH_TIME, 0, LONG_TIMEOUT, 0, 1, 0},
{ "ft_boolean_syntax", OPT_FT_BOOLEAN_SYNTAX,
- "List of operators for MATCH ... AGAINST ( ... IN BOOLEAN MODE)",
+ "List of operators for MATCH ... AGAINST ( ... IN BOOLEAN MODE).",
0, 0, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "ft_max_word_len", OPT_FT_MAX_WORD_LEN,
@@ -6855,7 +6878,7 @@ log and this option does nothing anymore.",
(uchar**) &ft_min_word_len, (uchar**) &ft_min_word_len, 0, GET_ULONG,
REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, 0, 1, 0},
{ "ft_query_expansion_limit", OPT_FT_QUERY_EXPANSION_LIMIT,
- "Number of best matches to use for query expansion",
+ "Number of best matches to use for query expansion.",
(uchar**) &ft_query_expansion_limit, (uchar**) &ft_query_expansion_limit, 0, GET_ULONG,
REQUIRED_ARG, 20, 0, 1000, 0, 1, 0},
{ "ft_stopword_file", OPT_FT_STOPWORD_FILE,
@@ -6863,7 +6886,7 @@ log and this option does nothing anymore.",
(uchar**) &ft_stopword_file, (uchar**) &ft_stopword_file, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "group_concat_max_len", OPT_GROUP_CONCAT_MAX_LEN,
- "The maximum length of the result of function group_concat.",
+ "The maximum length of the result of function group_concat.",
(uchar**) &global_system_variables.group_concat_max_len,
(uchar**) &max_system_variables.group_concat_max_len, 0, GET_ULONG,
REQUIRED_ARG, 1024, 4, (longlong) ULONG_MAX, 0, 1, 0},
@@ -6884,26 +6907,32 @@ log and this option does nothing anymore.",
(uchar**) &max_system_variables.keep_files_on_create,
0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"key_buffer_size", OPT_KEY_BUFFER_SIZE,
- "The size of the buffer used for index blocks for MyISAM tables. Increase this to get better index handling (for all reads and multiple writes) to as much as you can afford; 64M on a 256M machine that mainly runs MySQL is quite common.",
+ "The size of the buffer used for index blocks for MyISAM tables. Increase "
+ "this to get better index handling (for all reads and multiple writes) to "
+ "as much as you can afford; 1GB on a 4GB machine that mainly runs MySQL is "
+ "quite common.",
(uchar**) &dflt_key_cache_var.param_buff_size,
(uchar**) 0,
0, (GET_ULL | GET_ASK_ADDR),
REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, SIZE_T_MAX, MALLOC_OVERHEAD,
IO_SIZE, 0},
{"key_cache_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD,
- "This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache",
+ "This characterizes the number of hits a hot block has to be untouched "
+ "until it is considered aged enough to be downgraded to a warm block. "
+ "This specifies the percentage ratio of that number of hits to the total "
+ "number of blocks in key cache.",
(uchar**) &dflt_key_cache_var.param_age_threshold,
(uchar**) 0,
0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG,
300, 100, (longlong) ULONG_MAX, 0, 100, 0},
{"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE,
- "The default size of key cache blocks",
+ "The default size of key cache blocks.",
(uchar**) &dflt_key_cache_var.param_block_size,
(uchar**) 0,
0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG,
KEY_CACHE_BLOCK_SIZE, 512, 1024 * 16, 0, 512, 0},
{"key_cache_division_limit", OPT_KEY_CACHE_DIVISION_LIMIT,
- "The minimum percentage of warm blocks in key cache",
+ "The minimum percentage of warm blocks in key cache.",
(uchar**) &dflt_key_cache_var.param_division_limit,
(uchar**) 0,
0, (GET_ULONG | GET_ASK_ADDR) , REQUIRED_ARG, 100,
@@ -6924,8 +6953,9 @@ log and this option does nothing anymore.",
(uchar**) &opt_slow_logname, (uchar**) &opt_slow_logname, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"long_query_time", OPT_LONG_QUERY_TIME,
- "Log all queries that have taken more than long_query_time seconds to execute to file. "
- "The argument will be treated as a decimal value with microsecond precission.",
+ "Log all queries that have taken more than long_query_time seconds to "
+ "execute. The argument will be treated as a decimal value with "
+ "microsecond precision.",
(uchar**) &long_query_time, (uchar**) &long_query_time, 0, GET_DOUBLE,
REQUIRED_ARG, 10, 0, LONG_TIMEOUT, 0, 0, 0},
{"log-slow-time", OPT_LONG_QUERY_TIME,
@@ -6934,7 +6964,9 @@ log and this option does nothing anymore.",
(uchar**) &long_query_time, (uchar**) &long_query_time, 0, GET_DOUBLE,
REQUIRED_ARG, 10, 0, LONG_TIMEOUT, 0, 0, 0},
{"lower_case_table_names", OPT_LOWER_CASE_TABLE_NAMES,
- "If set to 1 table names are stored in lowercase on disk and table names will be case-insensitive. Should be set to 2 if you are using a case insensitive file system",
+ "If set to 1, table names are stored in lowercase on disk and table names "
+ "will be case-insensitive. Should be set to 2 if you are using a case-"
+ "insensitive file system.",
(uchar**) &lower_case_table_names,
(uchar**) &lower_case_table_names, 0, GET_UINT, OPT_ARG,
#ifdef FN_NO_CASE_SENCE
@@ -6944,7 +6976,7 @@ log and this option does nothing anymore.",
#endif
, 0, 2, 0, 1, 0},
{"max_allowed_packet", OPT_MAX_ALLOWED_PACKET,
- "Max packetlength to send/receive from to server.",
+ "The maximum packet length to send to or receive from server.",
(uchar**) &global_system_variables.max_allowed_packet,
(uchar**) &max_system_variables.max_allowed_packet, 0, GET_ULONG,
REQUIRED_ARG, 1024*1024L, 1024, 1024L*1024L*1024L, 0, 1024, 0},
@@ -7003,7 +7035,7 @@ The minimum value for this variable is 4096.",
(uchar**) &max_relay_log_size, (uchar**) &max_relay_log_size, 0, GET_ULONG,
REQUIRED_ARG, 0L, 0L, 1024*1024L*1024L, 0, IO_SIZE, 0},
{ "max_seeks_for_key", OPT_MAX_SEEKS_FOR_KEY,
- "Limit assumed max number of seeks when looking up rows based on a key",
+ "Limit assumed max number of seeks when looking up rows based on a key.",
(uchar**) &global_system_variables.max_seeks_for_key,
(uchar**) &max_system_variables.max_seeks_for_key, 0, GET_ULONG,
REQUIRED_ARG, (longlong) ULONG_MAX, 1, (longlong) ULONG_MAX, 0, 1, 0 },
@@ -7062,7 +7094,9 @@ The minimum value for this variable is 4096.",
(uchar**) &myisam_mmap_size, (uchar**) &myisam_mmap_size, 0,
GET_ULL, REQUIRED_ARG, SIZE_T_MAX, MEMMAP_EXTRA_MARGIN, SIZE_T_MAX, 0, 1, 0},
{"myisam_repair_threads", OPT_MYISAM_REPAIR_THREADS,
- "Number of threads to use when repairing MyISAM tables. The value of 1 disables parallel repair.",
+ "Specifies whether several threads should be used when repairing MyISAM "
+ "tables. For values > 1, one thread is used per index. The value of 1 "
+ "disables parallel repair.",
(uchar**) &global_system_variables.myisam_repair_threads,
(uchar**) &max_system_variables.myisam_repair_threads, 0,
GET_ULONG, REQUIRED_ARG, 1, 1, (longlong) ULONG_MAX, 0, 1, 0},
@@ -7070,9 +7104,9 @@ The minimum value for this variable is 4096.",
"The buffer that is allocated when sorting the index when doing a REPAIR or when creating indexes with CREATE INDEX or ALTER TABLE.",
(uchar**) &global_system_variables.myisam_sort_buff_size,
(uchar**) &max_system_variables.myisam_sort_buff_size, 0,
- GET_ULONG, REQUIRED_ARG, 8192*1024, 4, (longlong) ULONG_MAX, 0, 1, 0},
+ GET_ULONG, REQUIRED_ARG, 8192 * 1024, 4096, (longlong) ULONG_MAX, 0, 1, 0},
{"myisam_use_mmap", OPT_MYISAM_USE_MMAP,
- "Use memory mapping for reading and writing MyISAM tables",
+ "Use memory mapping for reading and writing MyISAM tables.",
(uchar**) &opt_myisam_use_mmap,
(uchar**) &opt_myisam_use_mmap, 0, GET_BOOL, NO_ARG, 0,
0, 0, 0, 0, 0},
@@ -7099,7 +7133,8 @@ The minimum value for this variable is 4096.",
GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, (longlong) ULONG_MAX,
0, 1, 0},
{"net_write_timeout", OPT_NET_WRITE_TIMEOUT,
- "Number of seconds to wait for a block to be written to a connection before aborting the write.",
+ "Number of seconds to wait for a block to be written to a connection before "
+ "aborting the write.",
(uchar**) &global_system_variables.net_write_timeout,
(uchar**) &max_system_variables.net_write_timeout, 0, GET_ULONG,
REQUIRED_ARG, NET_WRITE_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
@@ -7142,12 +7177,12 @@ The minimum value for this variable is 4096.",
(uchar**) &opt_plugin_load, (uchar**) &opt_plugin_load, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"preload_buffer_size", OPT_PRELOAD_BUFFER_SIZE,
- "The size of the buffer that is allocated when preloading indexes",
+ "The size of the buffer that is allocated when preloading indexes.",
(uchar**) &global_system_variables.preload_buff_size,
(uchar**) &max_system_variables.preload_buff_size, 0, GET_ULONG,
REQUIRED_ARG, 32*1024L, 1024, 1024*1024*1024L, 0, 1, 0},
{"query_alloc_block_size", OPT_QUERY_ALLOC_BLOCK_SIZE,
- "Allocation block size for query parsing and execution",
+ "Allocation block size for query parsing and execution.",
(uchar**) &global_system_variables.query_alloc_block_size,
(uchar**) &max_system_variables.query_alloc_block_size, 0, GET_ULONG,
REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, (longlong) ULONG_MAX, 0, 1024,
@@ -7158,7 +7193,8 @@ The minimum value for this variable is 4096.",
(uchar**) &query_cache_limit, (uchar**) &query_cache_limit, 0, GET_ULONG,
REQUIRED_ARG, 1024*1024L, 0, (longlong) ULONG_MAX, 0, 1, 0},
{"query_cache_min_res_unit", OPT_QUERY_CACHE_MIN_RES_UNIT,
- "minimal size of unit in wich space for results is allocated (last unit will be trimed after writing all result data.",
+ "Minimal size of unit in which space for results is allocated (last unit "
+ "will be trimmed after writing all result data).",
(uchar**) &query_cache_min_res_unit, (uchar**) &query_cache_min_res_unit,
0, GET_ULONG, REQUIRED_ARG, QUERY_CACHE_MIN_RESULT_DATA_SIZE,
0, (longlong) ULONG_MAX, 0, 1, 0},
@@ -7174,19 +7210,19 @@ The minimum value for this variable is 4096.",
(uchar**) &max_system_variables.query_cache_type,
0, GET_ULONG, REQUIRED_ARG, 1, 0, 2, 0, 1, 0},
{"query_cache_wlock_invalidate", OPT_QUERY_CACHE_WLOCK_INVALIDATE,
- "Invalidate queries in query cache on LOCK for write",
+ "Invalidate queries in query cache on LOCK for write.",
(uchar**) &global_system_variables.query_cache_wlock_invalidate,
(uchar**) &max_system_variables.query_cache_wlock_invalidate,
0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
#endif /*HAVE_QUERY_CACHE*/
{"query_prealloc_size", OPT_QUERY_PREALLOC_SIZE,
- "Persistent buffer for query parsing and execution",
+ "Persistent buffer for query parsing and execution.",
(uchar**) &global_system_variables.query_prealloc_size,
(uchar**) &max_system_variables.query_prealloc_size, 0, GET_ULONG,
REQUIRED_ARG, QUERY_ALLOC_PREALLOC_SIZE, QUERY_ALLOC_PREALLOC_SIZE,
(longlong) ULONG_MAX, 0, 1024, 0},
{"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE,
- "Allocation block size for storing ranges during optimization",
+ "Allocation block size for storing ranges during optimization.",
(uchar**) &global_system_variables.range_alloc_block_size,
(uchar**) &max_system_variables.range_alloc_block_size, 0, GET_ULONG,
REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, RANGE_ALLOC_BLOCK_SIZE,
@@ -7198,12 +7234,15 @@ The minimum value for this variable is 4096.",
128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, INT_MAX32, MALLOC_OVERHEAD, IO_SIZE,
0},
{"read_only", OPT_READONLY,
- "Make all non-temporary tables read-only, with the exception for replication (slave) threads and users with the SUPER privilege",
+ "Make all non-temporary tables read-only, with the exception of replication "
+ "(slave) threads and users with the SUPER privilege.",
(uchar**) &opt_readonly,
(uchar**) &opt_readonly,
0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
{"read_rnd_buffer_size", OPT_RECORD_RND_BUFFER,
- "When reading rows in sorted order after a sort, the rows are read through this buffer to avoid a disk seeks. If not set, then it's set to the value of record_buffer.",
+ "When reading rows in sorted order after a sort, the rows are read through "
+ "this buffer to avoid disk seeks. If not set, then it's set to the value of "
+ "record_buffer.",
(uchar**) &global_system_variables.read_rnd_buff_size,
(uchar**) &max_system_variables.read_rnd_buff_size, 0,
GET_ULONG, REQUIRED_ARG, 256*1024L, IO_SIZE*2+MALLOC_OVERHEAD,
@@ -7286,7 +7325,8 @@ The minimum value for this variable is 4096.",
DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0},
#if HAVE_POOL_OF_THREADS == 1
{"thread_pool_size", OPT_THREAD_CACHE_SIZE,
- "How many threads we should create to handle query requests in case of 'thread_handling=pool-of-threads'",
+ "How many threads we should create to handle query requests in case of "
+ "'thread_handling=pool-of-threads'.",
(uchar**) &thread_pool_size, (uchar**) &thread_pool_size, 0, GET_ULONG,
REQUIRED_ARG, 20, 1, 16384, 0, 1, 0},
#endif
@@ -7306,20 +7346,20 @@ The minimum value for this variable is 4096.",
(uchar**) &max_system_variables.tmp_table_size, 0, GET_ULL,
REQUIRED_ARG, 16*1024*1024L, 0, MAX_MEM_TABLE_SIZE, 0, 1, 0},
{"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE,
- "Allocation block size for transactions to be stored in binary log",
+ "Allocation block size for transactions to be stored in binary log.",
(uchar**) &global_system_variables.trans_alloc_block_size,
(uchar**) &max_system_variables.trans_alloc_block_size, 0, GET_ULONG,
REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, (longlong) ULONG_MAX, 0, 1024,
0},
{"transaction_prealloc_size", OPT_TRANS_PREALLOC_SIZE,
- "Persistent buffer for transactions to be stored in binary log",
+ "Persistent buffer for transactions to be stored in binary log.",
(uchar**) &global_system_variables.trans_prealloc_size,
(uchar**) &max_system_variables.trans_prealloc_size, 0, GET_ULONG,
REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, (longlong) ULONG_MAX, 0,
1024, 0},
{"thread_handling", OPT_THREAD_HANDLING,
- "Define threads usage for handling queries: "
- "one-thread-per-connection or no-threads",
+ "Define threads usage for handling queries: "
+ "one-thread-per-connection or no-threads.",
(uchar**) &opt_thread_handling, (uchar**) &opt_thread_handling,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"updatable_views_with_limit", OPT_UPDATABLE_VIEWS_WITH_LIMIT,
@@ -7334,7 +7374,11 @@ The minimum value for this variable is 4096.",
REQUIRED_ARG, NET_WAIT_TIMEOUT, 1, IF_WIN(INT_MAX32/1000, LONG_TIMEOUT),
0, 1, 0},
{"binlog-direct-non-transactional-updates", OPT_BINLOG_DIRECT_NON_TRANS_UPDATE,
- "Causes updates to non-transactional engines using statement format to be written directly to binary log. Before using this option make sure that there are no dependencies between transactional and non-transactional tables such as in the statement INSERT INTO t_myisam SELECT * FROM t_innodb; otherwise, slaves may diverge from the master.",
+ "Causes updates to non-transactional engines using statement format to be "
+ "written directly to binary log. Before using this option, make sure that "
+ "there are no dependencies between transactional and non-transactional "
+ "tables such as in the statement INSERT INTO t_myisam SELECT * FROM "
+ "t_innodb; otherwise, slaves may diverge from the master.",
(uchar**) &global_system_variables.binlog_direct_non_trans_update, (uchar**) &max_system_variables.binlog_direct_non_trans_update, 0, GET_BOOL, NO_ARG, 0,
0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
@@ -7387,7 +7431,8 @@ static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff)
var->type= SHOW_MY_BOOL;
pthread_mutex_lock(&LOCK_active_mi);
var->value= buff;
- *((my_bool *)buff)= (my_bool) (active_mi && active_mi->slave_running &&
+ *((my_bool *)buff)= (my_bool) (active_mi &&
+ active_mi->slave_running == MYSQL_SLAVE_RUN_CONNECT &&
active_mi->rli.slave_running);
pthread_mutex_unlock(&LOCK_active_mi);
return 0;
@@ -7851,27 +7896,27 @@ static void usage(void)
default_collation_name= (char*) default_charset_info->name;
print_version();
puts("\
-Copyright (C) 2000-2008 MySQL AB, by Monty and others\n\
+Copyright (C) 2000-2008 MySQL AB, by Monty and others.\n\
Copyright (C) 2008 Sun Microsystems, Inc.\n\
This software comes with ABSOLUTELY NO WARRANTY. This is free software,\n\
and you are welcome to modify and redistribute it under the GPL license\n\n\
-Starts the MySQL database server\n");
+Starts the MySQL database server.\n");
printf("Usage: %s [OPTIONS]\n", my_progname);
if (!opt_verbose)
- puts("\nFor more help options (several pages), use mysqld --verbose --help");
+ puts("\nFor more help options (several pages), use mysqld --verbose --help.");
else
{
#ifdef __WIN__
puts("NT and Win32 specific options:\n\
- --install Install the default service (NT)\n\
- --install-manual Install the default service started manually (NT)\n\
- --install service_name Install an optional service (NT)\n\
- --install-manual service_name Install an optional service started manually (NT)\n\
- --remove Remove the default service from the service list (NT)\n\
- --remove service_name Remove the service_name from the service list (NT)\n\
- --enable-named-pipe Only to be used for the default server (NT)\n\
- --standalone Dummy option to start as a standalone server (NT)\
+ --install Install the default service (NT).\n\
+ --install-manual Install the default service started manually (NT).\n\
+ --install service_name Install an optional service (NT).\n\
+ --install-manual service_name Install an optional service started manually (NT).\n\
+ --remove Remove the default service from the service list (NT).\n\
+ --remove service_name Remove the service_name from the service list (NT).\n\
+ --enable-named-pipe Only to be used for the default server (NT).\n\
+ --standalone Dummy option to start as a standalone server (NT).\
");
puts("");
#endif
@@ -7928,6 +7973,7 @@ static int mysql_init_variables(void)
log_output_options= find_bit_type(log_output_str, &log_output_typelib);
opt_bin_log= 0;
opt_disable_networking= opt_skip_show_db=0;
+ opt_skip_name_resolve= 0;
opt_ignore_builtin_innodb= 0;
opt_logname= opt_update_logname= opt_binlog_index_name= opt_slow_logname= 0;
opt_tc_log_file= (char *)"tc.log"; // no hostname in tc_log file name !
@@ -8517,6 +8563,7 @@ mysqld_get_one_option(int optid,
opt_specialflag|= SPECIAL_NO_HOST_CACHE;
break;
case (int) OPT_SKIP_RESOLVE:
+ opt_skip_name_resolve= 1;
opt_specialflag|=SPECIAL_NO_RESOLVE;
break;
case (int) OPT_SKIP_NETWORKING:
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 30a6c3bb2fc..280154c0b52 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -1171,11 +1171,7 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
if (file)
{
range_end();
- if (head->key_read)
- {
- head->key_read= 0;
- file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ head->set_keyread(FALSE);
if (free_file)
{
DBUG_PRINT("info", ("Freeing separate handler 0x%lx (free: %d)", (long) file,
@@ -1377,10 +1373,7 @@ end:
head->file= file;
/* We don't have to set 'head->keyread' here as the 'file' is unique */
if (!head->no_keyread)
- {
- head->key_read= 1;
head->mark_columns_used_by_index(index);
- }
head->prepare_for_position();
head->file= org_file;
bitmap_copy(&column_bitmap, head->read_set);
@@ -8166,7 +8159,7 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::read_keys_and_merge");
/* We're going to just read rowids. */
- file->extra(HA_EXTRA_KEYREAD);
+ head->set_keyread(TRUE);
head->prepare_for_position();
cur_quick_it.rewind();
@@ -8242,7 +8235,7 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
delete unique;
doing_pk_scan= FALSE;
/* index_merge currently doesn't support "using index" at all */
- file->extra(HA_EXTRA_NO_KEYREAD);
+ head->set_keyread(FALSE);
init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1 , 1, TRUE);
DBUG_RETURN(result);
}
@@ -10629,7 +10622,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::reset(void)
int result;
DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::reset");
- file->extra(HA_EXTRA_KEYREAD); /* We need only the key attributes */
+ head->set_keyread(TRUE); /* We need only the key attributes */
if ((result= file->ha_index_init(index,1)))
DBUG_RETURN(result);
if (quick_prefix_select && quick_prefix_select->reset())
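The opt_range.cc hunks above (and the opt_sum.cc hunks below) replace the
open-coded toggling of TABLE::key_read plus HA_EXTRA_KEYREAD /
HA_EXTRA_NO_KEYREAD with a single TABLE::set_keyread() call. The helper itself
is added to sql/table.h by this merge and is not shown in this diff; a
plausible sketch, inferred purely from the pattern being removed here, is:

  void TABLE::set_keyread(bool flag)
  {
    /* Sketch only -- the real definition in sql/table.h may differ. */
    DBUG_ASSERT(file);
    if (flag && !key_read)
    {
      key_read= 1;
      file->extra(HA_EXTRA_KEYREAD);
    }
    else if (!flag && key_read)
    {
      key_read= 0;
      file->extra(HA_EXTRA_NO_KEYREAD);
    }
  }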
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index 87ef3af6e44..58906c7acab 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000-2003 MySQL AB
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
/**
@@ -96,7 +96,7 @@ static ulonglong get_exact_record_count(TABLE_LIST *tables)
@param conds WHERE clause
@note
- This function is only called for queries with sum functions and no
+ This function is only called for queries with aggregate functions and no
GROUP BY part. This means that the result set shall contain a single
row only
@@ -326,11 +326,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
if (!error && reckey_in_range(0, &ref, item_field->field,
conds, range_fl, prefix_len))
error= HA_ERR_KEY_NOT_FOUND;
- if (table->key_read)
- {
- table->key_read= 0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ table->set_keyread(FALSE);
table->file->ha_index_end();
if (error)
{
@@ -413,11 +409,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
if (!error && reckey_in_range(1, &ref, item_field->field,
conds, range_fl, prefix_len))
error= HA_ERR_KEY_NOT_FOUND;
- if (table->key_read)
- {
- table->key_read=0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ table->set_keyread(FALSE);
table->file->ha_index_end();
if (error)
{
@@ -567,31 +559,57 @@ bool simple_pred(Item_func *func_item, Item **args, bool *inv_order)
/**
Check whether a condition matches a key to get {MAX|MIN}(field):.
- For the index specified by the keyinfo parameter, index that
- contains field as its component (field_part), the function
- checks whether the condition cond is a conjunction and all its
- conjuncts referring to the columns of the same table as column
- field are one of the following forms:
- - f_i= const_i or const_i= f_i or f_i is null,
- where f_i is part of the index
- - field {<|<=|>=|>|=} const or const {<|<=|>=|>|=} field
- - field between const1 and const2
-
- @param[in] max_fl Set to 1 if we are optimising MAX()
- @param[in,out] ref Reference to the structure we store the key
- value
- @param[in] keyinfo Reference to the key info
- @param[in] field_part Pointer to the key part for the field
- @param[in] cond WHERE condition
- @param[in,out] key_part_used Map of matchings parts
- @param[in,out] range_fl Says whether including key will be used
- @param[out] prefix_len Length of common key part for the range
- where MAX/MIN is searched for
+ For the index specified by the keyinfo parameter, an index that
+ contains the field as one of its components (field_part), the function
+ checks whether
+
+ - the condition cond is a conjunction,
+ - all of its conjuncts refer to columns of the same table, and
+ - each conjunct has one of the following forms:
+ - f_i = const_i or const_i = f_i or f_i IS NULL,
+ where f_i is part of the index
+ - field {<|<=|>=|>|=} const
+ - const {<|<=|>=|>|=} field
+ - field BETWEEN const_1 AND const_2
+
+ As a side-effect, the key value to be used for looking up the MIN/MAX value
+ is actually stored inside the Field object. An interesting feature is that
+ the function will find the most restrictive endpoint by over-eager
+ evaluation of the @c WHERE condition. It continually stores the current
+ endpoint inside the Field object. For a query such as
+
+ @code
+ SELECT MIN(a) FROM t1 WHERE a > 3 AND a > 5;
+ @endcode
+
+ the algorithm will recurse over the conjunction, first storing 3 in the
+ field. In the next recursive invocation the expression a > 5 is evaluated
+ as 3 > 5 (due to the dual nature of Field objects as value carriers and
+ field identifiers), which obviously fails, leading to 5 being stored in
+ the Field object.
+
+ @param[in] max_fl Set to true if we are optimizing MAX(),
+ false means we are optimizing %MIN()
+ @param[in, out] ref Reference to the structure where the function
+ stores the key value
+ @param[in] keyinfo Reference to the key info
+ @param[in] field_part Pointer to the key part for the field
+ @param[in] cond WHERE condition
+ @param[in,out] key_part_used Map of matching parts. The function will output
+ the set of key parts actually being matched in
+ this set, yet it relies on the caller to
+ initialize the value to zero. This is due
+ to the fact that this value is passed
+ recursively.
+ @param[in,out] range_fl Says whether endpoints use strict greater/less
+ than.
+ @param[out] prefix_len Length of common key part for the range
+ where MAX/MIN is searched for
@retval
- 0 Index can't be used.
+ false Index can't be used.
@retval
- 1 We can use index to get MIN/MAX value
+ true We can use the index to get MIN/MAX value
*/
static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo,
@@ -628,17 +646,20 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo,
return 0; // Not operator, can't optimize
bool eq_type= 0; // =, <=> or IS NULL
+ bool is_null_safe_eq= FALSE; // The operator is NULL safe, e.g. <=>
bool noeq_type= 0; // < or >
bool less_fl= 0; // < or <=
- bool is_null= 0;
- bool between= 0;
+ bool is_null= 0; // IS NULL
+ bool between= 0; // BETWEEN ... AND ...
switch (((Item_func*) cond)->functype()) {
case Item_func::ISNULL_FUNC:
is_null= 1; /* fall through */
case Item_func::EQ_FUNC:
+ eq_type= TRUE;
+ break;
case Item_func::EQUAL_FUNC:
- eq_type= 1;
+ eq_type= is_null_safe_eq= TRUE;
break;
case Item_func::LT_FUNC:
noeq_type= 1; /* fall through */
@@ -666,6 +687,10 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo,
if (!simple_pred((Item_func*) cond, args, &inv))
return 0;
+ if (!is_null_safe_eq && !is_null &&
+ (args[1]->is_null() || (between && args[2]->is_null())))
+ return FALSE;
+
if (inv && !eq_type)
less_fl= 1-less_fl; // Convert '<' -> '>' (etc)
@@ -716,15 +741,16 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo,
- field {>|>=} const, when searching for MIN
*/
- if (is_null)
+ if (is_null || (is_null_safe_eq && args[1]->is_null()))
{
part->field->set_null();
*key_ptr= (uchar) 1;
}
else
{
- store_val_in_field(part->field, args[between && max_fl ? 2 : 1],
- CHECK_FIELD_IGNORE);
+ /* Update endpoints for MAX/MIN, see function comment. */
+ Item *value= args[between && max_fl ? 2 : 1];
+ store_val_in_field(part->field, value, CHECK_FIELD_IGNORE);
if (part->null_bit)
*key_ptr++= (uchar) test(part->field->is_null());
part->field->get_key_image(key_ptr, part->length, Field::itRAW);
@@ -876,10 +902,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref,
converted (for example to upper case)
*/
if (field->part_of_key.is_set(idx))
- {
- table->key_read= 1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
+ table->set_keyread(TRUE);
return 1;
}
}
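Editor's note: the new matching_cond() comment above walks through SELECT MIN(a) FROM t1 WHERE a > 3 AND a > 5 and explains how each conjunct may overwrite the endpoint already stored in the Field object. The toy C++ sketch below models that over-eager evaluation with a plain integer standing in for the Field; it illustrates the described idea, not the server algorithm.

@code
#include <iostream>
#include <vector>

// One conjunct of the WHERE clause of the form: field > value (strict bound).
struct Conjunct { long value; };

int main() {
  // SELECT MIN(a) FROM t1 WHERE a > 3 AND a > 5;
  std::vector<Conjunct> conds = {{3}, {5}};

  bool have_endpoint = false;
  long endpoint = 0;     // plays the role of the value stored in the Field

  for (const Conjunct &c : conds) {
    // "Evaluate" the conjunct against the currently stored endpoint:
    // if the stored value does not satisfy it (e.g. 3 > 5 fails), the
    // new constant is the tighter bound and replaces the stored one.
    if (!have_endpoint || !(endpoint > c.value)) {
      endpoint = c.value;
      have_endpoint = true;
    }
  }
  std::cout << "lower bound for MIN(a): a > " << endpoint << "\n";  // 5
}
@endcode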
diff --git a/sql/protocol.cc b/sql/protocol.cc
index 11b4c085505..41ebbcfba90 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000-2003 MySQL AB
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -912,7 +912,7 @@ bool Protocol_text::store(const char *from, size_t length,
CHARSET_INFO *tocs= this->thd->variables.character_set_results;
#ifndef DBUG_OFF
DBUG_PRINT("info", ("Protocol_text::store field %u (%u): %.*s", field_pos,
- field_count, (int) length, from));
+ field_count, (int) length, (length == 0? "" : from)));
DBUG_ASSERT(field_pos < field_count);
DBUG_ASSERT(field_types == 0 ||
field_types[field_pos] == MYSQL_TYPE_DECIMAL ||
diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc
index e34f8561051..6058c473e9f 100644
--- a/sql/rpl_utility.cc
+++ b/sql/rpl_utility.cc
@@ -206,7 +206,7 @@ table_def::compatible_with(Relay_log_info const *rli_arg, TABLE *table)
Check the slave's field size against that of the master.
*/
if (!error &&
- !field->compatible_field_size(field_metadata(col), rli_arg))
+ !field->compatible_field_size(field_metadata(col), rli_arg, m_flags))
{
error= 1;
char buf[256];
diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h
index d011e9aade8..b209c9140d1 100644
--- a/sql/rpl_utility.h
+++ b/sql/rpl_utility.h
@@ -32,12 +32,6 @@ class Relay_log_info;
- Extract and decode table definition data from the table map event
- Check if table definition in table map is compatible with table
definition on slave
-
- Currently, the only field type data available is an array of the
- type operators that are present in the table map event.
-
- @todo Add type operands to this structure to allow detection of
- difference between, e.g., BIT(5) and BIT(10).
*/
class table_def
@@ -59,9 +53,9 @@ public:
@param null_bitmap The bitmap of fields that can be null
*/
table_def(field_type *types, ulong size, uchar *field_metadata,
- int metadata_size, uchar *null_bitmap)
+ int metadata_size, uchar *null_bitmap, uint16 flags)
: m_size(size), m_type(0), m_field_metadata_size(metadata_size),
- m_field_metadata(0), m_null_bits(0), m_memory(NULL)
+ m_field_metadata(0), m_null_bits(0), m_flags(flags), m_memory(NULL)
{
m_memory= (uchar *)my_multi_malloc(MYF(MY_WME),
&m_type, size,
@@ -247,6 +241,7 @@ private:
uint m_field_metadata_size;
uint16 *m_field_metadata;
uchar *m_null_bits;
+ uint16 m_flags; // Table flags
uchar *m_memory;
};
diff --git a/sql/set_var.cc b/sql/set_var.cc
index bf126fb09e4..d546494e6a9 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -560,6 +560,10 @@ static sys_var_const sys_skip_show_database(&vars, "skip_show_database",
OPT_GLOBAL, SHOW_BOOL,
(uchar*) &opt_skip_show_db);
+static sys_var_const sys_skip_name_resolve(&vars, "skip_name_resolve",
+ OPT_GLOBAL, SHOW_BOOL,
+ (uchar*) &opt_skip_name_resolve);
+
static sys_var_const sys_socket(&vars, "socket",
OPT_GLOBAL, SHOW_CHAR_PTR,
(uchar*) &mysqld_unix_port);
@@ -3179,6 +3183,13 @@ static bool set_option_autocommit(THD *thd, set_var *var)
if ((org_options & OPTION_NOT_AUTOCOMMIT))
{
/* We changed to auto_commit mode */
+ if (thd->transaction.xid_state.xa_state != XA_NOTR)
+ {
+ thd->options= org_options;
+ my_error(ER_XAER_RMFAIL, MYF(0),
+ xa_state_names[thd->transaction.xid_state.xa_state]);
+ return 1;
+ }
thd->options&= ~(ulonglong) (OPTION_BEGIN | OPTION_KEEP_LOG);
thd->transaction.all.modified_non_trans_table= FALSE;
thd->server_status|= SERVER_STATUS_AUTOCOMMIT;
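Editor's note: the set_var.cc hunk above makes switching autocommit back on fail with ER_XAER_RMFAIL while an XA transaction is open, restoring the saved option bits before raising the error. Below is a minimal sketch of that guard pattern with an invented Session type and state enum, not the server's THD/xid_state machinery.

@code
#include <iostream>

enum XaState { XA_NOTR, XA_ACTIVE, XA_IDLE, XA_PREPARED };

struct Session {
  XaState xa_state = XA_NOTR;
  bool autocommit = false;
};

// Returns false (and leaves the session untouched) if the change is illegal.
bool set_autocommit_on(Session &s) {
  bool saved = s.autocommit;
  s.autocommit = true;
  if (s.xa_state != XA_NOTR) {
    s.autocommit = saved;            // roll back the option change
    std::cout << "error: XAER_RMFAIL, active XA state forbids this\n";
    return false;
  }
  return true;
}

int main() {
  Session s;
  s.xa_state = XA_ACTIVE;
  std::cout << set_autocommit_on(s) << "\n";   // 0, refused
  s.xa_state = XA_NOTR;
  std::cout << set_autocommit_on(s) << "\n";   // 1, accepted
}
@endcode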
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 093d345ba38..0f9e2a24a93 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -5027,7 +5027,7 @@ ER_UNKNOWN_STORAGE_ENGINE 42000
# When using this error code, use ER(ER_WARN_DEPRECATED_SYNTAX_WITH_VER)
# for the message string. See, for example, code in mysql_priv.h.
ER_WARN_DEPRECATED_SYNTAX
- eng "'%s' is deprecated; use '%s' instead"
+ eng "'%s' is deprecated and will be removed in a future release. Please use %s instead"
ger "'%s' ist veraltet. Bitte benutzen Sie '%s'"
por "'%s' é desatualizado. Use '%s' em seu lugar"
spa "'%s' está desaprobado, use '%s' en su lugar"
diff --git a/sql/slave.cc b/sql/slave.cc
index 3639330c8e7..57cd8d0c7da 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -44,6 +44,7 @@
#ifdef HAVE_REPLICATION
#include "rpl_tblmap.h"
+#include "debug_sync.h"
#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
@@ -931,7 +932,16 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
unavailable (very old master not supporting UNIX_TIMESTAMP()?).
*/
- DBUG_SYNC_POINT("debug_lock.before_get_UNIX_TIMESTAMP", 10);
+ DBUG_EXECUTE_IF("dbug.before_get_UNIX_TIMESTAMP",
+ {
+ const char act[]=
+ "now "
+ "wait_for signal.get_unix_timestamp";
+ DBUG_ASSERT(opt_debug_sync_timeout > 0);
+ DBUG_ASSERT(!debug_sync_set_action(current_thd,
+ STRING_WITH_LEN(act)));
+ };);
+
master_res= NULL;
if (!mysql_real_query(mysql, STRING_WITH_LEN("SELECT UNIX_TIMESTAMP()")) &&
(master_res= mysql_store_result(mysql)) &&
@@ -970,7 +980,15 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
Note: we could have put a @@SERVER_ID in the previous SELECT
UNIX_TIMESTAMP() instead, but this would not have worked on 3.23 masters.
*/
- DBUG_SYNC_POINT("debug_lock.before_get_SERVER_ID", 10);
+ DBUG_EXECUTE_IF("dbug.before_get_SERVER_ID",
+ {
+ const char act[]=
+ "now "
+ "wait_for signal.get_server_id";
+ DBUG_ASSERT(opt_debug_sync_timeout > 0);
+ DBUG_ASSERT(!debug_sync_set_action(current_thd,
+ STRING_WITH_LEN(act)));
+ };);
master_res= NULL;
master_row= NULL;
if (!mysql_real_query(mysql,
@@ -2518,6 +2536,16 @@ pthread_handler_t handle_slave_io(void *arg)
connected:
+ DBUG_EXECUTE_IF("dbug.before_get_running_status_yes",
+ {
+ const char act[]=
+ "now "
+ "wait_for signal.io_thread_let_running";
+ DBUG_ASSERT(opt_debug_sync_timeout > 0);
+ DBUG_ASSERT(!debug_sync_set_action(thd,
+ STRING_WITH_LEN(act)));
+ };);
+
// TODO: the assignment below should be under mutex (5.0)
mi->slave_running= MYSQL_SLAVE_RUN_CONNECT;
thd->slave_net = &mysql->net;
diff --git a/sql/sp.cc b/sql/sp.cc
index f0508142557..ef69edb96c6 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -1898,6 +1898,10 @@ sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex,
ret= SP_OK;
break;
default:
+ /* Query might have been killed, don't set error. */
+ if (thd->killed)
+ break;
+
/*
Any error when loading an existing routine is either some problem
with the mysql.proc table, or a parse error because the contents
diff --git a/sql/sp_cache.cc b/sql/sp_cache.cc
index b60d82c9698..a3d3174b8c1 100644
--- a/sql/sp_cache.cc
+++ b/sql/sp_cache.cc
@@ -181,8 +181,9 @@ sp_head *sp_cache_lookup(sp_cache **cp, sp_name *name)
sp_cache_invalidate()
NOTE
- This is called when a VIEW definition is modifed. We can't destroy sp_head
- objects here as one may modify VIEW definitions from prelocking-free SPs.
+ This is called when a VIEW definition is created or modified (and in some
+ other contexts). We can't destroy sp_head objects here as one may modify
+ VIEW definitions from prelocking-free SPs.
*/
void sp_cache_invalidate()
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 25cd1d8a9b4..fd9ef7464b0 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -1848,6 +1848,8 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
{
bool err_status= FALSE;
uint params = m_pcont->context_var_count();
+ /* Query start time may be reset in a multi-stmt SP; keep this for later. */
+ ulonglong utime_before_sp_exec= thd->utime_after_lock;
sp_rcontext *save_spcont, *octx;
sp_rcontext *nctx = NULL;
bool save_enable_slow_log;
@@ -2040,6 +2042,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
delete nctx;
thd->spcont= save_spcont;
+ thd->utime_after_lock= utime_before_sp_exec;
DBUG_RETURN(err_status);
}
@@ -3005,6 +3008,7 @@ int
sp_instr_set_trigger_field::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_set_trigger_field::execute");
+ thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL;
DBUG_RETURN(m_lex_keeper.reset_lex_and_exec_core(thd, nextp, TRUE, this));
}
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 4416ebbc45c..691b743c713 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1519,6 +1519,7 @@ void close_temporary_tables(THD *thd)
{
if (is_user_table(table))
{
+ bool save_thread_specific_used= thd->thread_specific_used;
my_thread_id save_pseudo_thread_id= thd->variables.pseudo_thread_id;
/* Set pseudo_thread_id to be that of the processed table */
thd->variables.pseudo_thread_id= tmpkeyval(thd, table);
@@ -1548,6 +1549,7 @@ void close_temporary_tables(THD *thd)
thd->clear_error();
CHARSET_INFO *cs_save= thd->variables.character_set_client;
thd->variables.character_set_client= system_charset_info;
+ thd->thread_specific_used= TRUE;
Query_log_event qinfo(thd, s_query.ptr(),
s_query.length() - 1 /* to remove trailing ',' */,
0, FALSE, 0);
@@ -1560,6 +1562,7 @@ void close_temporary_tables(THD *thd)
"Failed to write the DROP statement for temporary tables to binary log");
}
thd->variables.pseudo_thread_id= save_pseudo_thread_id;
+ thd->thread_specific_used= save_thread_specific_used;
}
else
{
@@ -2176,6 +2179,7 @@ void wait_for_condition(THD *thd, pthread_mutex_t *mutex, pthread_cond_t *cond)
proc_info=thd->proc_info;
thd_proc_info(thd, "Waiting for table");
DBUG_ENTER("wait_for_condition");
+ DEBUG_SYNC(thd, "waiting_for_table");
if (!thd->killed)
(void) pthread_cond_wait(cond, mutex);
@@ -4613,7 +4617,20 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
safe_to_ignore_table= prelock_handler.safely_trapped_errors();
}
else
+ {
tables->table= open_table(thd, tables, &new_frm_mem, &refresh, flags);
+
+ /*
+ Skip further processing if there has been a fatal error while
+ trying to open a table. For example, this might happen due to
+ stack shortage, unknown definer in views, etc.
+ */
+ if (!tables->table && thd->is_error())
+ {
+ result= -1;
+ goto err;
+ }
+ }
}
else
DBUG_PRINT("tcache", ("referenced table: '%s'.'%s' 0x%lx",
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 580fe8057cd..51299d4f7e7 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -738,7 +738,7 @@ bool THD::handle_error(uint sql_errno, const char *message,
{
for (Internal_error_handler *error_handler= m_internal_handler;
error_handler;
- error_handler= m_internal_handler->m_prev_internal_handler)
+ error_handler= error_handler->m_prev_internal_handler)
{
if (error_handler->handle_error(sql_errno, message, level, this))
return TRUE;
@@ -747,10 +747,12 @@ bool THD::handle_error(uint sql_errno, const char *message,
}
-void THD::pop_internal_handler()
+Internal_error_handler *THD::pop_internal_handler()
{
DBUG_ASSERT(m_internal_handler != NULL);
+ Internal_error_handler *popped_handler= m_internal_handler;
m_internal_handler= m_internal_handler->m_prev_internal_handler;
+ return popped_handler;
}
extern "C"
@@ -3165,6 +3167,7 @@ void THD::reset_sub_statement_state(Sub_statement_state *backup,
}
#endif
+ backup->count_cuted_fields= count_cuted_fields;
backup->options= options;
backup->in_sub_stmt= in_sub_stmt;
backup->enable_slow_log= enable_slow_log;
@@ -3203,6 +3206,7 @@ void THD::reset_sub_statement_state(Sub_statement_state *backup,
void THD::restore_sub_statement_state(Sub_statement_state *backup)
{
+ DBUG_ENTER("THD::restore_sub_statement_state");
#ifndef EMBEDDED_LIBRARY
/* BUG#33029, if we are replicating from a buggy master, restore
auto_inc_intervals_forced so that the top statement can use the
@@ -3229,6 +3233,7 @@ void THD::restore_sub_statement_state(Sub_statement_state *backup)
/* ha_release_savepoint() never returns error. */
(void)ha_release_savepoint(this, sv);
}
+ count_cuted_fields= backup->count_cuted_fields;
transaction.savepoints= backup->savepoints;
options= backup->options;
in_sub_stmt= backup->in_sub_stmt;
@@ -3259,6 +3264,7 @@ void THD::restore_sub_statement_state(Sub_statement_state *backup)
*/
examined_row_count+= backup->examined_row_count;
cuted_fields+= backup->cuted_fields;
+ DBUG_VOID_RETURN;
}
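Editor's note: two small THD changes above are easy to miss. The error-handler loop now advances through the chain via the loop variable (the old form re-read the top of the stack each iteration and so could loop on the same element), and pop_internal_handler() now returns the handler it removed. The self-contained sketch below shows the corrected iteration and the returning pop on a toy handler stack; Handler and Session are hypothetical names, not the server's Internal_error_handler/THD.

@code
#include <iostream>

struct Handler {
  const char *name;
  Handler *prev;                 // next-older handler on the stack
  bool handle(int errcode) {     // returns true if the error is absorbed
    std::cout << name << " sees error " << errcode << "\n";
    return false;
  }
};

struct Session {
  Handler *top = nullptr;

  void push(Handler *h) { h->prev = top; top = h; }

  // Return the popped handler so the caller can inspect or delete it.
  Handler *pop() {
    Handler *popped = top;
    top = top->prev;
    return popped;
  }

  bool report(int errcode) {
    // Advance through the chain via the loop variable itself; the buggy
    // form advanced via top->prev and never moved past the second element.
    for (Handler *h = top; h; h = h->prev)
      if (h->handle(errcode))
        return true;
    return false;
  }
};

int main() {
  Handler a{"outer", nullptr}, b{"inner", nullptr};
  Session s;
  s.push(&a);
  s.push(&b);
  s.report(42);                      // visits "inner", then "outer"
  std::cout << s.pop()->name << " popped\n";
}
@endcode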
diff --git a/sql/sql_class.h b/sql/sql_class.h
index aa39ddb2b15..8c4c60c02bb 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1007,6 +1007,7 @@ public:
bool enable_slow_log;
bool last_insert_id_used;
SAVEPOINT *savepoints;
+ enum enum_check_fields count_cuted_fields;
};
@@ -2333,7 +2334,7 @@ public:
/**
Remove the error handler last pushed.
*/
- void pop_internal_handler();
+ Internal_error_handler *pop_internal_handler();
/** Overloaded to guard query/query_length fields */
virtual void set_statement(Statement *stmt);
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 4fc618ca050..6b395fb2636 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -50,6 +50,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
SELECT_LEX *select_lex= &thd->lex->select_lex;
THD::killed_state killed_status= THD::NOT_KILLED;
DBUG_ENTER("mysql_delete");
+ bool save_binlog_row_based;
THD::enum_binlog_query_type query_type=
thd->lex->sql_command == SQLCOM_TRUNCATE ?
@@ -147,12 +148,14 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
query_type= THD::STMT_QUERY_TYPE;
error= -1; // ok
deleted= maybe_deleted;
+ save_binlog_row_based= thd->current_stmt_binlog_row_based;
goto cleanup;
}
if (error != HA_ERR_WRONG_COMMAND)
{
table->file->print_error(error,MYF(0));
error=0;
+ save_binlog_row_based= thd->current_stmt_binlog_row_based;
goto cleanup;
}
/* Handler didn't support fast delete; Delete rows one by one */
@@ -293,6 +296,11 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
table->mark_columns_needed_for_delete();
+ save_binlog_row_based= thd->current_stmt_binlog_row_based;
+ if (thd->lex->sql_command == SQLCOM_TRUNCATE &&
+ thd->current_stmt_binlog_row_based)
+ thd->clear_current_stmt_binlog_row_based();
+
while (!(error=info.read_record(&info)) && !thd->killed &&
! thd->is_error())
{
@@ -393,7 +401,10 @@ cleanup:
/* See similar binlogging code in sql_update.cc, for comments */
if ((error < 0) || thd->transaction.stmt.modified_non_trans_table)
{
- if (mysql_bin_log.is_open())
+ if (mysql_bin_log.is_open() &&
+ !(thd->lex->sql_command == SQLCOM_TRUNCATE &&
+ thd->current_stmt_binlog_row_based &&
+ find_temporary_table(thd, table_list)))
{
bool const is_trans=
thd->lex->sql_command == SQLCOM_TRUNCATE ?
@@ -427,6 +438,7 @@ cleanup:
if (thd->transaction.stmt.modified_non_trans_table)
thd->transaction.all.modified_non_trans_table= TRUE;
}
+ thd->current_stmt_binlog_row_based= save_binlog_row_based;
DBUG_ASSERT(transactional_table || !deleted || thd->transaction.stmt.modified_non_trans_table);
free_underlaid_joins(thd, select_lex);
if (error < 0 ||
@@ -1062,15 +1074,13 @@ bool multi_delete::send_eof()
static bool mysql_truncate_by_delete(THD *thd, TABLE_LIST *table_list)
{
- bool error, save_binlog_row_based= thd->current_stmt_binlog_row_based;
+ bool error;
DBUG_ENTER("mysql_truncate_by_delete");
table_list->lock_type= TL_WRITE;
mysql_init_select(thd->lex);
- thd->clear_current_stmt_binlog_row_based();
error= mysql_delete(thd, table_list, NULL, NULL, HA_POS_ERROR, LL(0), TRUE);
ha_autocommit_or_rollback(thd, error);
end_trans(thd, error ? ROLLBACK : COMMIT);
- thd->current_stmt_binlog_row_based= save_binlog_row_based;
DBUG_RETURN(error);
}
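Editor's note: the sql_delete.cc changes above move the save and restore of current_stmt_binlog_row_based into mysql_delete() itself, so the flag is restored on every exit path of a row-based TRUNCATE. As a general illustration of that save-on-entry/restore-on-exit discipline, here is a small RAII sketch; the server restores the flag explicitly rather than with a guard object, and all names below are invented.

@code
#include <iostream>

// Minimal RAII guard: remembers a flag's value and restores it when the
// enclosing scope is left, whichever exit path is taken.
class FlagSaver {
  bool &flag_;
  bool saved_;
public:
  explicit FlagSaver(bool &flag) : flag_(flag), saved_(flag) {}
  ~FlagSaver() { flag_ = saved_; }
};

bool row_based_binlog = true;        // stands in for the per-statement flag

void truncate_by_delete(bool is_truncate) {
  FlagSaver guard(row_based_binlog);
  if (is_truncate)
    row_based_binlog = false;        // TRUNCATE is logged as a statement
  // ... delete rows, possibly returning early on error ...
}                                     // flag restored here on every path

int main() {
  truncate_by_delete(true);
  std::cout << std::boolalpha << row_based_binlog << "\n";  // true again
}
@endcode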
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index fc53529da37..3836b259396 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -3190,7 +3190,7 @@ bool select_insert::send_data(List<Item> &values)
thd->count_cuted_fields= CHECK_FIELD_WARN; // Calculate cuted fields
store_values(values);
- thd->count_cuted_fields= CHECK_FIELD_IGNORE;
+ thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL;
if (thd->is_error())
{
table->auto_increment_field_not_null= FALSE;
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 5a3907d0f7f..411a7f5ec49 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -1603,6 +1603,7 @@ void st_select_lex::init_query()
having= prep_having= where= prep_where= 0;
olap= UNSPECIFIED_OLAP_TYPE;
having_fix_field= 0;
+ group_fix_field= 0;
context.select_lex= this;
context.init();
/*
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 5e37e618250..90a7c66e0d0 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -647,6 +647,8 @@ public:
bool braces; /* SELECT ... UNION (SELECT ... ) <- this braces */
/* TRUE when having fix field called in processing of this SELECT */
bool having_fix_field;
+ /* TRUE when GROUP BY fix field called in processing of this SELECT */
+ bool group_fix_field;
/* List of references to fields referenced from inner selects */
List<Item_outer_ref> inner_refs_list;
/* Number of Item_sum-derived objects in this SELECT */
@@ -1727,6 +1729,7 @@ typedef struct st_lex : public Query_tables_list
- CREATE TRIGGER (points to "TRIGGER");
- CREATE PROCEDURE (points to "PROCEDURE");
- CREATE FUNCTION (points to "FUNCTION" or "AGGREGATE");
+ - CREATE EVENT (points to "EVENT")
This pointer is required to add possibly omitted DEFINER-clause to the
DDL-statement before dumping it to the binlog.
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 82cc8f81b4a..03606c11521 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -689,12 +689,10 @@ static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex,
strcpy(end, p);
end += pl;
- thd->set_query_inner(load_data_query, end - load_data_query);
-
Execute_load_query_log_event
- e(thd, thd->query(), thd->query_length(),
- (uint) ((char*) fname_start - (char*) thd->query() - 1),
- (uint) ((char*) fname_end - (char*) thd->query()),
+ e(thd, load_data_query, end-load_data_query,
+ (uint) ((char*) fname_start - load_data_query - 1),
+ (uint) ((char*) fname_end - load_data_query),
(duplicates == DUP_REPLACE) ? LOAD_DUP_REPLACE :
(ignore ? LOAD_DUP_IGNORE : LOAD_DUP_ERROR),
transactional_table, FALSE, errcode);
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 2eb6a190e63..6d599d9c2df 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1717,9 +1717,9 @@ void log_slow_statement(THD *thd)
/*
Do not log administrative statements unless the appropriate option is
- set; do not log into slow log if reading from backup.
+ set.
*/
- if (thd->enable_slow_log && !thd->user_time)
+ if (thd->enable_slow_log)
{
ulonglong end_utime_of_query= thd->current_utime();
thd_proc_info(thd, "logging slow query");
@@ -3312,7 +3312,7 @@ end_with_restore_list:
TODO: this is workaround. right way will be move invalidating in
the unlock procedure.
*/
- if (first_table->lock_type == TL_WRITE_CONCURRENT_INSERT &&
+ if (!res && first_table->lock_type == TL_WRITE_CONCURRENT_INSERT &&
thd->lock)
{
/* INSERT ... SELECT should invalidate only the very first table */
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 143073a67b5..114c6fd0681 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -2876,6 +2876,7 @@ int get_partition_id_range(partition_info *part_info,
*func_value= part_func_value;
if (unsigned_flag)
part_func_value-= 0x8000000000000000ULL;
+ /* Search for the partition containing part_func_value */
while (max_part_id > min_part_id)
{
loc_part_id= (max_part_id + min_part_id) / 2;
@@ -3015,13 +3016,18 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
part_end_val= range_array[loc_part_id];
if (left_endpoint)
{
+ DBUG_ASSERT(part_func_value > part_end_val ?
+ (loc_part_id == max_partition &&
+ !part_info->defined_max_value) :
+ 1);
/*
In case of PARTITION p VALUES LESS THAN MAXVALUE
- the maximum value is in the current partition.
+ the maximum value is in the current (last) partition.
+ If value is equal or greater than the endpoint,
+ the range starts from the next partition.
*/
- if (part_func_value > part_end_val ||
- (part_func_value == part_end_val &&
- (loc_part_id < max_partition || !part_info->defined_max_value)))
+ if (part_func_value >= part_end_val &&
+ (loc_part_id < max_partition || !part_info->defined_max_value))
loc_part_id++;
}
else
@@ -4274,6 +4280,12 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
{
DBUG_ENTER("prep_alter_part_table");
+ /* Foreign keys on partitioned tables are not supported, waits for WL#148 */
+ if (table->part_info && (alter_info->flags & ALTER_FOREIGN_KEY))
+ {
+ my_error(ER_FOREIGN_KEY_ON_PARTITIONED, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
/*
We are going to manipulate the partition info on the table object
so we need to ensure that the data structure of the table object
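Editor's note: get_partition_id_range() and get_partition_id_range_for_endpoint() above binary-search the sorted array of range upper bounds, and the new left-endpoint rule moves on to the next partition whenever the value is greater than or equal to the current bound, unless that partition is a trailing MAXVALUE one. A simplified, self-contained sketch of that lookup follows (toy bounds vector, no NULL or unsigned handling; not the server code).

@code
#include <cstdint>
#include <iostream>
#include <vector>

// Upper bounds of RANGE partitions, e.g. VALUES LESS THAN (10), (20), (30).
// 'has_maxvalue' marks a trailing PARTITION ... VALUES LESS THAN MAXVALUE.
// 'bounds' is assumed non-empty.
uint32_t start_partition_for_left_endpoint(const std::vector<int64_t> &bounds,
                                           bool has_maxvalue, int64_t value) {
  uint32_t lo = 0, hi = static_cast<uint32_t>(bounds.size()) - 1;
  while (hi > lo) {                      // binary search over the bounds
    uint32_t mid = (lo + hi) / 2;
    if (bounds[mid] <= value)
      lo = mid + 1;
    else
      hi = mid;
  }
  // Left-endpoint rule: if value >= bound of this partition, the range
  // starts in the next one -- except when this is the last partition and
  // it is a MAXVALUE partition, which has no real upper bound.
  if (value >= bounds[lo] &&
      (lo < bounds.size() - 1 || !has_maxvalue))
    ++lo;
  return lo;
}

int main() {
  std::vector<int64_t> bounds = {10, 20, 30};
  std::cout << start_partition_for_left_endpoint(bounds, false, 20) << "\n"; // 2
  std::cout << start_partition_for_left_endpoint(bounds, false, 5)  << "\n"; // 0
}
@endcode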
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 4ead793737b..151197be1d3 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -1757,6 +1757,8 @@ bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name)
bzero(&tables, sizeof(tables));
tables.db= (char *)"mysql";
tables.table_name= tables.alias= (char *)"plugin";
+ if (check_table_access(thd, DELETE_ACL, &tables, 1, FALSE))
+ DBUG_RETURN(TRUE);
/* need to open before acquiring LOCK_plugin or it will deadlock */
if (! (table= open_ltable(thd, &tables, TL_WRITE, 0)))
diff --git a/sql/sql_profile.cc b/sql/sql_profile.cc
index 2312f95d8e2..9c0dec336b8 100644
--- a/sql/sql_profile.cc
+++ b/sql/sql_profile.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007 MySQL AB
+/* Copyright (c) 2007, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -504,7 +504,7 @@ void PROFILING::set_query_source(char *query_source_arg, uint query_length_arg)
There are two ways to get to this function: Selecting from the information
schema, and a SHOW command.
*/
-int PROFILING::fill_statistics_info(THD *thd, TABLE_LIST *tables, Item *cond)
+int PROFILING::fill_statistics_info(THD *thd_arg, TABLE_LIST *tables, Item *cond)
{
DBUG_ENTER("PROFILING::fill_statistics_info");
TABLE *table= tables->table;
@@ -539,7 +539,7 @@ int PROFILING::fill_statistics_info(THD *thd, TABLE_LIST *tables, Item *cond)
/* Skip the first. We count spans of fence, not fence-posts. */
if (previous == NULL) continue;
- if (thd->lex->sql_command == SQLCOM_SHOW_PROFILE)
+ if (thd_arg->lex->sql_command == SQLCOM_SHOW_PROFILE)
{
/*
We got here via a SHOW command. That means that we stored
@@ -552,14 +552,14 @@ int PROFILING::fill_statistics_info(THD *thd, TABLE_LIST *tables, Item *cond)
struct where and having conditions at the SQL layer, then this
condition should be ripped out.
*/
- if (thd->lex->profile_query_id == 0) /* 0 == show final query */
+ if (thd_arg->lex->profile_query_id == 0) /* 0 == show final query */
{
if (query != last)
continue;
}
else
{
- if (thd->lex->profile_query_id != query->profiling_query_id)
+ if (thd_arg->lex->profile_query_id != query->profiling_query_id)
continue;
}
}
@@ -715,7 +715,7 @@ int PROFILING::fill_statistics_info(THD *thd, TABLE_LIST *tables, Item *cond)
table->field[17]->set_notnull();
}
- if (schema_table_store_record(thd, table))
+ if (schema_table_store_record(thd_arg, table))
DBUG_RETURN(1);
}
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index b3d90028532..3aa9e9ba56f 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -1711,7 +1711,6 @@ int log_loaded_block(IO_CACHE* file)
if (mysql_bin_log.write(&b))
DBUG_RETURN(1);
lf_info->wrote_create_file= 1;
- DBUG_SYNC_POINT("debug_lock.created_file_event",10);
}
}
DBUG_RETURN(0);
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index e3b34126828..9db082faa95 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1,4 +1,4 @@
-/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -297,6 +297,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
all_fields List of all fields used in select
select Current select
ref_pointer_array Array of references to Items used in current select
+ group_list GROUP BY list (is NULL by default)
DESCRIPTION
The function serves 3 purposes - adds fields referenced from inner
@@ -315,6 +316,8 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
function is aggregated in the select where the outer field was
resolved or in some more inner select then the Item_direct_ref
class should be used.
+ Also it should be used if we are grouping by a subquery containing
+ the outer field.
The resolution is done here and not at the fix_fields() stage as
it can be done only after sum functions are fixed and pulled up to
selects where they are have to be aggregated.
@@ -331,7 +334,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
bool
fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
- Item **ref_pointer_array)
+ Item **ref_pointer_array, ORDER *group_list)
{
Item_outer_ref *ref;
bool res= FALSE;
@@ -381,6 +384,22 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
}
}
}
+ else
+ {
+ /*
+ Check if GROUP BY item trees contain the outer ref:
+ in this case we have to use Item_direct_ref instead of Item_ref.
+ */
+ for (ORDER *group= group_list; group; group= group->next)
+ {
+ if ((*group->item)->walk(&Item::find_item_processor, TRUE,
+ (uchar *) ref))
+ {
+ direct_ref= TRUE;
+ break;
+ }
+ }
+ }
new_ref= direct_ref ?
new Item_direct_ref(ref->context, item_ref, ref->table_name,
ref->field_name, ref->alias_name_used) :
@@ -587,7 +606,8 @@ JOIN::prepare(Item ***rref_pointer_array,
}
if (select_lex->inner_refs_list.elements &&
- fix_inner_refs(thd, all_fields, select_lex, ref_pointer_array))
+ fix_inner_refs(thd, all_fields, select_lex, ref_pointer_array,
+ group_list))
DBUG_RETURN(-1);
if (group_list)
@@ -1096,12 +1116,37 @@ JOIN::optimize()
}
}
- if (conds &&!outer_join && const_table_map != found_const_table_map &&
+ if (conds && const_table_map != found_const_table_map &&
(select_options & SELECT_DESCRIBE) &&
select_lex->master_unit() == &thd->lex->unit) // upper level SELECT
{
conds=new Item_int((longlong) 0,1); // Always false
}
+
+ /*
+ It's necessary to check const part of HAVING cond as
+ there is a chance that some cond parts may become
+ const items after make_join_statistics (for example
+ when an Item is a reference to a const table field from
+ an outer join).
+ This check is performed only for those conditions
+ which do not use aggregate functions. In such a case a
+ temporary table may not be used and const condition
+ elements may be lost during further HAVING
+ condition transformation in JOIN::exec.
+ */
+ if (having && const_table_map)
+ {
+ having->update_used_tables();
+ having= remove_eq_conds(thd, having, &having_value);
+ if (having_value == Item::COND_FALSE)
+ {
+ having= new Item_int((longlong) 0,1);
+ zero_result_cause= "Impossible HAVING noticed after reading const tables";
+ DBUG_RETURN(0);
+ }
+ }
+
if (make_join_select(this, select, conds))
{
zero_result_cause=
@@ -2963,7 +3008,8 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
s->quick=select->quick;
s->needed_reg=select->needed_reg;
select->quick=0;
- if (records == 0 && s->table->reginfo.impossible_range)
+ if (records == 0 && s->table->reginfo.impossible_range &&
+ (s->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT))
{
/*
Impossible WHERE or ON expression
@@ -5087,6 +5133,11 @@ greedy_search(JOIN *join,
if (best_extension_by_limited_search(join, remaining_tables, idx, record_count,
read_time, search_depth, prune_level))
DBUG_RETURN(TRUE);
+ /*
+ 'best_read < DBL_MAX' means that optimizer managed to find
+ some plan and updated 'best_positions' array accordingly.
+ */
+ DBUG_ASSERT(join->best_read < DBL_MAX);
if (size_remain <= search_depth)
{
@@ -5915,7 +5966,7 @@ store_val_in_field(Field *field, Item *item, enum_check_fields check_flag)
@retval TRUE error occurred
*/
bool
-JOIN::make_simple_join(JOIN *parent, TABLE *tmp_table)
+JOIN::make_simple_join(JOIN *parent, TABLE *temp_table)
{
DBUG_ENTER("JOIN::make_simple_join");
@@ -5928,13 +5979,19 @@ JOIN::make_simple_join(JOIN *parent, TABLE *tmp_table)
DBUG_RETURN(TRUE); /* purecov: inspected */
join_tab= parent->join_tab_reexec;
- table= &parent->table_reexec[0]; parent->table_reexec[0]= tmp_table;
+ table= &parent->table_reexec[0]; parent->table_reexec[0]= temp_table;
tables= 1;
const_tables= 0;
const_table_map= 0;
eliminated_tables= 0;
tmp_table_param.field_count= tmp_table_param.sum_func_count=
tmp_table_param.func_count= 0;
+ /*
+ We need to destruct the copy_field (allocated in create_tmp_table())
+ before setting it to 0 if the join is not "reusable".
+ */
+ if (!tmp_join || tmp_join != this)
+ tmp_table_param.cleanup();
tmp_table_param.copy_field= tmp_table_param.copy_field_end=0;
first_record= sort_and_group=0;
send_records= (ha_rows) 0;
@@ -5943,7 +6000,7 @@ JOIN::make_simple_join(JOIN *parent, TABLE *tmp_table)
do_send_rows= row_limit ? 1 : 0;
join_tab->cache.buff=0; /* No caching */
- join_tab->table=tmp_table;
+ join_tab->table=temp_table;
join_tab->select=0;
join_tab->select_cond=0;
join_tab->quick=0;
@@ -5960,8 +6017,8 @@ JOIN::make_simple_join(JOIN *parent, TABLE *tmp_table)
join_tab->join= this;
join_tab->ref.key_parts= 0;
bzero((char*) &join_tab->read_record,sizeof(join_tab->read_record));
- tmp_table->status=0;
- tmp_table->null_row=0;
+ temp_table->status=0;
+ temp_table->null_row=0;
DBUG_RETURN(FALSE);
}
@@ -6704,10 +6761,7 @@ make_join_readinfo(JOIN *join, ulonglong options)
case JT_CONST: // Only happens with left join
if (table->covering_keys.is_set(tab->ref.key) &&
!table->no_keyread)
- {
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
+ table->set_keyread(TRUE);
break;
case JT_ALL:
/*
@@ -6774,10 +6828,7 @@ make_join_readinfo(JOIN *join, ulonglong options)
if (tab->select && tab->select->quick &&
tab->select->quick->index != MAX_KEY && //not index_merge
table->covering_keys.is_set(tab->select->quick->index))
- {
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
+ table->set_keyread(TRUE);
else if (!table->covering_keys.is_clear_all() &&
!(tab->select && tab->select->quick))
{ // Only read index tree
@@ -6861,11 +6912,7 @@ void JOIN_TAB::cleanup()
limit= 0;
if (table)
{
- if (table->key_read)
- {
- table->key_read= 0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ table->set_keyread(FALSE);
table->file->ha_index_or_rnd_end();
/*
We need to reset this for next select
@@ -7109,9 +7156,11 @@ eq_ref_table(JOIN *join, ORDER *start_order, JOIN_TAB *tab)
}
if (order)
{
- found++;
- DBUG_ASSERT(!(order->used & map));
- order->used|=map;
+ if (!(order->used & map))
+ {
+ found++;
+ order->used|= map;
+ }
continue; // Used in ORDER BY
}
if (!only_eq_ref_tables(join,start_order, (*ref_item)->used_tables()))
@@ -8279,7 +8328,8 @@ static Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
else
{
DBUG_ASSERT(cond->type() == Item::COND_ITEM);
- ((Item_cond *) cond)->add_at_head(&eq_list);
+ if (eq_list.elements)
+ ((Item_cond *) cond)->add_at_head(&eq_list);
}
cond->quick_fix_field();
@@ -8878,8 +8928,14 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top)
we still make the inner tables dependent on the outer tables.
It would be enough to set dependency only on one outer table
for them. Yet this is really a rare case.
+ Note:
+ RAND_TABLE_BIT mask should not be counted as it
+ prevents update of inner table dependencies.
+ For example it might happen if RAND() function
+ is used in JOIN ON clause.
*/
- if (!(prev_table->on_expr->used_tables() & ~prev_used_tables))
+ if (!((prev_table->on_expr->used_tables() & ~RAND_TABLE_BIT) &
+ ~prev_used_tables))
prev_table->dep_tables|= used_tables;
}
}
@@ -9910,7 +9966,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
KEY_PART_INFO *key_part_info;
Item **copy_func;
ENGINE_COLUMNDEF *recinfo;
- uint total_uneven_bit_length= 0;
+ /*
+ total_uneven_bit_length is uneven bit length for visible fields
+ hidden_uneven_bit_length is uneven bit length for hidden fields
+ */
+ uint total_uneven_bit_length= 0, hidden_uneven_bit_length= 0;
bool force_copy_fields= param->force_copy_fields;
/* Treat sum functions as normal ones when loose index scan is used. */
save_sum_fields|= param->precomputed_group_by;
@@ -10194,6 +10254,14 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
*/
param->hidden_field_count= fieldnr;
null_count= 0;
+ /*
+ When we reach the last hidden field we save the uneven bit length
+ accumulated so far in hidden_uneven_bit_length and continue
+ accumulating the uneven bits of the visible fields in the
+ total_uneven_bit_length variable.
+ */
+ hidden_uneven_bit_length= total_uneven_bit_length;
+ total_uneven_bit_length= 0;
}
}
DBUG_ASSERT(fieldnr == (uint) (reg_field - table->field));
@@ -10239,7 +10307,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
else
null_count++;
}
- hidden_null_pack_length=(hidden_null_count+7)/8;
+ hidden_null_pack_length= (hidden_null_count + 7 +
+ hidden_uneven_bit_length) / 8;
null_pack_length= (hidden_null_pack_length +
(null_count + total_uneven_bit_length + 7) / 8);
reclength+=null_pack_length;
@@ -11785,21 +11854,45 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last)
return NESTED_LOOP_KILLED; // Aborted by user /* purecov: inspected */
}
SQL_SELECT *select=join_tab->select;
- if (rc == NESTED_LOOP_OK &&
- (!join_tab->cache.select || !join_tab->cache.select->skip_record()))
+ if (rc == NESTED_LOOP_OK)
{
- uint i;
- reset_cache_read(&join_tab->cache);
- for (i=(join_tab->cache.records- (skip_last ? 1 : 0)) ; i-- > 0 ;)
+ bool consider_record= !join_tab->cache.select ||
+ !join_tab->cache.select->skip_record();
+
+ /*
+ Check for error: skip_record() can execute code by calling
+ Item_subselect::val_*. We need to check for errors (if any)
+ after such call.
+ */
+ if (join->thd->is_error())
{
- read_cached_record(join_tab);
- if (!select || !select->skip_record())
+ reset_cache_write(&join_tab->cache);
+ return NESTED_LOOP_ERROR;
+ }
+
+ if (consider_record)
+ {
+ uint i;
+ reset_cache_read(&join_tab->cache);
+ for (i=(join_tab->cache.records- (skip_last ? 1 : 0)) ; i-- > 0 ;)
{
- rc= (join_tab->next_select)(join,join_tab+1,0);
- if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
+ read_cached_record(join_tab);
+ if (!select || !select->skip_record())
{
- reset_cache_write(&join_tab->cache);
- return rc;
+ /*
+ Check for error: skip_record() can execute code by calling
+ Item_subselect::val_*. We need to check for errors (if any)
+ after such call.
+ */
+ if (join->thd->is_error())
+ rc= NESTED_LOOP_ERROR;
+ else
+ rc= (join_tab->next_select)(join,join_tab+1,0);
+ if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
+ {
+ reset_cache_write(&join_tab->cache);
+ return rc;
+ }
}
}
}
@@ -11889,16 +11982,11 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
!table->no_keyread &&
(int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY)
{
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
+ table->set_keyread(TRUE);
tab->index= tab->ref.key;
}
error=join_read_const(tab);
- if (table->key_read)
- {
- table->key_read=0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ table->set_keyread(FALSE);
if (error)
{
tab->info="unique row not found";
@@ -12280,12 +12368,8 @@ join_read_first(JOIN_TAB *tab)
{
int error= 0;
TABLE *table=tab->table;
- if (!table->key_read && table->covering_keys.is_set(tab->index) &&
- !table->no_keyread)
- {
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
+ if (table->covering_keys.is_set(tab->index) && !table->no_keyread)
+ table->set_keyread(TRUE);
tab->table->status=0;
tab->read_record.read_record=join_read_next;
tab->read_record.table=table;
@@ -12321,12 +12405,8 @@ join_read_last(JOIN_TAB *tab)
{
TABLE *table=tab->table;
int error= 0;
- if (!table->key_read && table->covering_keys.is_set(tab->index) &&
- !table->no_keyread)
- {
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
+ if (table->covering_keys.is_set(tab->index) && !table->no_keyread)
+ table->set_keyread(TRUE);
tab->table->status=0;
tab->read_record.read_record=join_read_prev;
tab->read_record.table=table;
@@ -13098,7 +13178,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
key_part_end=key_part+table->key_info[idx].key_parts;
key_part_map const_key_parts=table->const_key_parts[idx];
int reverse=0;
- my_bool on_primary_key= FALSE;
+ my_bool on_pk_suffix= FALSE;
DBUG_ENTER("test_if_order_by_key");
for (; order ; order=order->next, const_key_parts>>=1)
@@ -13120,11 +13200,12 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
key as a suffix to the secondary keys. If it has continue to check
the primary key as a suffix.
*/
- if (!on_primary_key &&
+ if (!on_pk_suffix &&
(table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
- table->s->primary_key != MAX_KEY)
+ table->s->primary_key != MAX_KEY &&
+ table->s->primary_key != idx)
{
- on_primary_key= TRUE;
+ on_pk_suffix= TRUE;
key_part= table->key_info[table->s->primary_key].key_part;
key_part_end=key_part+table->key_info[table->s->primary_key].key_parts;
const_key_parts=table->const_key_parts[table->s->primary_key];
@@ -13156,7 +13237,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
reverse=flag; // Remember if reverse
key_part++;
}
- if (on_primary_key)
+ if (on_pk_suffix)
{
uint used_key_parts_secondary= table->key_info[idx].key_parts;
uint used_key_parts_pk=
@@ -13183,12 +13264,35 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
uint find_shortest_key(TABLE *table, const key_map *usable_keys)
{
- uint min_length= (uint) ~0;
uint best= MAX_KEY;
+ uint usable_clustered_pk= (table->file->primary_key_is_clustered() &&
+ table->s->primary_key != MAX_KEY &&
+ usable_keys->is_set(table->s->primary_key)) ?
+ table->s->primary_key : MAX_KEY;
if (!usable_keys->is_clear_all())
{
+ uint min_length= (uint) ~0;
for (uint nr=0; nr < table->s->keys ; nr++)
{
+ /*
+ Since
+ 1) a clustered primary key entry contains all record
+ fields (key fields and non-key fields), and
+ 2) a secondary index entry is a union of its key fields and
+ the primary key fields (at least InnoDB and its derivatives don't
+ duplicate primary key fields there, even if the primary and
+ the secondary keys have a common subset of key fields),
+ a secondary index entry is always a subset of a primary key
+ entry, and the PK is always longer.
+ Unfortunately, key_info[nr].key_length doesn't give the length
+ of the key/pointer pair but only the sum of key field lengths, so
+ we can't estimate index IO volume by comparing only this key_length
+ value of secondary keys and the clustered PK.
+ So, try secondary keys first, and choose PK only if there are no
+ usable secondary covering keys:
+ */
+ if (nr == usable_clustered_pk)
+ continue;
if (usable_keys->is_set(nr))
{
if (table->key_info[nr].key_length < min_length)
@@ -13199,7 +13303,7 @@ uint find_shortest_key(TABLE *table, const key_map *usable_keys)
}
}
}
- return best;
+ return best != MAX_KEY ? best : usable_clustered_pk;
}
/**
@@ -13581,12 +13685,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
*/
if (select_limit >= table_records)
{
- /*
- filesort() and join cache are usually faster than reading in
- index order and not using join cache
- */
- if (tab->type == JT_ALL && tab->join->tables > tab->join->const_tables + 1)
- DBUG_RETURN(0);
keys= *table->file->keys_to_use_for_scanning();
keys.merge(table->covering_keys);
@@ -13648,8 +13746,15 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
select_limit= table_records;
if (group)
{
- rec_per_key= used_key_parts ? keyinfo->rec_per_key[used_key_parts-1]
- : 1;
+ /*
+ used_key_parts can be larger than keyinfo->key_parts
+ when using a secondary index clustered with a primary
+ key (e.g. as in InnoDB).
+ See Bug #28591 for details.
+ */
+ rec_per_key= used_key_parts &&
+ used_key_parts <= keyinfo->key_parts ?
+ keyinfo->rec_per_key[used_key_parts-1] : 1;
set_if_bigger(rec_per_key, 1);
/*
With a grouping query each group containing on average
@@ -13729,6 +13834,19 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
}
}
}
+
+ /*
+ filesort() and join cache are usually faster than reading in
+ index order and not using join cache, except when the chosen
+ index is the clustered primary key.
+ */
+ if ((select_limit >= table_records) &&
+ (tab->type == JT_ALL &&
+ tab->join->tables > tab->join->const_tables + 1) &&
+ ((unsigned) best_key != table->s->primary_key ||
+ !table->file->primary_key_is_clustered()))
+ DBUG_RETURN(0);
+
if (best_key >= 0)
{
bool quick_created= FALSE;
@@ -13750,11 +13868,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
If ref_key used index tree reading only ('Using index' in EXPLAIN),
and best_key doesn't, then revert the decision.
*/
- if (!table->covering_keys.is_set(best_key) && table->key_read)
- {
- table->key_read= 0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ if (!table->covering_keys.is_set(best_key))
+ table->set_keyread(FALSE);
if (!quick_created)
{
tab->index= best_key;
@@ -13767,10 +13882,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
select->quick= 0;
}
if (table->covering_keys.is_set(best_key))
- {
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
+ table->set_keyread(TRUE);
table->file->ha_index_or_rnd_end();
if (join->select_options & SELECT_DESCRIBE)
{
@@ -13944,11 +14056,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
We can only use 'Only index' if quick key is same as ref_key
and in index_merge 'Only index' cannot be used
*/
- if (table->key_read && ((uint) tab->ref.key != select->quick->index))
- {
- table->key_read=0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ if (((uint) tab->ref.key != select->quick->index))
+ table->set_keyread(FALSE);
}
else
{
@@ -14004,11 +14113,7 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
tab->type=JT_ALL; // Read with normal read_record
tab->read_first_record= join_init_read_record;
tab->join->examined_rows+=examined_rows;
- if (table->key_read) // Restore if we used indexes
- {
- table->key_read=0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ table->set_keyread(FALSE); // Restore if we used indexes
DBUG_RETURN(table->sort.found_records == HA_POS_ERROR);
err:
DBUG_RETURN(-1);
@@ -14446,7 +14551,7 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
{
used_fields--;
length+=field->fill_cache_field(copy);
- if (copy->blob_field)
+ if (copy->type == CACHE_BLOB)
(*blob_ptr++)=copy;
if (field->real_maybe_null())
null_fields++;
@@ -14461,8 +14566,8 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
{ /* must copy null bits */
copy->str= tables[i].table->null_flags;
copy->length= tables[i].table->s->null_bytes;
- copy->strip=0;
- copy->blob_field=0;
+ copy->type=0;
+ copy->field=0;
length+=copy->length;
copy++;
cache->fields++;
@@ -14472,8 +14577,8 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
{
copy->str= (uchar*) &tables[i].table->null_row;
copy->length=sizeof(tables[i].table->null_row);
- copy->strip=0;
- copy->blob_field=0;
+ copy->type=0;
+ copy->field=0;
length+=copy->length;
copy++;
cache->fields++;
@@ -14498,9 +14603,10 @@ used_blob_length(CACHE_FIELD **ptr)
uint length,blob_length;
for (length=0 ; *ptr ; ptr++)
{
- (*ptr)->blob_length=blob_length=(*ptr)->blob_field->get_length();
+ Field_blob *field_blob= (Field_blob *) (*ptr)->field;
+ (*ptr)->blob_length=blob_length= field_blob->get_length();
length+=blob_length;
- (*ptr)->blob_field->get_ptr(&(*ptr)->str);
+ field_blob->get_ptr(&(*ptr)->str);
}
return length;
}
@@ -14529,30 +14635,35 @@ store_record_in_cache(JOIN_CACHE *cache)
cache->records++;
for (copy=cache->field ; copy < end_field; copy++)
{
- if (copy->blob_field)
+ if (copy->type == CACHE_BLOB)
{
+ Field_blob *blob_field= (Field_blob *) copy->field;
if (last_record)
{
- copy->blob_field->get_image(pos, copy->length+sizeof(char*),
- copy->blob_field->charset());
+ blob_field->get_image(pos, copy->length+sizeof(char*),
+ blob_field->charset());
pos+=copy->length+sizeof(char*);
}
else
{
- copy->blob_field->get_image(pos, copy->length, // blob length
- copy->blob_field->charset());
+ blob_field->get_image(pos, copy->length, // blob length
+ blob_field->charset());
memcpy(pos+copy->length,copy->str,copy->blob_length); // Blob data
pos+=copy->length+copy->blob_length;
}
}
else
{
- if (copy->strip)
+ if (copy->type == CACHE_STRIPPED)
{
uchar *str,*end;
- for (str=copy->str,end= str+copy->length;
- end > str && end[-1] == ' ' ;
- end--) ;
+ Field *field= copy->field;
+ if (field && field->maybe_null() && field->is_null())
+ end= str= copy->str;
+ else
+ for (str=copy->str,end= str+copy->length;
+ end > str && end[-1] == ' ' ;
+ end--) ;
length=(uint) (end-str);
memcpy(pos+2, str, length);
int2store(pos, length);
@@ -14601,23 +14712,24 @@ read_cached_record(JOIN_TAB *tab)
copy < end_field;
copy++)
{
- if (copy->blob_field)
+ if (copy->type == CACHE_BLOB)
{
+ Field_blob *blob_field= (Field_blob *) copy->field;
if (last_record)
{
- copy->blob_field->set_image(pos, copy->length+sizeof(char*),
- copy->blob_field->charset());
+ blob_field->set_image(pos, copy->length+sizeof(char*),
+ blob_field->charset());
pos+=copy->length+sizeof(char*);
}
else
{
- copy->blob_field->set_ptr(pos, pos+copy->length);
- pos+=copy->length+copy->blob_field->get_length();
+ blob_field->set_ptr(pos, pos+copy->length);
+ pos+=copy->length + blob_field->get_length();
}
}
else
{
- if (copy->strip)
+ if (copy->type == CACHE_STRIPPED)
{
length= uint2korr(pos);
memcpy(copy->str, pos+2, length);
@@ -14827,11 +14939,29 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
We check order_item->fixed because Item_func_group_concat can put
arguments for which fix_fields already was called.
+
+ group_fix_field= TRUE is set to resolve aliases from the SELECT list
+ without creating Item_refs: JOIN::exec() wraps aliased items
+ in the SELECT list with Item_copy items. To re-evaluate such a tree
+ that includes Item_copy items we would have to refresh the Item_copy
+ caches, but:
+ - filesort() never refreshes Item_copy items,
+ - end_send_group() checks every record for a group boundary via
+ test_if_group_changed, which obtains data from these
+ Item_copy items, yet the copy_fields function that
+ refreshes Item_copy items is only called after group boundaries -
+ a vicious circle.
+ So we prevent the inclusion of Item_copy items.
*/
- if (!order_item->fixed &&
+ bool save_group_fix_field= thd->lex->current_select->group_fix_field;
+ if (is_group_field)
+ thd->lex->current_select->group_fix_field= TRUE;
+ bool ret= (!order_item->fixed &&
(order_item->fix_fields(thd, order->item) ||
(order_item= *order->item)->check_cols(1) ||
- thd->is_fatal_error))
+ thd->is_fatal_error));
+ thd->lex->current_select->group_fix_field= save_group_fix_field;
+ if (ret)
return TRUE; /* Wrong field. */
uint el= all_fields.elements;
@@ -15913,7 +16043,7 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
Item_cond_and *cond=new Item_cond_and();
TABLE *table=join_tab->table;
- int error;
+ int error= 0;
if (!cond)
DBUG_RETURN(TRUE);
@@ -15935,7 +16065,8 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
}
if (join_tab->select)
{
- error=(int) cond->add(join_tab->select->cond);
+ if (join_tab->select->cond)
+ error=(int) cond->add(join_tab->select->cond);
join_tab->select_cond=join_tab->select->cond=cond;
}
else if ((join_tab->select= make_select(join_tab->table, 0, 0, cond, 0,
@@ -17211,7 +17342,17 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
first= 0;
else
str->append(',');
- item->print_item_w_name(str, query_type);
+
+ if (master_unit()->item && item->is_autogenerated_name)
+ {
+ /*
+ Do not print auto-generated aliases in subqueries. It has no purpose
+ in a view definition or other contexts where the query is printed.
+ */
+ item->print(str, query_type);
+ }
+ else
+ item->print_item_w_name(str, query_type);
}
/*
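Editor's note: among the sql_select.cc changes above, find_shortest_key() now skips a usable clustered primary key while scanning for the shortest covering key and falls back to it only when no secondary key qualifies, because key_length understates the real size of clustered PK entries. Below is a standalone sketch of that selection rule over toy key metadata; it is not the server's key_map code.

@code
#include <iostream>
#include <vector>

struct KeyInfo {
  const char *name;
  unsigned length;   // sum of key field lengths, like key_info[nr].key_length
  bool usable;       // covering and allowed for this query
};

// Pick the shortest usable key, skipping the clustered PK; use the PK only
// when no usable secondary key exists (mirrors the revised selection rule).
int pick_key(const std::vector<KeyInfo> &keys, int clustered_pk) {
  int best = -1;
  unsigned best_len = ~0u;
  for (int nr = 0; nr < static_cast<int>(keys.size()); ++nr) {
    if (nr == clustered_pk || !keys[nr].usable)
      continue;
    if (keys[nr].length < best_len) {
      best_len = keys[nr].length;
      best = nr;
    }
  }
  if (best == -1 && clustered_pk >= 0 && keys[clustered_pk].usable)
    best = clustered_pk;             // fall back to the clustered PK
  return best;
}

int main() {
  std::vector<KeyInfo> keys = {{"PRIMARY", 4, true},
                               {"idx_a", 8, true},
                               {"idx_ab", 12, true}};
  std::cout << keys[pick_key(keys, 0)].name << "\n";   // idx_a
}
@endcode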
diff --git a/sql/sql_select.h b/sql/sql_select.h
index c50303ee812..285019b4a5c 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -95,6 +95,10 @@ typedef struct st_table_ref
} TABLE_REF;
+
+#define CACHE_BLOB 1 /* blob field */
+#define CACHE_STRIPPED 2 /* field stripped of trailing spaces */
+
/**
CACHE_FIELD and JOIN_CACHE is used on full join to cache records in outer
table
@@ -103,8 +107,8 @@ typedef struct st_table_ref
typedef struct st_cache_field {
uchar *str;
uint length, blob_length;
- Field_blob *blob_field;
- bool strip;
+ Field *field;
+ uint type; /**< category of the copied field (CACHE_BLOB et al.) */
} CACHE_FIELD;
@@ -362,7 +366,25 @@ public:
*/
bool no_const_tables;
- JOIN *tmp_join; ///< copy of this JOIN to be used with temporary tables
+ /**
+ Copy of this JOIN to be used with temporary tables.
+
+ tmp_join is used when the JOIN needs to be "reusable" (e.g. in a subquery
+ that gets re-executed several times) and we know it will use temporary
+ tables for materialization. The materialization to a temporary table
+ overwrites the JOIN structure to point to the temporary table after the
+ materialization is done. This is where tmp_join is used: it is a copy of
+ the JOIN made before the materialization, and it is used to restore the
+ JOIN before re-execution by overwriting the current JOIN structure with
+ the saved copy.
+ Because of this we must take extra care not to free helper structures
+ that are referenced by the original contents of the JOIN. We can check for
+ this by making sure the "current" join is not the temporary copy, e.g.
+ !tmp_join || tmp_join != join
+
+ We should free these sub-structures at JOIN::destroy() only if the
+ "current" join has a copy and is not that copy itself.
+ */
+ JOIN *tmp_join;
ROLLUP rollup; ///< Used with rollup
bool select_distinct; ///< Set if SELECT DISTINCT
@@ -727,10 +749,11 @@ public:
we need to check for errors executing it and react accordingly
*/
if (!res && table->in_use->is_error())
- res= 2;
+ res= 1; /* STORE_KEY_FATAL */
dbug_tmp_restore_column_map(table->write_set, old_map);
null_key= to_field->is_null() || item->null_value;
- return (err != 0 || res > 2 ? STORE_KEY_FATAL : (store_key_result) res);
+ return ((err != 0 || res < 0 || res > 2) ? STORE_KEY_FATAL :
+ (store_key_result) res);
}
};
@@ -759,17 +782,17 @@ protected:
if ((res= item->save_in_field(to_field, 1)))
{
if (!err)
- err= res;
+ err= res < 0 ? 1 : res; /* 1=STORE_KEY_FATAL */
}
/*
Item::save_in_field() may call Item::val_xxx(). And if this is a subquery
we need to check for errors executing it and react accordingly
*/
if (!err && to_field->table->in_use->is_error())
- err= 2;
+ err= 1; /* STORE_KEY_FATAL */
}
null_key= to_field->is_null() || item->null_value;
- return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err);
+ return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err);
}
};
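Editor's note: the CACHE_FIELD change above replaces the blob_field pointer and strip flag with a generic Field pointer plus a type tag (CACHE_BLOB, CACHE_STRIPPED). The sketch below shows the kind of dispatch such a tag enables when copying cached column values; CacheField here is a toy struct, not the server's CACHE_FIELD.

@code
#include <iostream>
#include <string>

enum CacheType { CACHE_PLAIN = 0, CACHE_BLOB = 1, CACHE_STRIPPED = 2 };

struct CacheField {
  std::string str;   // source bytes for this column
  unsigned type;     // CACHE_PLAIN, CACHE_BLOB or CACHE_STRIPPED
};

// Copy one cached field, stripping trailing spaces only for the
// CACHE_STRIPPED category -- the role the old 'strip' flag played.
std::string copy_field(const CacheField &f) {
  switch (f.type) {
  case CACHE_BLOB:
    return f.str;                        // blobs are copied as-is
  case CACHE_STRIPPED: {
    size_t end = f.str.size();
    while (end > 0 && f.str[end - 1] == ' ')
      --end;
    return f.str.substr(0, end);
  }
  default:
    return f.str;
  }
}

int main() {
  CacheField a{"abc   ", CACHE_STRIPPED};
  CacheField b{"abc   ", CACHE_BLOB};
  std::cout << '[' << copy_field(a) << "] [" << copy_field(b) << "]\n";
}
@endcode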
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 68d1d4619f4..a288cad88d7 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -2007,6 +2007,8 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
pthread_mutex_unlock(&mysys_var->mutex);
/* INFO */
+ /* Lock THD mutex that protects its data when looking at it. */
+ pthread_mutex_lock(&tmp->LOCK_thd_data);
if (tmp->query())
{
table->field[7]->store(tmp->query(),
@@ -2014,6 +2016,7 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
tmp->query_length()), cs);
table->field[7]->set_notnull();
}
+ pthread_mutex_unlock(&tmp->LOCK_thd_data);
/* TIME_MS */
table->field[8]->store((double)(utime / 1000.0));
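
The processlist hunk above takes the inspected thread's LOCK_thd_data before reading its query string, so the string cannot be swapped or freed mid-read by the thread that owns it. A minimal standalone sketch of the same guarded-read pattern, assuming a hypothetical trimmed-down stand-in for the THD members involved (not the server's types):

  #include <pthread.h>
  #include <stdio.h>
  #include <string.h>

  /* Hypothetical stand-in for the parts of THD touched in the hunk. */
  struct thd_like
  {
    pthread_mutex_t LOCK_thd_data;   /* protects query / query_length */
    const char *query;
    size_t query_length;
  };

  /* Copy the running statement out under the owner's mutex, as
     fill_schema_processlist() now does before storing the INFO column. */
  static size_t snapshot_query(thd_like *tmp, char *buf, size_t buf_size)
  {
    size_t len= 0;
    pthread_mutex_lock(&tmp->LOCK_thd_data);
    if (tmp->query)
    {
      len= tmp->query_length < buf_size - 1 ? tmp->query_length : buf_size - 1;
      memcpy(buf, tmp->query, len);
      buf[len]= '\0';
    }
    pthread_mutex_unlock(&tmp->LOCK_thd_data);
    return len;
  }

  int main()
  {
    thd_like t= { PTHREAD_MUTEX_INITIALIZER, "SELECT 1", 8 };
    char buf[64];
    printf("%zu: %s\n", snapshot_query(&t, buf, sizeof(buf)), buf);
    return 0;
  }
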
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 6b7b3e2c0ab..4ed47ed5566 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -22,6 +22,7 @@
#include "sp_head.h"
#include "sql_trigger.h"
#include "sql_show.h"
+#include "debug_sync.h"
#ifdef __WIN__
#include <io.h>
@@ -1889,23 +1890,10 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
pthread_mutex_lock(&LOCK_open);
- /*
- If we have the table in the definition cache, we don't have to check the
- .frm file to find if the table is a normal table (not view) and what
- engine to use.
- */
-
+ /* Disable drop of enabled log tables, must be done before name locking */
for (table= tables; table; table= table->next_local)
{
- TABLE_SHARE *share;
- table->db_type= NULL;
-
- if ((share= get_cached_table_share(table->db, table->table_name)))
- table->db_type= share->db_type();
-
- /* Disable drop of enabled log tables */
- if (share && (share->table_category == TABLE_CATEGORY_PERFORMANCE) &&
- check_if_log_table(table->db_length, table->db,
+ if (check_if_log_table(table->db_length, table->db,
table->table_name_length, table->table_name, 1))
{
my_error(ER_BAD_LOG_STATEMENT, MYF(0), "DROP");
@@ -1924,7 +1912,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
{
char *db=table->db;
handlerton *table_type;
- enum legacy_db_type frm_db_type;
+ enum legacy_db_type frm_db_type= DB_TYPE_UNKNOWN;
DBUG_PRINT("table", ("table_l: '%s'.'%s' table: 0x%lx s: 0x%lx",
table->db, table->table_name, (long) table->table,
@@ -1989,7 +1977,6 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
built_query.append("`,");
}
- table_type= table->db_type;
if (!drop_temporary)
{
TABLE *locked_table;
@@ -2017,9 +2004,9 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
table->internal_tmp_table ?
FN_IS_TMP : 0);
}
+ DEBUG_SYNC(thd, "rm_table_part2_before_delete_table");
if (drop_temporary ||
- ((table_type == NULL &&
- access(path, F_OK) &&
+ ((access(path, F_OK) &&
ha_create_table_from_engine(thd, db, alias)) ||
(!drop_view &&
mysql_frm_type(thd, path, &frm_db_type) != FRMTYPE_TABLE)))
@@ -2035,15 +2022,25 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
else
{
char *end;
- if (table_type == NULL)
+ /*
+ Cannot use the db_type from the table, since that might have changed
+ while waiting for the exclusive name lock. We are under LOCK_open,
+ so reading from the frm-file is safe.
+ */
+ if (frm_db_type == DB_TYPE_UNKNOWN)
{
- mysql_frm_type(thd, path, &frm_db_type);
- table_type= ha_resolve_by_legacy_type(thd, frm_db_type);
+ mysql_frm_type(thd, path, &frm_db_type);
+ DBUG_PRINT("info", ("frm_db_type %d from %s", frm_db_type, path));
}
+ table_type= ha_resolve_by_legacy_type(thd, frm_db_type);
// Remove extension for delete
*(end= path + path_length - reg_ext_length)= '\0';
+ DBUG_PRINT("info", ("deleting table of type %d",
+ (table_type ? table_type->db_type : 0)));
error= ha_delete_table(thd, table_type, path, db, table->table_name,
!dont_log_query);
+
+ /* No error if non-existent table and 'IF EXISTS' clause or view */
if ((error == ENOENT || error == HA_ERR_NO_SUCH_TABLE) &&
(if_exists || table_type == NULL))
{
@@ -2083,6 +2080,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
on the table name.
*/
pthread_mutex_unlock(&LOCK_open);
+ DEBUG_SYNC(thd, "rm_table_part2_before_binlog");
thd->thread_specific_used|= tmp_table_deleted;
error= 0;
if (wrong_tables.length())
@@ -5580,6 +5578,45 @@ err:
DBUG_RETURN(-1);
}
+/**
+ @brief Check if both DROP and CREATE are present for an index in ALTER TABLE
+
+ @details Checks if any index is being modified (present as both DROP INDEX
+ and ADD INDEX) in the current ALTER TABLE statement. Needed for disabling
+ online ALTER TABLE.
+
+ @param table The table being altered
+ @param alter_info The ALTER TABLE structure
+ @return whether at least one index is both dropped and re-created
+ @retval FALSE No such index
+ @retval TRUE At least one index is modified
+*/
+
+static bool
+is_index_maintenance_unique (TABLE *table, Alter_info *alter_info)
+{
+ List_iterator<Key> key_it(alter_info->key_list);
+ List_iterator<Alter_drop> drop_it(alter_info->drop_list);
+ Key *key;
+
+ while ((key= key_it++))
+ {
+ if (key->name)
+ {
+ Alter_drop *drop;
+
+ drop_it.rewind();
+ while ((drop= drop_it++))
+ {
+ if (drop->type == Alter_drop::KEY &&
+ !my_strcasecmp(system_charset_info, key->name, drop->name))
+ return TRUE;
+ }
+ }
+ }
+ return FALSE;
+}
+
/*
SYNOPSIS
@@ -6887,10 +6924,14 @@ view_err:
*/
new_db_type= create_info->db_type;
+ if (is_index_maintenance_unique (table, alter_info))
+ need_copy_table= ALTER_TABLE_DATA_CHANGED;
+
if (mysql_prepare_alter_table(thd, table, create_info, alter_info))
goto err;
- need_copy_table= alter_info->change_level;
+ if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
+ need_copy_table= alter_info->change_level;
set_table_default_charset(thd, create_info, db);
@@ -7155,6 +7196,7 @@ view_err:
else
create_info->data_file_name=create_info->index_file_name=0;
+ DEBUG_SYNC(thd, "alter_table_before_create_table_no_lock");
/*
Create a table with a temporary name.
With create_info->frm_only == 1 this creates a .frm file only.
@@ -7360,6 +7402,7 @@ view_err:
intern_close_table(new_table);
my_free(new_table,MYF(0));
}
+ DEBUG_SYNC(thd, "alter_table_before_rename_result_table");
VOID(pthread_mutex_lock(&LOCK_open));
if (error)
{
@@ -7502,6 +7545,7 @@ view_err:
thd_proc_info(thd, "end");
DBUG_EXECUTE_IF("sleep_alter_before_main_binlog", my_sleep(6000000););
+ DEBUG_SYNC(thd, "alter_table_before_main_binlog");
ha_binlog_log_query(thd, create_info->db_type, LOGCOM_ALTER_TABLE,
thd->query(), thd->query_length(),
@@ -7977,26 +8021,31 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
for (uint i= 0; i < t->s->fields; i++ )
{
Field *f= t->field[i];
- enum_field_types field_type= f->type();
if (! thd->variables.old_mode &&
f->is_real_null(0))
continue;
- /*
- BLOB and VARCHAR have pointers in their field, we must convert
- to string; GEOMETRY is implemented on top of BLOB.
- */
- if ((field_type == MYSQL_TYPE_BLOB) ||
- (field_type == MYSQL_TYPE_VARCHAR) ||
- (field_type == MYSQL_TYPE_GEOMETRY))
- {
- String tmp;
- f->val_str(&tmp);
- row_crc= my_checksum(row_crc, (uchar*) tmp.ptr(), tmp.length());
+ /*
+ BLOB and VARCHAR have pointers in their field, we must convert
+ to string; GEOMETRY is implemented on top of BLOB.
+ BIT may store its data among NULL bits, convert as well.
+ */
+ switch (f->type()) {
+ case MYSQL_TYPE_BLOB:
+ case MYSQL_TYPE_VARCHAR:
+ case MYSQL_TYPE_GEOMETRY:
+ case MYSQL_TYPE_BIT:
+ {
+ String tmp;
+ f->val_str(&tmp);
+ row_crc= my_checksum(row_crc, (uchar*) tmp.ptr(),
+ tmp.length());
+ break;
+ }
+ default:
+ row_crc= my_checksum(row_crc, f->ptr, f->pack_length());
+ break;
}
- else
- row_crc= my_checksum(row_crc, f->ptr,
- f->pack_length());
}
crc+= row_crc;
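
The CHECKSUM TABLE hunk above splits fields into two groups: types whose value lives outside the fixed record image (BLOB, VARCHAR, GEOMETRY, and now BIT) are rendered with val_str() first, while everything else is checksummed straight from its packed bytes, so raw pointers never end up in the checksum. A standalone sketch of that split, assuming a toy checksum and a hypothetical miniature field in place of my_checksum() and Field:

  #include <stdio.h>
  #include <string>

  /* Toy rolling checksum, a stand-in for the server's my_checksum(). */
  static unsigned long toy_checksum(unsigned long crc, const unsigned char *p, size_t len)
  {
    for (size_t i= 0; i < len; i++)
      crc= crc * 31 + p[i];
    return crc;
  }

  /* Hypothetical miniature field: either the packed bytes are the value,
     or (as for BLOB/VARCHAR/GEOMETRY/BIT) the value must be rendered first. */
  struct mini_field
  {
    bool value_is_out_of_line;
    const unsigned char *ptr;    /* what f->ptr points at */
    size_t pack_length;          /* what f->pack_length() returns */
    std::string value;           /* what f->val_str() would render */
  };

  /* Mirrors the switch in mysql_checksum_table(): checksum the value a
     pointer-bearing field refers to, never the pointer bytes themselves. */
  static unsigned long checksum_field(unsigned long crc, const mini_field &f)
  {
    if (f.value_is_out_of_line)
      return toy_checksum(crc, (const unsigned char*) f.value.data(), f.value.size());
    return toy_checksum(crc, f.ptr, f.pack_length);
  }

  int main()
  {
    const unsigned char packed[4]= { 1, 2, 3, 4 };
    mini_field int_col=  { false, packed, sizeof(packed), "" };
    mini_field blob_col= { true,  0,      0,              "hello" };
    unsigned long crc= checksum_field(checksum_field(0, int_col), blob_col);
    printf("row crc: %lu\n", crc);
    return 0;
  }
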
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index ba0515d38ad..aafb25013f6 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -327,6 +327,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
TABLE *table;
bool result= TRUE;
String stmt_query;
+ Query_tables_list backup;
bool need_start_waiting= FALSE;
DBUG_ENTER("mysql_create_or_drop_trigger");
@@ -393,6 +394,12 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
{
bool if_exists= thd->lex->drop_if_exists;
+ /*
+ Protect the query table list from the temporary and potentially
+ destructive changes necessary to open the trigger's table.
+ */
+ thd->lex->reset_n_backup_query_tables_list(&backup);
+
if (add_table_for_trigger(thd, thd->lex->spname, if_exists, & tables))
goto end;
@@ -512,6 +519,10 @@ end:
VOID(pthread_mutex_unlock(&LOCK_open));
+ /* Restore the query table list. Used only for drop trigger. */
+ if (!create)
+ thd->lex->restore_backup_query_tables_list(&backup);
+
if (need_start_waiting)
start_waiting_global_read_lock(thd);
@@ -1625,10 +1636,6 @@ bool add_table_for_trigger(THD *thd,
if (load_table_name_for_trigger(thd, trg_name, &trn_path, &tbl_name))
DBUG_RETURN(TRUE);
- /* We need to reset statement table list to be PS/SP friendly. */
- lex->query_tables= 0;
- lex->query_tables_last= &lex->query_tables;
-
*table= sp_add_to_query_tables(thd, lex, trg_name->m_db.str,
tbl_name.str, TL_IGNORE);
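
The trigger hunks above save the statement's query-table list with reset_n_backup_query_tables_list() before the destructive changes needed to open the trigger's subject table, and restore it afterwards on the DROP path; the old in-place reset of lex->query_tables is gone. A minimal standalone sketch of the backup/restore idea, assuming a hypothetical list type; note the patch restores explicitly rather than through a destructor, the guard below is just the condensed shape of the pattern:

  #include <cassert>
  #include <string>
  #include <vector>

  /* Hypothetical stand-in for the statement's query-table list. */
  struct stmt_tables { std::vector<std::string> tables; };

  /* Back up the list, reset it for temporary use, restore it on exit. */
  struct tables_backup_guard
  {
    stmt_tables *live;
    stmt_tables saved;
    explicit tables_backup_guard(stmt_tables *s) : live(s), saved(*s)
    { live->tables.clear(); }                /* reset, like reset_n_backup_... */
    ~tables_backup_guard() { *live= saved; } /* restore on every exit path */
  };

  int main()
  {
    stmt_tables stmt;
    stmt.tables.push_back("t1");
    {
      tables_backup_guard guard(&stmt);      /* temporary, destructive changes */
      stmt.tables.push_back("trigger_subject_table");
      assert(stmt.tables.size() == 1);
    }
    assert(stmt.tables.size() == 1 && stmt.tables[0] == "t1");
    return 0;
  }
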
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 0bef5aa3ae8..1d1f9dedf22 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -397,10 +397,7 @@ int mysql_update(THD *thd,
matching rows before updating the table!
*/
if (used_index < MAX_KEY && old_covering_keys.is_set(used_index))
- {
- table->key_read=1;
table->mark_columns_used_by_index(used_index);
- }
else
{
table->use_all_columns();
@@ -844,11 +841,7 @@ int mysql_update(THD *thd,
err:
delete select;
free_underlaid_joins(thd, select_lex);
- if (table->key_read)
- {
- table->key_read=0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ table->set_keyread(FALSE);
thd->abort_on_warning= 0;
DBUG_RETURN(1);
}
@@ -1195,6 +1188,56 @@ reopen_tables:
}
+/**
+ Implementation of the safe-update error handling during UPDATE IGNORE. The
+ IGNORE syntax causes an UPDATE statement to ignore all errors. In safe update
+ mode, however, we must never ignore the ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
+ error. There is a special hook in my_message_sql that would otherwise convert
+ all errors to warnings when the IGNORE option is specified.
+
+ In the future, all IGNORE handling should be done with this class and all
+ traces of the hack outlined below should be removed.
+
+ - The parser detects the IGNORE option and sets thd->lex->ignore= 1
+
+ - In JOIN::optimize, if this is set, then
+ thd->lex->current_select->no_error gets set.
+
+ - In my_message_sql(), if the flag above is set then any error is
+ unconditionally converted to a warning.
+
+ We are moving in the direction of using Internal_error_handler subclasses
+ to do all such error tweaking; please continue this effort if new bugs
+ appear.
+ */
+class Safe_dml_handler : public Internal_error_handler {
+
+private:
+ bool m_handled_error;
+
+public:
+ explicit Safe_dml_handler() : m_handled_error(FALSE) {}
+
+ bool handle_error(uint sql_errno,
+ const char *message,
+ MYSQL_ERROR::enum_warning_level level,
+ THD *thd)
+ {
+ if (level == MYSQL_ERROR::WARN_LEVEL_ERROR &&
+ sql_errno == ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE)
+
+ {
+ thd->main_da.set_error_status(thd, sql_errno, message);
+ m_handled_error= TRUE;
+ return TRUE;
+ }
+ return FALSE;
+ }
+
+ bool handled_error() { return m_handled_error; }
+
+};
+
/*
Setup multi-update handling and call SELECT to do the join
*/
@@ -1223,18 +1266,36 @@ bool mysql_multi_update(THD *thd,
MODE_STRICT_ALL_TABLES));
List<Item> total_list;
+
+ Safe_dml_handler handler;
+ bool using_handler= thd->options & OPTION_SAFE_UPDATES;
+ if (using_handler)
+ thd->push_internal_handler(&handler);
+
res= mysql_select(thd, &select_lex->ref_pointer_array,
- table_list, select_lex->with_wild,
- total_list,
- conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL,
- (ORDER *)NULL,
- options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
- OPTION_SETUP_TABLES_DONE,
- result, unit, select_lex);
- DBUG_PRINT("info",("res: %d report_error: %d", res,
- (int) thd->is_error()));
+ table_list, select_lex->with_wild,
+ total_list,
+ conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL,
+ (ORDER *)NULL,
+ options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
+ OPTION_SETUP_TABLES_DONE,
+ result, unit, select_lex);
+
+ if (using_handler)
+ {
+ Internal_error_handler *top_handler;
+ top_handler= thd->pop_internal_handler();
+ DBUG_ASSERT(&handler == top_handler);
+ }
+
+ DBUG_PRINT("info",("res: %d report_error: %d", res, (int) thd->is_error()));
res|= thd->is_error();
- if (unlikely(res))
+ /*
+ Todo: remove below code and make Safe_dml_handler do error processing
+ instead. That way we can return the actual error instead of
+ ER_UNKNOWN_ERROR.
+ */
+ if (unlikely(res) && (!using_handler || !handler.handled_error()))
{
/* If we had another error reported earlier then this will be ignored */
result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
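
Safe_dml_handler above is only pushed when OPTION_SAFE_UPDATES is set, so ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE survives as an error instead of being downgraded by the IGNORE hack; the real handler also records the error in the diagnostics area via thd->main_da.set_error_status(). A standalone sketch of the push/handle/pop shape, assuming simplified stand-ins for the handler interface and an illustrative error number (the real interface is Internal_error_handler in sql_class.h):

  #include <stdio.h>
  #include <vector>

  enum warn_level { WARN_LEVEL_NOTE, WARN_LEVEL_WARN, WARN_LEVEL_ERROR };

  /* Simplified stand-in for Internal_error_handler. */
  struct error_handler
  {
    virtual ~error_handler() {}
    /* Return true when the error has been consumed. */
    virtual bool handle_error(unsigned sql_errno, const char *msg, warn_level level)= 0;
  };

  static const unsigned ER_SAFE_MODE_NO_KEY= 1175;  /* illustrative number only */

  /* Same shape as Safe_dml_handler: keep the safe-mode error fatal even when
     UPDATE ... IGNORE would otherwise turn every error into a warning. */
  struct safe_dml_handler : error_handler
  {
    bool handled;
    safe_dml_handler() : handled(false) {}
    bool handle_error(unsigned sql_errno, const char*, warn_level level)
    {
      if (level == WARN_LEVEL_ERROR && sql_errno == ER_SAFE_MODE_NO_KEY)
      {
        handled= true;   /* lets the caller skip the generic send_error() */
        return true;     /* consumed: the IGNORE downgrade never sees it */
      }
      return false;      /* everything else takes the normal path */
    }
  };

  /* Toy handler stack mimicking THD::push/pop_internal_handler(). */
  struct toy_thd
  {
    std::vector<error_handler*> handlers;
    void push_handler(error_handler *h) { handlers.push_back(h); }
    error_handler *pop_handler()
    { error_handler *h= handlers.back(); handlers.pop_back(); return h; }
    void raise(unsigned errnum, const char *msg, warn_level level)
    {
      if (!handlers.empty() && handlers.back()->handle_error(errnum, msg, level))
        return;                               /* consumed by the top handler */
      printf("error %u: %s\n", errnum, msg);  /* default reporting */
    }
  };

  int main()
  {
    toy_thd thd;
    safe_dml_handler handler;
    thd.push_handler(&handler);
    thd.raise(ER_SAFE_MODE_NO_KEY, "update without key in safe mode", WARN_LEVEL_ERROR);
    thd.pop_handler();
    printf("handled: %d\n", (int) handler.handled);
    return 0;
  }
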
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index e1e0b3ff6c4..b9379d8898f 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -155,6 +155,35 @@ err:
DBUG_RETURN(TRUE);
}
+
+/**
+ Check that auto-generated column names are conforming and
+ generate a conforming name for any that are not.
+
+ @param item_list List of Items which should be checked
+*/
+
+static void make_valid_column_names(List<Item> &item_list)
+{
+ Item *item;
+ uint name_len;
+ List_iterator_fast<Item> it(item_list);
+ char buff[NAME_LEN];
+ DBUG_ENTER("make_valid_column_names");
+
+ for (uint column_no= 1; (item= it++); column_no++)
+ {
+ if (!item->is_autogenerated_name || !check_column_name(item->name))
+ continue;
+ name_len= my_snprintf(buff, NAME_LEN, "Name_exp_%u", column_no);
+ item->orig_name= item->name;
+ item->set_name(buff, name_len, system_charset_info);
+ }
+
+ DBUG_VOID_RETURN;
+}
+
+
/*
Fill defined view parts
@@ -400,17 +429,14 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
DBUG_ASSERT(!lex->proc_list.first && !lex->result &&
!lex->param_list.elements);
- if (mode != VIEW_CREATE_NEW)
+ if (mode == VIEW_ALTER && fill_defined_view_parts(thd, view))
{
- if (mode == VIEW_ALTER &&
- fill_defined_view_parts(thd, view))
- {
- res= TRUE;
- goto err;
- }
- sp_cache_invalidate();
+ res= TRUE;
+ goto err;
}
+ sp_cache_invalidate();
+
if (!lex->definer)
{
/*
@@ -551,6 +577,9 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
}
}
+ /* Check if the auto generated column names are conforming. */
+ make_valid_column_names(select_lex->item_list);
+
if (check_duplicate_names(select_lex->item_list, 1))
{
res= TRUE;
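
make_valid_column_names() above renames any auto-generated view column name that check_column_name() rejects to "Name_exp_<position>", counting columns from 1, and keeps the original in item->orig_name. A standalone sketch of the naming rule, assuming a hypothetical validity check in place of check_column_name() (for illustration it only rejects names ending in a space) and plain strings in place of Items:

  #include <stdio.h>
  #include <string>
  #include <vector>

  /* Hypothetical stand-in for check_column_name(); returns true when the
     name cannot be used as-is. */
  static bool column_name_is_invalid(const std::string &name)
  {
    return !name.empty() && name[name.size() - 1] == ' ';
  }

  /* Rename invalid auto-generated names to Name_exp_<column position>,
     counting from 1, as the new make_valid_column_names() does. */
  static void make_valid_column_names(std::vector<std::string> &names)
  {
    char buff[64];
    for (size_t i= 0; i < names.size(); i++)
    {
      if (!column_name_is_invalid(names[i]))
        continue;
      snprintf(buff, sizeof(buff), "Name_exp_%zu", i + 1);
      names[i]= buff;
    }
  }

  int main()
  {
    std::vector<std::string> names;
    names.push_back("a");
    names.push_back("b ");   /* pretend this auto-generated name is invalid */
    make_valid_column_names(names);
    printf("%s, %s\n", names[0].c_str(), names[1].c_str());  /* a, Name_exp_2 */
    return 0;
  }
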
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index fc2e18f28f2..69ac06eaa3e 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1,4 +1,4 @@
-/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -8029,7 +8029,7 @@ function_call_generic:
builder= find_native_function_builder(thd, $1);
if (builder)
{
- item= builder->create(thd, $1, $4);
+ item= builder->create_func(thd, $1, $4);
}
else
{
@@ -8051,7 +8051,7 @@ function_call_generic:
{
builder= find_qualified_function_builder(thd);
DBUG_ASSERT(builder);
- item= builder->create(thd, $1, $4);
+ item= builder->create_func(thd, $1, $4);
}
}
diff --git a/sql/table.cc b/sql/table.cc
index 733aa3e6887..78c415286a2 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -3372,7 +3372,7 @@ bool TABLE_LIST::prep_check_option(THD *thd, uint8 check_opt_type)
void TABLE_LIST::hide_view_error(THD *thd)
{
- if (thd->get_internal_handler())
+ if (thd->killed || thd->get_internal_handler())
return;
/* Hide "Unknown column" or "Unknown function" error */
DBUG_ASSERT(thd->is_error());
@@ -4030,9 +4030,7 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref,
{
DBUG_RETURN(field);
}
- Item *item= new Item_direct_view_ref(&view->view->select_lex.context,
- field_ref, view->alias,
- name);
+ Item *item= new Item_direct_view_ref(view, field_ref, name);
DBUG_RETURN(item);
}
@@ -4381,7 +4379,7 @@ void st_table::mark_columns_used_by_index(uint index)
MY_BITMAP *bitmap= &tmp_set;
DBUG_ENTER("st_table::mark_columns_used_by_index");
- (void) file->extra(HA_EXTRA_KEYREAD);
+ set_keyread(TRUE);
bitmap_clear_all(bitmap);
mark_columns_used_by_index_no_reset(index, bitmap);
column_bitmaps_set(bitmap, bitmap);
@@ -4404,8 +4402,7 @@ void st_table::restore_column_maps_after_mark_index()
{
DBUG_ENTER("st_table::restore_column_maps_after_mark_index");
- key_read= 0;
- (void) file->extra(HA_EXTRA_NO_KEYREAD);
+ set_keyread(FALSE);
default_column_bitmaps();
file->column_bitmaps_signal();
DBUG_VOID_RETURN;
diff --git a/sql/table.h b/sql/table.h
index a24e79e26cf..e43e6e5950f 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -905,6 +905,20 @@ struct st_table {
inline bool needs_reopen_or_name_lock()
{ return s->version != refresh_version; }
bool is_children_attached(void);
+ inline void set_keyread(bool flag)
+ {
+ DBUG_ASSERT(file);
+ if (flag && !key_read)
+ {
+ key_read= 1;
+ file->extra(HA_EXTRA_KEYREAD);
+ }
+ else if (!flag && key_read)
+ {
+ key_read= 0;
+ file->extra(HA_EXTRA_NO_KEYREAD);
+ }
+ }
};
enum enum_schema_table_state
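
The new st_table::set_keyread() above (used from sql_update.cc and table.cc earlier in this diff) keeps the key_read flag and the HA_EXTRA_KEYREAD / HA_EXTRA_NO_KEYREAD calls in one place and only talks to the engine when the state actually changes. A standalone sketch of that idempotent toggle, assuming toy stand-ins for the table and handler types:

  #include <stdio.h>

  /* Illustrative stand-ins for the two handler "extra" operations used above. */
  enum extra_op { TOY_KEYREAD, TOY_NO_KEYREAD };

  struct toy_handler
  {
    void extra(extra_op op)
    { printf(op == TOY_KEYREAD ? "engine: keyread on\n" : "engine: keyread off\n"); }
  };

  /* Same shape as st_table::set_keyread(): redundant calls are no-ops. */
  struct toy_table
  {
    toy_handler *file;
    bool key_read;
    void set_keyread(bool flag)
    {
      if (flag && !key_read)
      {
        key_read= true;
        file->extra(TOY_KEYREAD);
      }
      else if (!flag && key_read)
      {
        key_read= false;
        file->extra(TOY_NO_KEYREAD);
      }
    }
  };

  int main()
  {
    toy_handler h;
    toy_table t= { &h, false };
    t.set_keyread(true);    /* engine notified */
    t.set_keyread(true);    /* no-op */
    t.set_keyread(false);   /* engine notified */
    return 0;
  }
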