Diffstat (limited to 'sql')
-rw-r--r--   sql/field.cc           3
-rw-r--r--   sql/ha_partition.cc  144
-rw-r--r--   sql/ha_partition.h    12
-rw-r--r--   sql/item.h            24
-rw-r--r--   sql/item_create.cc    10
-rw-r--r--   sql/item_func.cc       2
-rw-r--r--   sql/item_func.h        5
-rw-r--r--   sql/item_strfunc.cc    4
-rw-r--r--   sql/log_event.cc      58
-rw-r--r--   sql/log_event_old.cc  39
-rw-r--r--   sql/sp_head.cc        11
-rw-r--r--   sql/sql_acl.cc        15
-rw-r--r--   sql/sql_load.cc        6
-rw-r--r--   sql/sql_parse.cc      26
-rw-r--r--   sql/sql_repl.cc        9
-rw-r--r--   sql/sql_select.cc      3
-rw-r--r--   sql/sql_servers.cc    10
-rw-r--r--   sql/sql_union.cc      31
-rw-r--r--   sql/sql_update.cc      6
19 files changed, 325 insertions, 93 deletions
diff --git a/sql/field.cc b/sql/field.cc
index a07493a6964..51bb527fc85 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -7835,8 +7835,7 @@ uint Field_blob::is_equal(Create_field *new_field)
return ((new_field->sql_type == get_blob_type_from_length(max_data_length()))
&& new_field->charset == field_charset &&
- ((Field_blob *)new_field->field)->max_data_length() ==
- max_data_length());
+ new_field->pack_length == pack_length());
}
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 4f3d58dc75e..4525298e8de 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -5898,6 +5898,23 @@ const key_map *ha_partition::keys_to_use_for_scanning()
DBUG_RETURN(m_file[0]->keys_to_use_for_scanning());
}
+#define MAX_PARTS_FOR_OPTIMIZER_CALLS 10
+/*
+ Prepare start variables for estimating optimizer costs.
+
+ @param[out] first First used partition (after pruning).
+ @param[out] num_used_parts Number of partitions after pruning.
+ @param[out] check_min_num Minimum number of partitions to call.
+*/
+void ha_partition::partitions_optimizer_call_preparations(uint *first,
+ uint *num_used_parts,
+ uint *check_min_num)
+{
+ *first= bitmap_get_first_set(&(m_part_info->used_partitions));
+ *num_used_parts= bitmap_bits_set(&(m_part_info->used_partitions));
+ *check_min_num= min(MAX_PARTS_FOR_OPTIMIZER_CALLS, *num_used_parts);
+}
+
/*
Return time for a scan of the table
@@ -5911,43 +5928,67 @@ const key_map *ha_partition::keys_to_use_for_scanning()
double ha_partition::scan_time()
{
- double scan_time= 0;
- handler **file;
+ double scan_time= 0.0;
+ uint first, part_id, num_used_parts, check_min_num, partitions_called= 0;
DBUG_ENTER("ha_partition::scan_time");
- for (file= m_file; *file; file++)
- if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
- scan_time+= (*file)->scan_time();
+ partitions_optimizer_call_preparations(&first, &num_used_parts, &check_min_num);
+ for (part_id= first; partitions_called < num_used_parts ; part_id++)
+ {
+ if (!bitmap_is_set(&(m_part_info->used_partitions), part_id))
+ continue;
+ scan_time+= m_file[part_id]->scan_time();
+ partitions_called++;
+ if (partitions_called >= check_min_num && scan_time != 0.0)
+ {
+ DBUG_RETURN(scan_time *
+ (double) num_used_parts / (double) partitions_called);
+ }
+ }
DBUG_RETURN(scan_time);
}
/*
- Get time to read
+ Estimate rows for records_in_range or estimate_rows_upper_bound.
- SYNOPSIS
- read_time()
- index Index number used
- ranges Number of ranges
- rows Number of rows
-
- RETURN VALUE
- time for read
+ @param is_records_in_range call records_in_range instead of
+ estimate_rows_upper_bound.
+ @param inx (only for records_in_range) index to use.
+ @param min_key (only for records_in_range) start of range.
+ @param max_key (only for records_in_range) end of range.
- DESCRIPTION
- This will be optimised later to include whether or not the index can
- be used with partitioning. To achieve we need to add another parameter
- that specifies how many of the index fields that are bound in the ranges.
- Possibly added as a new call to handlers.
+ @return Number of rows or HA_POS_ERROR.
*/
-
-double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
+ha_rows ha_partition::estimate_rows(bool is_records_in_range, uint inx,
+ key_range *min_key, key_range *max_key)
{
- DBUG_ENTER("ha_partition::read_time");
+ ha_rows rows, estimated_rows= 0;
+ uint first, part_id, num_used_parts, check_min_num, partitions_called= 0;
+ DBUG_ENTER("ha_partition::records_in_range");
- DBUG_RETURN(m_file[0]->read_time(index, ranges, rows));
+ partitions_optimizer_call_preparations(&first, &num_used_parts, &check_min_num);
+ for (part_id= first; partitions_called < num_used_parts ; part_id++)
+ {
+ if (!bitmap_is_set(&(m_part_info->used_partitions), part_id))
+ continue;
+ if (is_records_in_range)
+ rows= m_file[part_id]->records_in_range(inx, min_key, max_key);
+ else
+ rows= m_file[part_id]->estimate_rows_upper_bound();
+ if (rows == HA_POS_ERROR)
+ DBUG_RETURN(HA_POS_ERROR);
+ estimated_rows+= rows;
+ partitions_called++;
+ if (partitions_called >= check_min_num && estimated_rows)
+ {
+ DBUG_RETURN(estimated_rows * num_used_parts / partitions_called);
+ }
+ }
+ DBUG_RETURN(estimated_rows);
}
+
/*
Find number of records in a range
@@ -5975,22 +6016,9 @@ double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
key_range *max_key)
{
- handler **file;
- ha_rows in_range= 0;
DBUG_ENTER("ha_partition::records_in_range");
- file= m_file;
- do
- {
- if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
- {
- ha_rows tmp_in_range= (*file)->records_in_range(inx, min_key, max_key);
- if (tmp_in_range == HA_POS_ERROR)
- DBUG_RETURN(tmp_in_range);
- in_range+= tmp_in_range;
- }
- } while (*(++file));
- DBUG_RETURN(in_range);
+ DBUG_RETURN(estimate_rows(TRUE, inx, min_key, max_key));
}
@@ -6006,22 +6034,36 @@ ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
ha_rows ha_partition::estimate_rows_upper_bound()
{
- ha_rows rows, tot_rows= 0;
- handler **file;
DBUG_ENTER("ha_partition::estimate_rows_upper_bound");
- file= m_file;
- do
- {
- if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
- {
- rows= (*file)->estimate_rows_upper_bound();
- if (rows == HA_POS_ERROR)
- DBUG_RETURN(HA_POS_ERROR);
- tot_rows+= rows;
- }
- } while (*(++file));
- DBUG_RETURN(tot_rows);
+ DBUG_RETURN(estimate_rows(FALSE, 0, NULL, NULL));
+}
+
+
+/*
+ Get time to read
+
+ SYNOPSIS
+ read_time()
+ index Index number used
+ ranges Number of ranges
+ rows Number of rows
+
+ RETURN VALUE
+ time for read
+
+ DESCRIPTION
+ This will be optimised later to include whether or not the index can
+ be used with partitioning. To achieve this we need to add another
+ parameter that specifies how many of the index fields are bound in
+ the ranges. Possibly added as a new call to handlers.
+*/
+
+double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
+{
+ DBUG_ENTER("ha_partition::read_time");
+
+ DBUG_RETURN(m_file[0]->read_time(index, ranges, rows));
}
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index e2e6c674c5e..e6b7472786c 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -552,6 +552,18 @@ public:
-------------------------------------------------------------------------
*/
+private:
+ /*
+ Helper function that prepares the loop variables (first used partition,
+ number of used partitions and the minimum number of partitions to
+ sample) for the optimizer cost calls.
+ */
+ void partitions_optimizer_call_preparations(uint *first,
+ uint *num_used_parts,
+ uint *check_min_num);
+ ha_rows estimate_rows(bool is_records_in_range, uint inx,
+ key_range *min_key, key_range *max_key);
+public:
+
/*
keys_to_use_for_scanning can probably be implemented as the
intersection of all underlying handlers if mixed handlers are used.
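Note on the cost-estimation change above: scan_time() and the new estimate_rows() no longer sum the per-partition calls over every used partition; they sample at most MAX_PARTS_FOR_OPTIMIZER_CALLS (10) of them and scale the partial sum by num_used_parts / partitions_called. A minimal standalone sketch of that extrapolation, outside the handler class (sample_and_extrapolate and the PartEstimator callback are illustrative names, not part of the patch):

#include <algorithm>
#include <vector>

// Stand-in for the per-partition call, e.g. m_file[part_id]->scan_time()
// or m_file[part_id]->records_in_range(...).
typedef double (*PartEstimator)(unsigned part_id);

static const unsigned MAX_PARTS_FOR_OPTIMIZER_CALLS= 10;

// Sample at most MAX_PARTS_FOR_OPTIMIZER_CALLS of the used partitions and
// scale the partial sum up to the whole set, mirroring the loop structure
// of ha_partition::scan_time() and ha_partition::estimate_rows().
double sample_and_extrapolate(const std::vector<unsigned> &used_parts,
                              PartEstimator estimate_one_partition)
{
  unsigned check_min_num=
    std::min<unsigned>(MAX_PARTS_FOR_OPTIMIZER_CALLS,
                       (unsigned) used_parts.size());
  double total= 0.0;
  unsigned partitions_called= 0;

  for (size_t i= 0; i < used_parts.size(); i++)
  {
    total+= estimate_one_partition(used_parts[i]);
    partitions_called++;
    if (partitions_called >= check_min_num && total != 0.0)
      return total * (double) used_parts.size() / (double) partitions_called;
  }
  return total;              // fewer used partitions than the sampling cap
}

For example, with 40 used partitions and a partial sum of 120.0 after the first 10 calls, the returned estimate is 120.0 * 40 / 10 = 480.0.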
diff --git a/sql/item.h b/sql/item.h
index 4dc65c52843..b7e6cc6c204 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -514,6 +514,13 @@ public:
char * name; /* Name from select */
/* Original item name (if it was renamed)*/
char * orig_name;
+ /**
+ Intrusive list pointer for free list. If not null, points to the next
+ Item on some Query_arena's free list. For instance, stored procedures
+ have their own Query_arenas.
+
+ @see Query_arena::free_list
+ */
Item *next;
uint32 max_length;
uint name_length; /* Length of name */
@@ -987,6 +994,23 @@ public:
return FALSE;
}
+ /**
+ Find a function of a given type
+
+ @param arg the function type to search for (enum Item_func::Functype)
+ @return
+ @retval TRUE the function type we're searching for is found
+ @retval FALSE the function type wasn't found
+
+ @description
+ This function can be used (together with Item::walk()) to find functions
+ in an item tree fragment.
+ */
+ virtual bool find_function_processor (uchar *arg)
+ {
+ return FALSE;
+ }
+
/*
For SP local variable returns pointer to Item representing its
current value and pointer to current Item otherwise.
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 7d36481a23b..a393c886483 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -4218,6 +4218,16 @@ Create_func_rand::create_native(THD *thd, LEX_STRING name,
if (item_list != NULL)
arg_count= item_list->elements;
+ /*
+ When RAND() is binlogged, the seed is binlogged too. So the
+ sequence of random numbers is the same on a replication slave as
+ on the master. However, if several RAND() values are inserted
+ into a table, the order in which the rows are modified may differ
+ between master and slave, because the order is undefined. Hence,
+ the statement is unsafe to log in statement format.
+ */
+ thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION);
+
switch (arg_count) {
case 0:
{
diff --git a/sql/item_func.cc b/sql/item_func.cc
index a7b2609470f..75f8b2045b5 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -607,7 +607,7 @@ void Item_func::signal_divide_by_null()
Item *Item_func::get_tmp_table_item(THD *thd)
{
- if (!with_sum_func && !const_item() && functype() != SUSERVAR_FUNC)
+ if (!with_sum_func && !const_item())
return new Item_field(result_field);
return copy_or_same(thd);
}
diff --git a/sql/item_func.h b/sql/item_func.h
index e3690232904..6bfdae8d56d 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -215,6 +215,11 @@ public:
{
return has_timestamp_args();
}
+
+ virtual bool find_function_processor (uchar *arg)
+ {
+ return functype() == *(Functype *) arg;
+ }
};
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index d9ca1d17d82..f9af20a6cb9 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -677,8 +677,8 @@ String *Item_func_concat_ws::val_str(String *str)
res->length() + sep_str->length() + res2->length())
{
/* We have room in str; We can't get any errors here */
- if (str == res2)
- { // This is quote uncommon!
+ if (str->ptr() == res2->ptr())
+ { // This is quite uncommon!
str->replace(0,0,*sep_str);
str->replace(0,0,*res);
}
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 3deeb9a0efe..fa612993847 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -3275,7 +3275,18 @@ START SLAVE; . Query: '%s'", expected_error, thd->query());
compare_errors:
- /*
+ /*
+ In the slave thread, we may sometimes execute DROP / * 40005
+ TEMPORARY * / TABLE statements that come from parts of binlogs
+ (likely if we use RESET SLAVE or CHANGE MASTER TO), while the
+ temporary table has already been dropped. To ignore such irrelevant
+ "table does not exist" errors, we silently clear the error if
+ TEMPORARY was used.
+ */
+ if (thd->lex->sql_command == SQLCOM_DROP_TABLE && thd->lex->drop_temporary &&
+ thd->is_error() && thd->stmt_da->sql_errno() == ER_BAD_TABLE_ERROR &&
+ !expected_error)
+ thd->stmt_da->reset_diagnostics_area();
+ /*
If we expected a non-zero error code, and we don't get the same error
code, and it should be ignored or is related to a concurrency issue.
*/
@@ -8047,10 +8058,10 @@ Table_map_log_event::Table_map_log_event(THD *thd, TABLE *tbl, ulong tid,
plus one or three bytes (see pack.c:net_store_length) for number of
elements in the field metadata array.
*/
- if (m_field_metadata_size > 255)
- m_data_size+= m_field_metadata_size + 3;
- else
+ if (m_field_metadata_size < 251)
m_data_size+= m_field_metadata_size + 1;
+ else
+ m_data_size+= m_field_metadata_size + 3;
bzero(m_null_bits, num_null_bytes);
for (unsigned int i= 0 ; i < m_table->s->fields ; ++i)
@@ -8843,6 +8854,24 @@ static bool record_compare(TABLE *table)
}
}
+ /**
+ Check if we are using MyISAM.
+
+ If this is a MyISAM table, then we cannot do a memcmp
+ right away because some NULL fields can still contain
+ an old value in the row - they are not shown to the user
+ because the null bit is set, however, the contents are
+ not cleared. As such, plain memory comparison cannot be
+ assured to work. See: BUG#49482 and BUG#49481.
+
+ On top of this, we do not store field contents for null
+ fields in the binlog, so this is extra important when
+ comparing records fetched from binlog and from storage
+ engine.
+ */
+ if (table->file->ht->db_type == DB_TYPE_MYISAM)
+ goto record_compare_field_by_field;
+
if (table->s->blob_fields + table->s->varchar_fields == 0)
{
result= cmp_record(table,record[1]);
@@ -8858,14 +8887,33 @@ static bool record_compare(TABLE *table)
goto record_compare_exit;
}
+record_compare_field_by_field:
+
/* Compare updated fields */
for (Field **ptr=table->field ; *ptr ; ptr++)
{
- if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length))
+ Field *f= *ptr;
+
+ /* if just one of the fields is null then there is no match */
+ if ((f->is_null_in_record(table->record[0])) ==
+ !(f->is_null_in_record(table->record[1])))
{
result= TRUE;
goto record_compare_exit;
}
+
+ /* if both fields are not null then we can compare */
+ if (!(f->is_null_in_record(table->record[0])) &&
+ !(f->is_null_in_record(table->record[1])))
+ {
+ if (f->cmp_binary_offset(table->s->rec_buff_length))
+ {
+ result= TRUE;
+ goto record_compare_exit;
+ }
+ }
+
+ /* if both fields are null then there is a match. compare next field */
}
record_compare_exit:
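The field-by-field loop added to record_compare() above encodes a three-way rule: if exactly one side is NULL the rows differ, if both sides are NULL the field counts as equal, and only when both are non-NULL are the stored bytes compared. A small standalone sketch of that decision logic (fields_differ is an illustrative helper, not a function in the patch):

#include <cassert>

// Same decision as the NULL-aware comparison loop in record_compare():
// returns true when this field already rules out a match.
static bool fields_differ(bool null_in_rec0, bool null_in_rec1,
                          bool bytes_differ)
{
  if (null_in_rec0 != null_in_rec1)     // exactly one side is NULL
    return true;
  if (!null_in_rec0 && !null_in_rec1)   // both non-NULL: compare contents
    return bytes_differ;
  return false;                         // both NULL: treated as a match
}

int main()
{
  assert( fields_differ(true,  false, false)); // NULL vs value  -> differ
  assert(!fields_differ(true,  true,  true));  // NULL vs NULL   -> match
  assert( fields_differ(false, false, true));  // values differ  -> differ
  assert(!fields_differ(false, false, false)); // values equal   -> match
  return 0;
}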
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index 8670631cfa0..0bfbbfd7e49 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -330,6 +330,24 @@ static bool record_compare(TABLE *table)
}
}
+ /**
+ Check if we are using MyISAM.
+
+ If this is a MyISAM table, then we cannot do a memcmp
+ right away because some NULL fields can still contain
+ an old value in the row - they are not shown to the user
+ because the null bit is set, however, the contents are
+ not cleared. As such, plain memory comparison cannot be
+ assured to work. See: BUG#49482 and BUG#49481.
+
+ On top of this, we do not store field contents for null
+ fields in the binlog, so this is extra important when
+ comparing records fetched from binlog and from storage
+ engine.
+ */
+ if (table->file->ht->db_type == DB_TYPE_MYISAM)
+ goto record_compare_field_by_field;
+
if (table->s->blob_fields + table->s->varchar_fields == 0)
{
result= cmp_record(table,record[1]);
@@ -345,14 +363,33 @@ static bool record_compare(TABLE *table)
goto record_compare_exit;
}
+record_compare_field_by_field:
+
/* Compare updated fields */
for (Field **ptr=table->field ; *ptr ; ptr++)
{
- if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length))
+ Field *f= *ptr;
+
+ /* if just one of the fields is null then there is no match */
+ if ((f->is_null_in_record(table->record[0])) ==
+ !(f->is_null_in_record(table->record[1])))
{
result= TRUE;
goto record_compare_exit;
}
+
+ /* if both fields are not null then we can compare */
+ if (!(f->is_null_in_record(table->record[0])) &&
+ !(f->is_null_in_record(table->record[1])))
+ {
+ if (f->cmp_binary_offset(table->s->rec_buff_length))
+ {
+ result= TRUE;
+ goto record_compare_exit;
+ }
+ }
+
+ /* if both fields are null then there is a match. compare next field */
}
record_compare_exit:
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 9996a237cb4..8d06003e2e4 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -2799,8 +2799,15 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
m_lex->mark_as_requiring_prelocking(NULL);
}
thd->rollback_item_tree_changes();
- /* Update the state of the active arena. */
- thd->stmt_arena->state= Query_arena::EXECUTED;
+ /*
+ Update the state of the active arena if no errors occurred during
+ the open_tables stage.
+ */
+ if (!res || !thd->is_error() ||
+ (thd->stmt_da->sql_errno() != ER_CANT_REOPEN_TABLE &&
+ thd->stmt_da->sql_errno() != ER_NO_SUCH_TABLE &&
+ thd->stmt_da->sql_errno() != ER_UPDATE_TABLE_USED))
+ thd->stmt_arena->state= Query_arena::EXECUTED;
/*
Merge here with the saved parent's values
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index d62ac4642de..8a6413f0e5d 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -292,7 +292,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
{
TABLE *table;
READ_RECORD read_record_info;
- my_bool return_val= 1;
+ my_bool return_val= TRUE;
bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE;
char tmp_name[NAME_LEN+1];
int password_length;
@@ -605,7 +605,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
init_check_host();
initialized=1;
- return_val=0;
+ return_val= FALSE;
end:
thd->variables.sql_mode= old_sql_mode;
@@ -656,7 +656,7 @@ my_bool acl_reload(THD *thd)
DYNAMIC_ARRAY old_acl_hosts,old_acl_users,old_acl_dbs;
MEM_ROOT old_mem;
bool old_initialized;
- my_bool return_val= 1;
+ my_bool return_val= TRUE;
DBUG_ENTER("acl_reload");
if (thd->locked_tables)
@@ -683,8 +683,13 @@ my_bool acl_reload(THD *thd)
if (simple_open_n_lock_tables(thd, tables))
{
- sql_print_error("Fatal error: Can't open and lock privilege tables: %s",
- thd->stmt_da->message());
+ /*
+ Execution might have been interrupted; only print the error message
+ if an error condition has been raised.
+ */
+ if (thd->stmt_da->is_error())
+ sql_print_error("Fatal error: Can't open and lock privilege tables: %s",
+ thd->stmt_da->message());
goto end;
}
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 9c621944f2a..b8ba16f2fb1 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -683,7 +683,11 @@ static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex,
if (n++)
pfields.append(", ");
if (item->name)
+ {
+ pfields.append("`");
pfields.append(item->name);
+ pfields.append("`");
+ }
else
item->print(&pfields, QT_ORDINARY);
}
@@ -703,7 +707,9 @@ static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex,
val= lv++;
if (n++)
pfields.append(", ");
+ pfields.append("`");
pfields.append(item->name);
+ pfields.append("`");
pfields.append("=");
val->print(&pfields, QT_ORDINARY);
}
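The two hunks above wrap column names in backticks when the LOAD DATA statement is rebuilt for the Execute_load_query binlog event, presumably so that names that need quoting (reserved words, unusual characters) still parse on the slave. A hedged sketch of the kind of quoting involved; append_quoted_name is an illustrative helper, and the backtick-doubling shown here is an extra safety assumption, not something this patch adds:

#include <string>

// Append `name` to out, wrapped in backticks; embedded backticks are doubled
// so the identifier still parses (illustrative, not the patch's own helper).
static void append_quoted_name(std::string &out, const std::string &name)
{
  out+= '`';
  for (size_t i= 0; i < name.size(); i++)
  {
    if (name[i] == '`')
      out+= '`';               // escape an embedded backtick by doubling it
    out+= name[i];
  }
  out+= '`';
}

// Usage: building the column list "(`key`, `select`)" of the rewritten
// LOAD DATA statement instead of the unquoted "(key, select)".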
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 5c272353664..5cd0bebf48a 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -623,8 +623,10 @@ void free_items(Item *item)
DBUG_VOID_RETURN;
}
-/* This works because items are allocated with sql_alloc() */
-
+/**
+ This works because items are allocated with sql_alloc().
+ @note The function also handles null pointers (empty list).
+*/
void cleanup_items(Item *item)
{
DBUG_ENTER("cleanup_items");
@@ -3282,17 +3284,6 @@ end_with_restore_list:
}
else
{
- /*
- If this is a slave thread, we may sometimes execute some
- DROP / * 40005 TEMPORARY * / TABLE
- that come from parts of binlogs (likely if we use RESET SLAVE or CHANGE
- MASTER TO), while the temporary table has already been dropped.
- To not generate such irrelevant "table does not exist errors",
- we silently add IF EXISTS if TEMPORARY was used.
- */
- if (thd->slave_thread)
- lex->drop_if_exists= 1;
-
/* So that DROP TEMPORARY TABLE gets to binlog at commit/rollback */
thd->variables.option_bits|= OPTION_KEEP_LOG;
}
@@ -6822,13 +6813,13 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
thd->thread_stack= (char*) &tmp_thd;
thd->store_globals();
}
-
+
if (thd)
{
bool reload_acl_failed= acl_reload(thd);
bool reload_grants_failed= grant_reload(thd);
bool reload_servers_failed= servers_reload(thd);
-
+
if (reload_acl_failed || reload_grants_failed || reload_servers_failed)
{
result= 1;
@@ -7001,7 +6992,10 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
if (options & REFRESH_USER_RESOURCES)
reset_mqh((LEX_USER *) NULL, 0); /* purecov: inspected */
*write_to_binlog= tmp_write_to_binlog;
- return result;
+ /*
+ If the query was killed then this function must fail.
+ */
+ return result || thd->killed;
}
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 00cc28e6213..436e979df1e 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -917,11 +917,14 @@ impossible position";
thd_proc_info(thd, "Finished reading one binlog; switching to next binlog");
switch (mysql_bin_log.find_next_log(&linfo, 1)) {
- case LOG_INFO_EOF:
- loop_breaker = (flags & BINLOG_DUMP_NON_BLOCK);
- break;
case 0:
break;
+ case LOG_INFO_EOF:
+ if (mysql_bin_log.is_active(log_file_name))
+ {
+ loop_breaker = (flags & BINLOG_DUMP_NON_BLOCK);
+ break;
+ }
default:
errmsg = "could not find next log";
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 4775a297b0c..1f8bb11967a 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -522,7 +522,7 @@ JOIN::prepare(Item ***rref_pointer_array,
thd->lex->allow_sum_func= save_allow_sum_func;
}
- if (!thd->lex->view_prepare_mode)
+ if (!thd->lex->view_prepare_mode && !(select_options & SELECT_DESCRIBE))
{
Item_subselect *subselect;
/* Is it subselect? */
@@ -7170,6 +7170,7 @@ static void update_depend_map(JOIN *join, ORDER *order)
table_map depend_map;
order->item[0]->update_used_tables();
order->depend_map=depend_map=order->item[0]->used_tables();
+ order->used= 0;
// Not item_sum(), RAND() and no reference to table outside of sub select
if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT))
&& !order->item[0]->with_sum_func)
diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc
index 141e00cf50f..a7109af9f64 100644
--- a/sql/sql_servers.cc
+++ b/sql/sql_servers.cc
@@ -264,8 +264,14 @@ bool servers_reload(THD *thd)
if (simple_open_n_lock_tables(thd, tables))
{
- sql_print_error("Can't open and lock privilege tables: %s",
- thd->stmt_da->message());
+ /*
+ Execution might have been interrupted; only print the error message
+ if an error condition has been raised.
+ */
+ if (thd->stmt_da->is_error())
+ sql_print_error("Can't open and lock privilege tables: %s",
+ thd->stmt_da->message());
+ return_val= FALSE;
goto end;
}
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 5bdff4dc9bf..ad14d2e4ecd 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -335,6 +335,35 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
}
}
+ /*
+ Disable the usage of fulltext searches in the last union branch.
+ This is a temporary 5.x limitation because of the way the fulltext
+ search functions are handled by the optimizer.
+ This is a manifestation of the more general problem of "taking away"
+ parts of a SELECT statement post-fix_fields(). This is generally not
+ doable since various flags are collected in various places (e.g.
+ SELECT_LEX) that carry information about the presence of certain
+ expressions or constructs in the parts of the query.
+ When part of the query is taken away it's not clear how to "divide"
+ the meaning of these accumulated flags and what to carry over to the
+ recipient query (SELECT_LEX).
+ */
+ if (global_parameters->ftfunc_list->elements &&
+ global_parameters->order_list.elements &&
+ global_parameters != fake_select_lex)
+ {
+ ORDER *ord;
+ Item_func::Functype ft= Item_func::FT_FUNC;
+ for (ord= (ORDER*)global_parameters->order_list.first; ord; ord= ord->next)
+ if ((*ord->item)->walk (&Item::find_function_processor, FALSE,
+ (uchar *) &ft))
+ {
+ my_error (ER_CANT_USE_OPTION_HERE, MYF(0), "MATCH()");
+ goto err;
+ }
+ }
+
+
create_options= (first_sl->options | thd_arg->variables.option_bits |
TMP_TABLE_ALL_COLUMNS);
/*
@@ -669,7 +698,7 @@ bool st_select_lex_unit::cleanup()
{
ORDER *ord;
for (ord= (ORDER*)global_parameters->order_list.first; ord; ord= ord->next)
- (*ord->item)->cleanup();
+ (*ord->item)->walk (&Item::cleanup_processor, 0, 0);
}
}
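The rejection of MATCH() in the global ORDER BY above relies on the new find_function_processor() virtual (added to Item and Item_func earlier in this diff) combined with Item::walk(), which visits every node of an expression tree and stops as soon as a processor returns TRUE. A simplified model of that walk/processor pattern, outside the real Item hierarchy (Node, FuncNode and their members are illustrative only):

#include <vector>

enum Functype { UNKNOWN_FUNC, FT_FUNC };

struct Node
{
  std::vector<Node*> args;

  // Depth-first walk: visit the arguments, then the node itself, stopping
  // as soon as the processor reports a match (like Item::walk()).
  bool walk(bool (Node::*processor)(unsigned char *), unsigned char *arg)
  {
    for (size_t i= 0; i < args.size(); i++)
      if (args[i]->walk(processor, arg))
        return true;
    return (this->*processor)(arg);
  }

  // Base class never matches, like Item::find_function_processor().
  virtual bool find_function(unsigned char *) { return false; }
  virtual ~Node() {}
};

struct FuncNode : Node
{
  Functype type;
  FuncNode(Functype t) : type(t) {}

  // A function node matches when its type equals the searched-for type,
  // like Item_func::find_function_processor().
  virtual bool find_function(unsigned char *arg)
  {
    return type == *(Functype *) arg;
  }
};

// Usage mirroring the ORDER BY check in st_select_lex_unit::prepare():
//   Functype ft= FT_FUNC;
//   if (order_expr->walk(&Node::find_function, (unsigned char *) &ft))
//     ...  // reject: MATCH() in the global ORDER BY of a UNION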
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index e3f85c6215d..4e5941a87a0 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -24,6 +24,7 @@
#include "sp_head.h"
#include "sql_trigger.h"
#include "probes_mysql.h"
+#include "debug_sync.h"
/* Return 0 if row hasn't changed */
@@ -1137,8 +1138,11 @@ reopen_tables:
items from the 'fields' list, so the cleanup above is necessary too.
*/
cleanup_items(thd->free_list);
-
+ cleanup_items(thd->stmt_arena->free_list);
close_tables_for_reopen(thd, &table_list);
+
+ DEBUG_SYNC(thd, "multi_update_reopen_tables");
+
goto reopen_tables;
}