author     Sergei Golubchik <sergii@pisem.net>    2011-11-22 18:04:38 +0100
committer  Sergei Golubchik <sergii@pisem.net>    2011-11-22 18:04:38 +0100
commit     d2755a2c9c109ddb4e2e0c9feda89431a6c4fd50 (patch)
tree       c6e4678908c750d7f558e98cedc349aa1d350892 /sql
parent     af32b02c06f32a89dc9f52e556bc5dd3bf49c19e (diff)
parent     42221abaed700f6dc5d280b462755851780e8487 (diff)
download   mariadb-git-d2755a2c9c109ddb4e2e0c9feda89431a6c4fd50.tar.gz
5.3->5.5 merge
Diffstat (limited to 'sql')
-rw-r--r--  sql/debug_sync.cc | 4
-rw-r--r--  sql/event_scheduler.cc | 2
-rw-r--r--  sql/filesort.cc | 31
-rw-r--r--  sql/ha_ndbcluster_binlog.cc | 2
-rw-r--r--  sql/handler.cc | 15
-rw-r--r--  sql/handler.h | 11
-rw-r--r--  sql/item.cc | 43
-rw-r--r--  sql/item.h | 39
-rw-r--r--  sql/item_cmpfunc.cc | 98
-rw-r--r--  sql/item_cmpfunc.h | 5
-rw-r--r--  sql/item_func.cc | 5
-rw-r--r--  sql/item_row.cc | 2
-rw-r--r--  sql/item_strfunc.cc | 144
-rw-r--r--  sql/item_subselect.cc | 244
-rw-r--r--  sql/item_subselect.h | 123
-rw-r--r--  sql/item_sum.cc | 4
-rw-r--r--  sql/item_timefunc.cc | 8
-rw-r--r--  sql/lex.h | 2
-rw-r--r--  sql/log.cc | 39
-rw-r--r--  sql/log_event.cc | 3
-rw-r--r--  sql/log_event.h | 2
-rw-r--r--  sql/multi_range_read.cc | 23
-rw-r--r--  sql/mysqld.cc | 112
-rw-r--r--  sql/mysqld.h | 7
-rw-r--r--  sql/opt_index_cond_pushdown.cc | 90
-rw-r--r--  sql/opt_range.cc | 28
-rw-r--r--  sql/opt_range.h | 7
-rw-r--r--  sql/opt_subselect.cc | 296
-rw-r--r--  sql/opt_sum.cc | 4
-rw-r--r--  sql/opt_table_elimination.cc | 4
-rw-r--r--  sql/records.cc | 24
-rw-r--r--  sql/records.h | 2
-rw-r--r--  sql/rpl_rli.h | 8
-rw-r--r--  sql/share/errmsg-utf8.txt | 4
-rw-r--r--  sql/slave.cc | 11
-rw-r--r--  sql/slave.h | 2
-rw-r--r--  sql/sp.cc | 44
-rw-r--r--  sql/sp.h | 28
-rw-r--r--  sql/sp_head.cc | 4
-rw-r--r--  sql/sp_head.h | 11
-rw-r--r--  sql/sp_rcontext.cc | 2
-rw-r--r--  sql/sql_acl.cc | 63
-rw-r--r--  sql/sql_base.cc | 32
-rw-r--r--  sql/sql_cache.cc | 7
-rw-r--r--  sql/sql_class.cc | 119
-rw-r--r--  sql/sql_class.h | 66
-rw-r--r--  sql/sql_connect.cc | 17
-rw-r--r--  sql/sql_db.cc | 1
-rw-r--r--  sql/sql_delete.cc | 16
-rw-r--r--  sql/sql_insert.cc | 28
-rw-r--r--  sql/sql_join_cache.cc | 76
-rw-r--r--  sql/sql_lex.cc | 131
-rw-r--r--  sql/sql_lex.h | 13
-rw-r--r--  sql/sql_list.h | 21
-rw-r--r--  sql/sql_load.cc | 12
-rw-r--r--  sql/sql_parse.cc | 159
-rw-r--r--  sql/sql_parse.h | 2
-rw-r--r--  sql/sql_plugin.cc | 15
-rw-r--r--  sql/sql_prepare.cc | 1
-rw-r--r--  sql/sql_priv.h | 6
-rw-r--r--  sql/sql_repl.cc | 8
-rw-r--r--  sql/sql_select.cc | 327
-rw-r--r--  sql/sql_select.h | 22
-rw-r--r--  sql/sql_show.cc | 18
-rw-r--r--  sql/sql_union.cc | 1
-rw-r--r--  sql/sql_update.cc | 16
-rw-r--r--  sql/sql_view.cc | 2
-rw-r--r--  sql/sql_yacc.yy | 52
-rw-r--r--  sql/structs.h | 9
-rw-r--r--  sql/sys_vars.cc | 40
-rw-r--r--  sql/sys_vars.h | 113
-rw-r--r--  sql/table.cc | 20
72 files changed, 1985 insertions, 965 deletions
diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc
index eb8e3b093c9..2f356122687 100644
--- a/sql/debug_sync.cc
+++ b/sql/debug_sync.cc
@@ -1123,7 +1123,7 @@ static bool debug_sync_set_action(THD *thd, st_debug_sync_action *action)
point decremented it to 0. In this case the following happened:
- an error message was reported with my_error() and
- - the statement was killed with thd->killed= THD::KILL_QUERY.
+ - the statement was killed with thd->killed= KILL_QUERY.
If a statement reports an error, it must not call send_ok().
The calling functions will not call send_ok(), if we return TRUE
@@ -1838,7 +1838,7 @@ static void debug_sync_execute(THD *thd, st_debug_sync_action *action)
{
if (!--action->hit_limit)
{
- thd->killed= THD::KILL_QUERY;
+ thd->killed= KILL_QUERY;
my_error(ER_DEBUG_SYNC_HIT_LIMIT, MYF(0));
}
DBUG_PRINT("debug_sync_exec", ("hit_limit: %lu at: '%s'",
diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc
index 8944b749305..55a3f6b36c4 100644
--- a/sql/event_scheduler.cc
+++ b/sql/event_scheduler.cc
@@ -659,7 +659,7 @@ Event_scheduler::stop()
sql_print_information("Event Scheduler: Killing the scheduler thread, "
"thread id %lu",
scheduler_thd->thread_id);
- scheduler_thd->awake(THD::KILL_CONNECTION);
+ scheduler_thd->awake(KILL_CONNECTION);
mysql_mutex_unlock(&scheduler_thd->LOCK_thd_data);
/* thd could be 0x0, when shutting down */
diff --git a/sql/filesort.cc b/sql/filesort.cc
index ebef3b2716b..a9db198e9d3 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -526,7 +526,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
my_off_t record;
TABLE *sort_form;
THD *thd= current_thd;
- volatile THD::killed_state *killed= &thd->killed;
+ volatile killed_state *killed= &thd->killed;
handler *file;
MY_BITMAP *save_read_set, *save_write_set, *save_vcol_set;
DBUG_ENTER("find_all_keys");
@@ -566,12 +566,11 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
/* Temporary set for register_used_fields and register_field_in_read_map */
sort_form->read_set= &sort_form->tmp_set;
register_used_fields(param);
- if (select && select->cond)
- select->cond->walk(&Item::register_field_in_read_map, 1,
- (uchar*) sort_form);
- if (select && select->pre_idx_push_select_cond)
- select->pre_idx_push_select_cond->walk(&Item::register_field_in_read_map,
- 1, (uchar*) sort_form);
+ Item *sort_cond= !select ?
+ 0 : !select->pre_idx_push_select_cond ?
+ select->cond : select->pre_idx_push_select_cond;
+ if (sort_cond)
+ sort_cond->walk(&Item::register_field_in_read_map, 1, (uchar*) sort_form);
sort_form->column_bitmaps_set(&sort_form->tmp_set, &sort_form->tmp_set,
&sort_form->tmp_set);
@@ -644,15 +643,21 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
SQL_SELECT::skip_record evaluates this condition. it may include a
correlated subquery predicate, such that some field in the subquery
refers to 'sort_form'.
+
+ PSergey-todo: discuss the above with Timour.
*/
+ MY_BITMAP *tmp_read_set= sort_form->read_set;
+ MY_BITMAP *tmp_write_set= sort_form->write_set;
+ MY_BITMAP *tmp_vcol_set= sort_form->vcol_set;
+
if (select->cond->with_subselect)
sort_form->column_bitmaps_set(save_read_set, save_write_set,
save_vcol_set);
write_record= (select->skip_record(thd) > 0);
if (select->cond->with_subselect)
- sort_form->column_bitmaps_set(&sort_form->tmp_set,
- &sort_form->tmp_set,
- &sort_form->tmp_set);
+ sort_form->column_bitmaps_set(tmp_read_set,
+ tmp_write_set,
+ tmp_vcol_set);
}
else
write_record= true;
@@ -1243,9 +1248,9 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
void *first_cmp_arg;
element_count dupl_count= 0;
uchar *src;
- THD::killed_state not_killable;
+ killed_state not_killable;
uchar *unique_buff= param->unique_buff;
- volatile THD::killed_state *killed= &current_thd->killed;
+ volatile killed_state *killed= &current_thd->killed;
DBUG_ENTER("merge_buffers");
status_var_increment(current_thd->status_var.filesort_merge_passes);
@@ -1253,7 +1258,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
if (param->not_killable)
{
killed= &not_killable;
- not_killable= THD::NOT_KILLED;
+ not_killable= NOT_KILLED;
}
error=0;
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 040b5778dbd..3d140b92977 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -1870,7 +1870,7 @@ static void ndb_binlog_query(THD *thd, Cluster_schema *schema)
else
thd->server_id= schema->any_value;
thd->db= schema->db;
- int errcode = query_error_code(thd, thd->killed == THD::NOT_KILLED);
+ int errcode = query_error_code(thd, thd->killed == NOT_KILLED);
thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query,
schema->query_length, FALSE, TRUE,
schema->name[0] == 0 || thd->db[0] == 0,
diff --git a/sql/handler.cc b/sql/handler.cc
index 90889e046e9..1d0f676493d 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1478,9 +1478,13 @@ int ha_rollback_trans(THD *thd, bool all)
slave SQL thread, it would not stop the thread but just be printed in
the error log; but we don't want users to wonder why they have this
message in the error log, so we don't send it.
+
+ We don't have to test for thd->killed == KILL_SYSTEM_THREAD as
+ it doesn't matter if a warning is pushed to a system thread or not:
+ No one will see it...
*/
if (is_real_trans && thd->transaction.all.modified_non_trans_table &&
- !thd->slave_thread && thd->killed != THD::KILL_CONNECTION)
+ !thd->slave_thread && thd->killed < KILL_CONNECTION)
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARNING_NOT_COMPLETE_ROLLBACK,
ER(ER_WARNING_NOT_COMPLETE_ROLLBACK));
@@ -2609,7 +2613,7 @@ int handler::update_auto_increment()
/*
first test if the query was aborted due to strict mode constraints
*/
- if (thd->killed == THD::KILL_BAD_DATA)
+ if (killed_mask_hard(thd->killed) == KILL_BAD_DATA)
DBUG_RETURN(HA_ERR_AUTOINC_ERANGE);
/*
@@ -4779,7 +4783,7 @@ static bool check_table_binlog_row_based(THD *thd, TABLE *table)
/** @brief
Write table maps for all (manually or automatically) locked tables
- to the binary log. Also, if binlog_annotate_rows_events is ON,
+ to the binary log. Also, if binlog_annotate_row_events is ON,
write Annotate_rows event before the first table map.
SYNOPSIS
@@ -4812,7 +4816,7 @@ static int write_locked_table_maps(THD *thd)
MYSQL_LOCK *locks[2];
locks[0]= thd->extra_lock;
locks[1]= thd->lock;
- my_bool with_annotate= thd->variables.binlog_annotate_rows_events &&
+ my_bool with_annotate= thd->variables.binlog_annotate_row_events &&
thd->query() && thd->query_length();
for (uint i= 0 ; i < sizeof(locks)/sizeof(*locks) ; ++i )
@@ -4996,6 +5000,9 @@ int handler::ha_reset()
/* reset the bitmaps to point to defaults */
table->default_column_bitmaps();
pushed_cond= NULL;
+ /* Reset information about pushed engine conditions */
+ cancel_pushed_idx_cond();
+ /* Reset information about pushed index conditions */
DBUG_RETURN(reset());
}
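
The hunks above (together with the ones in debug_sync.cc, event_scheduler.cc, filesort.cc and ha_ndbcluster_binlog.cc) replace the nested THD::KILL_* constants with a file-scope killed_state and start comparing kill reasons by severity, e.g. `thd->killed < KILL_CONNECTION` in ha_rollback_trans(). A minimal sketch of the pattern this relies on, an ordered enum whose numeric values encode severity, with enumerator values that are assumptions for illustration rather than the real sql_class.h:

    // Illustrative only: names follow the patch, the numeric ordering is an
    // assumed example of the pattern, not MariaDB's actual definition.
    enum killed_state
    {
      NOT_KILLED=         0,
      KILL_BAD_DATA=      1,   // abort the statement on bad data (strict mode)
      KILL_QUERY=         2,   // kill only the running statement
      KILL_CONNECTION=    3,   // kill the whole connection
      KILL_SYSTEM_THREAD= 4    // shutdown-time kill of internal threads
    };

    // With such an ordering, "killed for any reason milder than a connection
    // kill" collapses into one comparison, as in ha_rollback_trans() above:
    //   if (thd->killed < KILL_CONNECTION)
    //     push_warning(...);

The killed_mask_hard() call in update_auto_increment() suggests the real enum also carries a "hard kill" bit that has to be masked off before equality tests such as the KILL_BAD_DATA check.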
diff --git a/sql/handler.h b/sql/handler.h
index 7a75253f040..fc38794a410 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1788,7 +1788,7 @@ public:
handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
:table_share(share_arg), table(0),
estimation_rows_to_insert(0), ht(ht_arg),
- ref(0), key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
+ ref(0), end_range(NULL), key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
in_range_check_pushed_down(FALSE),
ref_length(sizeof(my_off_t)),
ft_handler(0), inited(NONE),
@@ -1845,6 +1845,7 @@ public:
DBUG_ENTER("ha_rnd_init");
DBUG_ASSERT(inited==NONE || (inited==RND && scan));
inited= (result= rnd_init(scan)) ? NONE: RND;
+ end_range= NULL;
DBUG_RETURN(result);
}
int ha_rnd_end()
@@ -1852,6 +1853,7 @@ public:
DBUG_ENTER("ha_rnd_end");
DBUG_ASSERT(inited==RND);
inited=NONE;
+ end_range= NULL;
DBUG_RETURN(rnd_end());
}
int ha_rnd_init_with_error(bool scan) __attribute__ ((warn_unused_result));
@@ -2567,6 +2569,13 @@ public:
*/
virtual void cond_pop() { return; };
virtual Item *idx_cond_push(uint keyno, Item* idx_cond) { return idx_cond; }
+ /** Reset information about pushed index conditions */
+ virtual void cancel_pushed_idx_cond()
+ {
+ pushed_idx_cond= NULL;
+ pushed_idx_cond_keyno= MAX_KEY;
+ in_range_check_pushed_down= false;
+ }
virtual bool check_if_incompatible_data(HA_CREATE_INFO *create_info,
uint table_changes)
{ return COMPATIBLE_DATA_NO; }
diff --git a/sql/item.cc b/sql/item.cc
index d768c7a4218..028cc8c8e30 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -806,7 +806,9 @@ bool Item_ident::remove_dependence_processor(uchar * arg)
bool Item_ident::collect_outer_ref_processor(uchar *param)
{
Collect_deps_prm *prm= (Collect_deps_prm *)param;
- if (depended_from && depended_from->nest_level < prm->nest_level)
+ if (depended_from &&
+ depended_from->nest_level_base == prm->nest_level_base &&
+ depended_from->nest_level < prm->nest_level)
prm->parameters->add_unique(this, &cmp_items);
return FALSE;
}
@@ -2343,6 +2345,11 @@ bool Item_field::enumerate_field_refs_processor(uchar *arg)
return FALSE;
}
+bool Item_field::update_table_bitmaps_processor(uchar *arg)
+{
+ update_table_bitmaps();
+ return FALSE;
+}
const char *Item_ident::full_name() const
{
@@ -5011,6 +5018,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
if (!outer_fixed && cached_table && cached_table->select_lex &&
context->select_lex &&
cached_table->select_lex != context->select_lex &&
+ !context->select_lex->is_merged_child_of(cached_table->select_lex) &&
is_outer_table(cached_table, context->select_lex))
{
int ret;
@@ -5544,6 +5552,10 @@ Field *Item::make_string_field(TABLE *table)
{
Field *field;
DBUG_ASSERT(collation.collation);
+ /*
+ Note: the following check is repeated in
+ subquery_types_allow_materialization():
+ */
if (max_length/collation.collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB)
field= new Field_blob(max_length, maybe_null, name,
collation.collation, TRUE);
@@ -6345,7 +6357,7 @@ bool Item::send(Protocol *protocol, String *buffer)
case MYSQL_TYPE_TIMESTAMP:
{
MYSQL_TIME tm;
- get_date(&tm, TIME_FUZZY_DATE);
+ get_date(&tm, TIME_FUZZY_DATE | sql_mode_for_dates());
if (!null_value)
{
if (f_type == MYSQL_TYPE_DATE)
@@ -7746,7 +7758,11 @@ bool Item_direct_view_ref::fix_fields(THD *thd, Item **reference)
((*ref)->fix_fields(thd, ref)))
return TRUE;
- return Item_direct_ref::fix_fields(thd, reference);
+ if (Item_direct_ref::fix_fields(thd, reference))
+ return TRUE;
+ if (view->table && view->table->maybe_null)
+ maybe_null= TRUE;
+ return FALSE;
}
/*
@@ -9521,6 +9537,12 @@ table_map Item_direct_view_ref::used_tables() const
(view->merged ? (*ref)->used_tables() : view->table->map);
}
+table_map Item_direct_view_ref::not_null_tables() const
+{
+ return get_depended_from() ?
+ 0 :
+ (view->merged ? (*ref)->not_null_tables() : view->table->map);
+}
/*
we add RAND_TABLE_BIT to prevent moving this item from HAVING to WHERE
@@ -9533,7 +9555,22 @@ table_map Item_ref_null_helper::used_tables() const
}
+/* Debugger help function */
+static char dbug_item_print_buf[256];
+const char *dbug_print_item(Item *item)
+{
+ char *buf= dbug_item_print_buf;
+ String str(buf, sizeof(dbug_item_print_buf), &my_charset_bin);
+ str.length(0);
+ if (!item)
+ return "(Item*)NULL";
+ item->print(&str ,QT_ORDINARY);
+ if (str.c_ptr() == buf)
+ return buf;
+ else
+ return "Couldn't fit into buffer";
+}
/*****************************************************************************
** Instantiate templates
*****************************************************************************/
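
dbug_print_item(), added at the end of item.cc above, is a debugger convenience: it formats an Item into a fixed static buffer and returns a C string, so it can be called directly from gdb without any memory management. A stripped-down, self-contained sketch of the same idea for an arbitrary type (the Point type and function name are made up for illustration):

    #include <cstdio>

    struct Point { int x, y; };

    // Format into a static buffer so a debugger can call this and print the
    // returned pointer. Like dbug_print_item(), it is not thread-safe, which
    // is acceptable for a debug-only helper.
    static char dbug_print_buf[256];

    extern "C" const char *dbug_print_point(const Point *p)
    {
      if (!p)
        return "(Point*)NULL";
      int n= std::snprintf(dbug_print_buf, sizeof(dbug_print_buf),
                           "Point(%d, %d)", p->x, p->y);
      if (n < 0 || n >= (int) sizeof(dbug_print_buf))
        return "Couldn't fit into buffer";
      return dbug_print_buf;
    }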
diff --git a/sql/item.h b/sql/item.h
index c3bdc5042c8..9186ba084db 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -541,6 +541,7 @@ typedef void (*Cond_traverser) (const Item *item, void *arg);
class Item_equal;
class COND_EQUAL;
+class st_select_lex_unit;
class Item {
Item(const Item &); /* Prevent use of these */
@@ -1144,9 +1145,11 @@ public:
virtual bool mark_as_eliminated_processor(uchar *arg) { return 0; }
virtual bool eliminate_subselect_processor(uchar *arg) { return 0; }
virtual bool set_fake_select_as_master_processor(uchar *arg) { return 0; }
+ virtual bool update_table_bitmaps_processor(uchar *arg) { return 0; }
virtual bool view_used_tables_processor(uchar *arg) { return 0; }
virtual bool eval_not_null_tables(uchar *opt_arg) { return 0; }
virtual bool clear_sum_processor(uchar *opt_arg) { return 0; }
+ virtual bool is_subquery_processor (uchar *opt_arg) { return 0; }
/* To call bool function for all arguments */
struct bool_func_call_args
@@ -1288,8 +1291,10 @@ public:
}
struct Collect_deps_prm
{
- int nest_level;
List<Item> *parameters;
+ /* unit from which we count nest_level */
+ st_select_lex_unit *nest_level_base;
+ int nest_level;
};
/**
Collect outer references
@@ -1461,6 +1466,8 @@ public:
be defined for Item_func.
*/
virtual void get_cache_parameters(List<Item> &parameters) { };
+
+ virtual void mark_as_condition_AND_part(TABLE_LIST *embedding) {};
};
@@ -2022,6 +2029,20 @@ public:
bool get_date_result(MYSQL_TIME *ltime,ulonglong fuzzydate);
bool is_null() { return field->is_null(); }
void update_null_value();
+ void update_table_bitmaps()
+ {
+ if (field && field->table)
+ {
+ TABLE *tab= field->table;
+ tab->covering_keys.intersect(field->part_of_key);
+ tab->merge_keys.merge(field->part_of_key);
+ if (tab->read_set)
+ bitmap_fast_test_and_set(tab->read_set, field->field_index);
+ if (field->vcol_info)
+ tab->mark_virtual_col(field);
+ }
+ }
+ void update_used_tables() { update_table_bitmaps(); }
Item *get_tmp_table_item(THD *thd);
bool collect_item_field_processor(uchar * arg);
bool add_field_to_set_processor(uchar * arg);
@@ -2033,6 +2054,7 @@ public:
bool vcol_in_partition_func_processor(uchar *bool_arg);
bool check_vcol_func_processor(uchar *arg) { return FALSE;}
bool enumerate_field_refs_processor(uchar *arg);
+ bool update_table_bitmaps_processor(uchar *arg);
void cleanup();
Item_equal *get_item_equal() { return item_equal; }
void set_item_equal(Item_equal *item_eq) { item_equal= item_eq; }
@@ -3153,6 +3175,7 @@ public:
Item *equal_fields_propagator(uchar *arg);
Item *replace_equal_field(uchar *arg);
table_map used_tables() const;
+ table_map not_null_tables() const;
bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
{
return (*ref)->walk(processor, walk_subquery, arg) ||
@@ -3848,6 +3871,20 @@ public:
{ return test(example && example->basic_const_item());}
virtual void clear() { null_value= TRUE; value_cached= FALSE; }
bool is_null() { return null_value; }
+ virtual bool is_expensive()
+ {
+ DBUG_ASSERT(example);
+ if (value_cached)
+ return false;
+ return example->is_expensive();
+ }
+ bool is_expensive_processor(uchar *arg)
+ {
+ DBUG_ASSERT(example);
+ if (value_cached)
+ return false;
+ return example->is_expensive_processor(arg);
+ }
};
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 7a062b413ce..14c8c9b9138 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -33,8 +33,6 @@
#include "sql_time.h" // make_truncated_value_warning
#include "sql_base.h" // dynamic_column_error_message
-static bool convert_const_to_int(THD *, Item_field *, Item **);
-
static Item_result item_store_type(Item_result a, Item *item,
my_bool unsigned_flag)
{
@@ -519,7 +517,6 @@ static bool convert_const_to_int(THD *thd, Item_field *field_item,
void Item_bool_func2::fix_length_and_dec()
{
max_length= 1; // Function returns 0 or 1
- THD *thd;
/*
As some compare functions are generated after sql_yacc,
@@ -551,14 +548,14 @@ void Item_bool_func2::fix_length_and_dec()
/*
Make a special case of compare with fields to get nicer comparisons
- of numbers with constant string.
+ of bigint numbers with constant string.
This directly contradicts the manual (number and a string should
be compared as doubles), but seems to provide more
"intuitive" behavior in some cases (but less intuitive in others).
But disable conversion in case of LIKE function.
*/
- thd= current_thd;
+ THD *thd= current_thd;
if (functype() != LIKE_FUNC && !thd->lex->is_ps_or_view_context_analysis())
{
int field;
@@ -566,7 +563,8 @@ void Item_bool_func2::fix_length_and_dec()
args[field= 1]->real_item()->type() == FIELD_ITEM)
{
Item_field *field_item= (Item_field*) (args[field]->real_item());
- if (field_item->cmp_type() == INT_RESULT &&
+ if ((field_item->field_type() == MYSQL_TYPE_LONGLONG ||
+ field_item->field_type() == MYSQL_TYPE_YEAR) &&
convert_const_to_int(thd, field_item, &args[!field]))
args[0]->cmp_context= args[1]->cmp_context= INT_RESULT;
}
@@ -1401,6 +1399,16 @@ bool Item_in_optimizer::is_top_level_item()
}
+void Item_in_optimizer::fix_after_pullout(st_select_lex *new_parent, Item **ref)
+{
+ /* This will re-calculate attributes of our Item_in_subselect: */
+ Item_bool_func::fix_after_pullout(new_parent, ref);
+
+ /* Then, re-calculate not_null_tables_cache: */
+ eval_not_null_tables(NULL);
+}
+
+
bool Item_in_optimizer::eval_not_null_tables(uchar *opt_arg)
{
not_null_tables_cache= 0;
@@ -1424,6 +1432,7 @@ bool Item_in_optimizer::fix_left(THD *thd, Item **ref)
cache->setup(args[0]);
if (cache->cols() == 1)
{
+ DBUG_ASSERT(args[0]->type() != ROW_ITEM);
/*
Note: there can be cases when used_tables()==0 && !const_item(). See
Item_sum::update_used_tables for details.
@@ -1438,6 +1447,14 @@ bool Item_in_optimizer::fix_left(THD *thd, Item **ref)
uint n= cache->cols();
for (uint i= 0; i < n; i++)
{
+ /* Check that the expression (part of row) do not contain a subquery */
+ if (args[0]->element_index(i)->walk(&Item::is_subquery_processor,
+ FALSE, NULL))
+ {
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "SUBQUERY in ROW in left expression of IN/ALL/ANY");
+ return 1;
+ }
Item *element=args[0]->element_index(i);
if (element->used_tables() || !element->const_item())
((Item_cache *)cache->element_index(i))->set_used_tables(OUTER_REF_TABLE_BIT);
@@ -1784,7 +1801,7 @@ Item *Item_in_optimizer::transform(Item_transformer transformer, uchar *argument
if (!new_item)
return 0;
if (args[1] != new_item)
- current_thd->change_item_tree(args, new_item);
+ current_thd->change_item_tree(args + 1, new_item);
}
else
{
@@ -2135,6 +2152,14 @@ bool Item_func_between::eval_not_null_tables(uchar *opt_arg)
}
+void Item_func_between::fix_after_pullout(st_select_lex *new_parent, Item **ref)
+{
+ /* This will re-calculate attributes of the arguments */
+ Item_func_opt_neg::fix_after_pullout(new_parent, ref);
+ /* Then, re-calculate not_null_tables_cache according to our special rules */
+ eval_not_null_tables(NULL);
+}
+
void Item_func_between::fix_length_and_dec()
{
THD *thd= current_thd;
@@ -2169,7 +2194,8 @@ void Item_func_between::fix_length_and_dec()
!thd->lex->is_ps_or_view_context_analysis())
{
Item_field *field_item= (Item_field*) (args[0]->real_item());
- if (field_item->cmp_type() == INT_RESULT)
+ if (field_item->field_type() == MYSQL_TYPE_LONGLONG ||
+ field_item->field_type() == MYSQL_TYPE_YEAR)
{
/*
The following can't be recoded with || as convert_const_to_int
@@ -2519,6 +2545,15 @@ Item_func_if::eval_not_null_tables(uchar *opt_arg)
}
+void Item_func_if::fix_after_pullout(st_select_lex *new_parent, Item **ref)
+{
+ /* This will re-calculate attributes of the arguments */
+ Item_func::fix_after_pullout(new_parent, ref);
+ /* Then, re-calculate not_null_tables_cache according to our special rules */
+ eval_not_null_tables(NULL);
+}
+
+
void
Item_func_if::fix_length_and_dec()
{
@@ -2771,7 +2806,7 @@ Item *Item_func_case::find_item(String *str)
{
if (args[i]->real_item()->type() == NULL_ITEM)
continue;
- cmp_type= item_cmp_type(left_result_type, args[i]->result_type());
+ cmp_type= item_cmp_type(left_result_type, args[i]->cmp_type());
DBUG_ASSERT(cmp_type != ROW_RESULT);
DBUG_ASSERT(cmp_items[(uint)cmp_type]);
if (!(value_added_map & (1<<(uint)cmp_type)))
@@ -2971,7 +3006,7 @@ void Item_func_case::fix_length_and_dec()
{
uint i;
agg[0]= args[first_expr_num];
- left_result_type= agg[0]->result_type();
+ left_result_type= agg[0]->cmp_type();
/*
As the first expression and WHEN expressions
@@ -3023,14 +3058,18 @@ void Item_func_case::fix_length_and_dec()
change_item_tree_if_needed(thd, &args[nagg * 2], agg[nagg + 1]);
}
+ Item *date_arg= 0;
for (i= 0; i <= (uint)TIME_RESULT; i++)
{
if (found_types & (1 << i) && !cmp_items[i])
{
DBUG_ASSERT((Item_result)i != ROW_RESULT);
- DBUG_ASSERT((Item_result)i != TIME_RESULT);
+
+ if ((Item_result)i == TIME_RESULT)
+ date_arg= find_date_time_item(args, arg_count, 0);
+
if (!(cmp_items[i]=
- cmp_item::get_comparator((Item_result)i, 0,
+ cmp_item::get_comparator((Item_result)i, date_arg,
cmp_collation.collation)))
return;
}
@@ -3624,10 +3663,13 @@ void cmp_item_row::store_value(Item *item)
for (uint i=0; i < n; i++)
{
if (!comparators[i])
+ {
+ DBUG_ASSERT(item->element_index(i)->cmp_type() != TIME_RESULT);
if (!(comparators[i]=
cmp_item::get_comparator(item->element_index(i)->result_type(), 0,
item->element_index(i)->collation.collation)))
break; // new failed
+ }
comparators[i]->store_value(item->element_index(i));
item->null_value|= item->element_index(i)->null_value;
}
@@ -3830,6 +3872,14 @@ Item_func_in::eval_not_null_tables(uchar *opt_arg)
}
+void Item_func_in::fix_after_pullout(st_select_lex *new_parent, Item **ref)
+{
+ /* This will re-calculate attributes of the arguments */
+ Item_func_opt_neg::fix_after_pullout(new_parent, ref);
+ /* Then, re-calculate not_null_tables_cache according to our special rules */
+ eval_not_null_tables(NULL);
+}
+
static int srtcmp_in(CHARSET_INFO *cs, const String *x,const String *y)
{
return cs->coll->strnncollsp(cs,
@@ -3928,7 +3978,8 @@ void Item_func_in::fix_length_and_dec()
!thd->lex->is_view_context_analysis() && cmp_type != INT_RESULT)
{
Item_field *field_item= (Item_field*) (args[0]->real_item());
- if (field_item->cmp_type() == INT_RESULT)
+ if (field_item->field_type() == MYSQL_TYPE_LONGLONG ||
+ field_item->field_type() == MYSQL_TYPE_YEAR)
{
bool all_converted= TRUE;
for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++)
@@ -4151,13 +4202,10 @@ Item_cond::fix_fields(THD *thd, Item **ref)
DBUG_ASSERT(fixed == 0);
List_iterator<Item> li(list);
Item *item;
- TABLE_LIST *save_emb_on_expr_nest= thd->thd_marker.emb_on_expr_nest;
uchar buff[sizeof(char*)]; // Max local vars in function
not_null_tables_cache= used_tables_cache= 0;
const_item_cache= 1;
- if (functype() != COND_AND_FUNC)
- thd->thd_marker.emb_on_expr_nest= NULL;
/*
and_table_cache is the value that Item_cond_or() returns for
not_null_tables()
@@ -4217,7 +4265,6 @@ Item_cond::fix_fields(THD *thd, Item **ref)
maybe_null=1;
}
thd->lex->current_select->cond_count+= list.elements;
- thd->thd_marker.emb_on_expr_nest= save_emb_on_expr_nest;
fix_length_and_dec();
fixed= 1;
return FALSE;
@@ -4229,6 +4276,7 @@ Item_cond::eval_not_null_tables(uchar *opt_arg)
{
Item *item;
List_iterator<Item> li(list);
+ not_null_tables_cache= (table_map) 0;
and_tables_cache= ~(table_map) 0;
while ((item=li++))
{
@@ -4487,6 +4535,17 @@ void Item_cond::neg_arguments(THD *thd)
}
+void Item_cond_and::mark_as_condition_AND_part(TABLE_LIST *embedding)
+{
+ List_iterator<Item> li(list);
+ Item *item;
+ while ((item=li++))
+ {
+ item->mark_as_condition_AND_part(embedding);
+ }
+}
+
+
/**
Evaluation of AND(expr, expr, expr ...).
@@ -5740,7 +5799,7 @@ longlong Item_equal::val_int()
void Item_equal::fix_length_and_dec()
{
Item *item= get_first(NULL);
- eval_item= cmp_item::get_comparator(item->result_type(), 0,
+ eval_item= cmp_item::get_comparator(item->cmp_type(), item,
item->collation.collation);
}
@@ -5843,7 +5902,6 @@ Item* Item_equal::get_first(Item *field_item)
{
Item_equal_fields_iterator it(*this);
Item *item;
- JOIN_TAB *field_tab;
if (!field_item)
return (it++);
Field *field= ((Item_field *) (field_item->real_item()))->field;
@@ -5868,8 +5926,6 @@ Item* Item_equal::get_first(Item *field_item)
in presense of SJM nests.
*/
- field_tab= field->table->reginfo.join_tab;
-
TABLE_LIST *emb_nest= field->table->pos_in_table_list->embedding;
if (emb_nest && emb_nest->sj_mat_info && emb_nest->sj_mat_info->is_used)
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index ef12364ae68..1aa48034566 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -267,6 +267,7 @@ public:
virtual void get_cache_parameters(List<Item> &parameters);
bool is_top_level_item();
bool eval_not_null_tables(uchar *opt_arg);
+ void fix_after_pullout(st_select_lex *new_parent, Item **ref);
};
class Comp_creator
@@ -679,6 +680,7 @@ public:
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
uint decimal_precision() const { return 1; }
bool eval_not_null_tables(uchar *opt_arg);
+ void fix_after_pullout(st_select_lex *new_parent, Item **ref);
};
@@ -785,6 +787,7 @@ public:
uint decimal_precision() const;
const char *func_name() const { return "if"; }
bool eval_not_null_tables(uchar *opt_arg);
+ void fix_after_pullout(st_select_lex *new_parent, Item **ref);
};
@@ -1323,6 +1326,7 @@ public:
bool is_bool_func() { return 1; }
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
bool eval_not_null_tables(uchar *opt_arg);
+ void fix_after_pullout(st_select_lex *new_parent, Item **ref);
};
class cmp_item_row :public cmp_item
@@ -1807,6 +1811,7 @@ public:
return item;
}
Item *neg_transformer(THD *thd);
+ void mark_as_condition_AND_part(TABLE_LIST *embedding);
};
inline bool is_cond_and(Item *item)
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 1c6a9c23b26..c771bfbf133 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -177,9 +177,7 @@ Item_func::fix_fields(THD *thd, Item **ref)
{
DBUG_ASSERT(fixed == 0);
Item **arg,**arg_end;
- TABLE_LIST *save_emb_on_expr_nest= thd->thd_marker.emb_on_expr_nest;
uchar buff[STACK_BUFF_ALLOC]; // Max argument in function
- thd->thd_marker.emb_on_expr_nest= NULL;
used_tables_cache= not_null_tables_cache= 0;
const_item_cache=1;
@@ -233,7 +231,6 @@ Item_func::fix_fields(THD *thd, Item **ref)
if (thd->is_error()) // An error inside fix_length_and_dec occured
return TRUE;
fixed= 1;
- thd->thd_marker.emb_on_expr_nest= save_emb_on_expr_nest;
return FALSE;
}
@@ -5497,7 +5494,7 @@ void Item_func_get_system_var::fix_length_and_dec()
case SHOW_LONG:
case SHOW_INT:
case SHOW_HA_ROWS:
- unsigned_flag= TRUE;
+ unsigned_flag= TRUE; //var->show_type() != SHOW_INT;
collation.set_numeric();
fix_char_length(MY_INT64_NUM_DECIMAL_DIGITS);
decimals=0;
diff --git a/sql/item_row.cc b/sql/item_row.cc
index df03b0e0ebb..ccd7a37e9b7 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -156,11 +156,13 @@ void Item_row::fix_after_pullout(st_select_lex *new_parent, Item **ref)
{
used_tables_cache= 0;
const_item_cache= 1;
+ not_null_tables_cache= 0;
for (uint i= 0; i < arg_count; i++)
{
items[i]->fix_after_pullout(new_parent, &items[i]);
used_tables_cache|= items[i]->used_tables();
const_item_cache&= items[i]->const_item();
+ not_null_tables_cache|= items[i]->not_null_tables();
}
}
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 1b3c1a616b9..f570c309d79 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -3847,40 +3847,40 @@ void Item_func_dyncol_create::prepare_arguments()
DBUG_ASSERT(args[valpos]->field_type() == MYSQL_TYPE_NULL);
break;
case DYN_COL_INT:
- vals[i].long_value= args[valpos]->val_int();
+ vals[i].x.long_value= args[valpos]->val_int();
break;
case DYN_COL_UINT:
- vals[i].ulong_value= args[valpos]->val_int();
+ vals[i].x.ulong_value= args[valpos]->val_int();
break;
case DYN_COL_DOUBLE:
- vals[i].double_value= args[valpos]->val_real();
+ vals[i].x.double_value= args[valpos]->val_real();
break;
case DYN_COL_STRING:
res= args[valpos]->val_str(&tmp);
if (res &&
- (vals[i].string_value.str= my_strndup(res->ptr(), res->length(),
+ (vals[i].x.string.value.str= my_strndup(res->ptr(), res->length(),
MYF(MY_WME))))
{
- vals[i].string_value.length= res->length();
- vals[i].charset= res->charset();
+ vals[i].x.string.value.length= res->length();
+ vals[i].x.string.charset= res->charset();
}
else
{
args[valpos]->null_value= 1; // In case of out of memory
- vals[i].string_value.str= NULL;
- vals[i].string_value.length= 0; // just to be safe
+ vals[i].x.string.value.str= NULL;
+ vals[i].x.string.value.length= 0; // just to be safe
}
break;
case DYN_COL_DECIMAL:
if ((dres= args[valpos]->val_decimal(&dtmp)))
{
dynamic_column_prepare_decimal(&vals[i]);
- DBUG_ASSERT(vals[i].decimal_value.len == dres->len);
- vals[i].decimal_value.intg= dres->intg;
- vals[i].decimal_value.frac= dres->frac;
- vals[i].decimal_value.sign= dres->sign();
- memcpy(vals[i].decimal_buffer, dres->buf,
- sizeof(vals[i].decimal_buffer));
+ DBUG_ASSERT(vals[i].x.decimal.value.len == dres->len);
+ vals[i].x.decimal.value.intg= dres->intg;
+ vals[i].x.decimal.value.frac= dres->frac;
+ vals[i].x.decimal.value.sign= dres->sign();
+ memcpy(vals[i].x.decimal.buffer, dres->buf,
+ sizeof(vals[i].x.decimal.buffer));
}
else
{
@@ -3889,13 +3889,13 @@ void Item_func_dyncol_create::prepare_arguments()
}
break;
case DYN_COL_DATETIME:
- args[valpos]->get_date(&vals[i].time_value, TIME_FUZZY_DATE);
+ args[valpos]->get_date(&vals[i].x.time_value, TIME_FUZZY_DATE);
break;
case DYN_COL_DATE:
- args[valpos]->get_date(&vals[i].time_value, TIME_FUZZY_DATE);
+ args[valpos]->get_date(&vals[i].x.time_value, TIME_FUZZY_DATE);
break;
case DYN_COL_TIME:
- args[valpos]->get_time(&vals[i].time_value);
+ args[valpos]->get_time(&vals[i].x.time_value);
break;
default:
DBUG_ASSERT(0);
@@ -3904,7 +3904,7 @@ void Item_func_dyncol_create::prepare_arguments()
if (vals[i].type != DYN_COL_NULL && args[valpos]->null_value)
{
if (vals[i].type == DYN_COL_STRING)
- my_free(vals[i].string_value.str);
+ my_free(vals[i].x.string.value.str);
vals[i].type= DYN_COL_NULL;
}
}
@@ -3918,7 +3918,7 @@ void Item_func_dyncol_create::cleanup_arguments()
for (i= 0; i < column_count; i++)
{
if (vals[i].type == DYN_COL_STRING)
- my_free(vals[i].string_value.str);
+ my_free(vals[i].x.string.value.str);
}
}
@@ -4135,19 +4135,19 @@ String *Item_dyncol_get::val_str(String *str_result)
goto null;
case DYN_COL_INT:
case DYN_COL_UINT:
- str_result->set_int(val.long_value, test(val.type == DYN_COL_UINT),
+ str_result->set_int(val.x.long_value, test(val.type == DYN_COL_UINT),
&my_charset_latin1);
break;
case DYN_COL_DOUBLE:
- str_result->set_real(val.double_value, NOT_FIXED_DEC, &my_charset_latin1);
+ str_result->set_real(val.x.double_value, NOT_FIXED_DEC, &my_charset_latin1);
break;
case DYN_COL_STRING:
- if ((char*) tmp.ptr() <= val.string_value.str &&
- (char*) tmp.ptr() + tmp.length() >= val.string_value.str)
+ if ((char*) tmp.ptr() <= val.x.string.value.str &&
+ (char*) tmp.ptr() + tmp.length() >= val.x.string.value.str)
{
/* value is allocated in tmp buffer; We have to make a copy */
- str_result->copy(val.string_value.str, val.string_value.length,
- val.charset);
+ str_result->copy(val.x.string.value.str, val.x.string.value.length,
+ val.x.string.charset);
}
else
{
@@ -4156,24 +4156,24 @@ String *Item_dyncol_get::val_str(String *str_result)
into a field or in a buffer for another item and this buffer
is not going to be deleted during expression evaluation
*/
- str_result->set(val.string_value.str, val.string_value.length,
- val.charset);
+ str_result->set(val.x.string.value.str, val.x.string.value.length,
+ val.x.string.charset);
}
break;
case DYN_COL_DECIMAL:
{
int res;
int length=
- my_decimal_string_length((const my_decimal*)&val.decimal_value);
+ my_decimal_string_length((const my_decimal*)&val.x.decimal.value);
if (str_result->alloc(length))
goto null;
- if ((res= decimal2string(&val.decimal_value, (char*) str_result->ptr(),
+ if ((res= decimal2string(&val.x.decimal.value, (char*) str_result->ptr(),
&length, 0, 0, ' ')) != E_DEC_OK)
{
char buff[40];
int len= sizeof(buff);
DBUG_ASSERT(length < (int)sizeof(buff));
- decimal2string(&val.decimal_value, buff, &len, 0, 0, ' ');
+ decimal2string(&val.x.decimal.value, buff, &len, 0, 0, ' ');
decimal_operation_results(res, buff, "CHAR");
}
str_result->set_charset(&my_charset_latin1);
@@ -4191,7 +4191,7 @@ String *Item_dyncol_get::val_str(String *str_result)
asked to return the time argument as a string.
*/
if (str_result->alloc(MAX_DATE_STRING_REP_LENGTH) ||
- !(length= my_TIME_to_str(&val.time_value, (char*) str_result->ptr(),
+ !(length= my_TIME_to_str(&val.x.time_value, (char*) str_result->ptr(),
AUTO_SEC_PART_DIGITS)))
goto null;
str_result->set_charset(&my_charset_latin1);
@@ -4221,20 +4221,20 @@ longlong Item_dyncol_get::val_int()
goto null;
case DYN_COL_UINT:
unsigned_flag= 1; // Make it possible for caller to detect sign
- return val.long_value;
+ return val.x.long_value;
case DYN_COL_INT:
unsigned_flag= 0; // Make it possible for caller to detect sign
- return val.long_value;
+ return val.x.long_value;
case DYN_COL_DOUBLE:
{
bool error;
longlong num;
- num= double_to_longlong(val.double_value, unsigned_flag, &error);
+ num= double_to_longlong(val.x.double_value, unsigned_flag, &error);
if (error)
{
char buff[30];
- sprintf(buff, "%lg", val.double_value);
+ sprintf(buff, "%lg", val.x.double_value);
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_DATA_OVERFLOW,
ER(ER_DATA_OVERFLOW),
@@ -4247,14 +4247,14 @@ longlong Item_dyncol_get::val_int()
{
int error;
longlong num;
- char *end= val.string_value.str + val.string_value.length, *org_end= end;
+ char *end= val.x.string.value.str + val.x.string.value.length, *org_end= end;
- num= my_strtoll10(val.string_value.str, &end, &error);
+ num= my_strtoll10(val.x.string.value.str, &end, &error);
if (end != org_end || error > 0)
{
char buff[80];
- strmake(buff, val.string_value.str, min(sizeof(buff)-1,
- val.string_value.length));
+ strmake(buff, val.x.string.value.str, min(sizeof(buff)-1,
+ val.x.string.value.length));
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_BAD_DATA,
ER(ER_BAD_DATA),
@@ -4267,18 +4267,18 @@ longlong Item_dyncol_get::val_int()
case DYN_COL_DECIMAL:
{
longlong num;
- my_decimal2int(E_DEC_FATAL_ERROR, &val.decimal_value, unsigned_flag,
+ my_decimal2int(E_DEC_FATAL_ERROR, &val.x.decimal.value, unsigned_flag,
&num);
return num;
}
case DYN_COL_DATETIME:
case DYN_COL_DATE:
case DYN_COL_TIME:
- unsigned_flag= !val.time_value.neg;
+ unsigned_flag= !val.x.time_value.neg;
if (unsigned_flag)
- return TIME_to_ulonglong(&val.time_value);
+ return TIME_to_ulonglong(&val.x.time_value);
else
- return -(longlong)TIME_to_ulonglong(&val.time_value);
+ return -(longlong)TIME_to_ulonglong(&val.x.time_value);
}
null:
@@ -4300,24 +4300,24 @@ double Item_dyncol_get::val_real()
case DYN_COL_NULL:
goto null;
case DYN_COL_UINT:
- return ulonglong2double(val.ulong_value);
+ return ulonglong2double(val.x.ulong_value);
case DYN_COL_INT:
- return (double) val.long_value;
+ return (double) val.x.long_value;
case DYN_COL_DOUBLE:
- return (double) val.double_value;
+ return (double) val.x.double_value;
case DYN_COL_STRING:
{
int error;
char *end;
- double res= my_strntod(val.charset, (char*) val.string_value.str,
- val.string_value.length, &end, &error);
+ double res= my_strntod(val.x.string.charset, (char*) val.x.string.value.str,
+ val.x.string.value.length, &end, &error);
- if (end != (char*) val.string_value.str + val.string_value.length ||
+ if (end != (char*) val.x.string.value.str + val.x.string.value.length ||
error)
{
char buff[80];
- strmake(buff, val.string_value.str, min(sizeof(buff)-1,
- val.string_value.length));
+ strmake(buff, val.x.string.value.str, min(sizeof(buff)-1,
+ val.x.string.value.length));
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_BAD_DATA,
ER(ER_BAD_DATA),
@@ -4329,13 +4329,13 @@ double Item_dyncol_get::val_real()
{
double res;
/* This will always succeed */
- decimal2double(&val.decimal_value, &res);
+ decimal2double(&val.x.decimal.value, &res);
return res;
}
case DYN_COL_DATETIME:
case DYN_COL_DATE:
case DYN_COL_TIME:
- return TIME_to_double(&val.time_value);
+ return TIME_to_double(&val.x.time_value);
}
null:
@@ -4357,22 +4357,22 @@ my_decimal *Item_dyncol_get::val_decimal(my_decimal *decimal_value)
case DYN_COL_NULL:
goto null;
case DYN_COL_UINT:
- int2my_decimal(E_DEC_FATAL_ERROR, val.long_value, TRUE, decimal_value);
+ int2my_decimal(E_DEC_FATAL_ERROR, val.x.long_value, TRUE, decimal_value);
break;
case DYN_COL_INT:
- int2my_decimal(E_DEC_FATAL_ERROR, val.long_value, FALSE, decimal_value);
+ int2my_decimal(E_DEC_FATAL_ERROR, val.x.long_value, FALSE, decimal_value);
break;
case DYN_COL_DOUBLE:
- double2my_decimal(E_DEC_FATAL_ERROR, val.double_value, decimal_value);
+ double2my_decimal(E_DEC_FATAL_ERROR, val.x.double_value, decimal_value);
break;
case DYN_COL_STRING:
{
int rc;
- rc= str2my_decimal(0, val.string_value.str, val.string_value.length,
- val.charset, decimal_value);
+ rc= str2my_decimal(0, val.x.string.value.str, val.x.string.value.length,
+ val.x.string.charset, decimal_value);
char buff[80];
- strmake(buff, val.string_value.str, min(sizeof(buff)-1,
- val.string_value.length));
+ strmake(buff, val.x.string.value.str, min(sizeof(buff)-1,
+ val.x.string.value.length));
if (rc != E_DEC_OK)
{
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
@@ -4383,14 +4383,14 @@ my_decimal *Item_dyncol_get::val_decimal(my_decimal *decimal_value)
break;
}
case DYN_COL_DECIMAL:
- decimal2my_decimal(&val.decimal_value, decimal_value);
+ decimal2my_decimal(&val.x.decimal.value, decimal_value);
break;
case DYN_COL_DATETIME:
case DYN_COL_DATE:
case DYN_COL_TIME:
- decimal_value= seconds2my_decimal(val.time_value.neg,
- TIME_to_ulonglong(&val.time_value),
- val.time_value.second_part,
+ decimal_value= seconds2my_decimal(val.x.time_value.neg,
+ TIME_to_ulonglong(&val.x.time_value),
+ val.x.time_value.second_part,
decimal_value);
break;
}
@@ -4419,37 +4419,37 @@ bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
signed_value= 1; // For error message
/* fall_trough */
case DYN_COL_UINT:
- if (signed_value || val.ulong_value <= LONGLONG_MAX)
+ if (signed_value || val.x.ulong_value <= LONGLONG_MAX)
{
- if (int_to_datetime_with_warn(val.ulong_value, ltime, fuzzy_date,
+ if (int_to_datetime_with_warn(val.x.ulong_value, ltime, fuzzy_date,
0 /* TODO */))
goto null;
return 0;
}
/* let double_to_datetime_with_warn() issue the warning message */
- val.double_value= static_cast<double>(ULONGLONG_MAX);
+ val.x.double_value= static_cast<double>(ULONGLONG_MAX);
/* fall_trough */
case DYN_COL_DOUBLE:
- if (double_to_datetime_with_warn(val.double_value, ltime, fuzzy_date,
+ if (double_to_datetime_with_warn(val.x.double_value, ltime, fuzzy_date,
0 /* TODO */))
goto null;
return 0;
case DYN_COL_DECIMAL:
- if (decimal_to_datetime_with_warn((my_decimal*)&val.decimal_value, ltime,
+ if (decimal_to_datetime_with_warn((my_decimal*)&val.x.decimal.value, ltime,
fuzzy_date, 0 /* TODO */))
goto null;
return 0;
case DYN_COL_STRING:
if (str_to_datetime_with_warn(&my_charset_numeric,
- val.string_value.str,
- val.string_value.length,
+ val.x.string.value.str,
+ val.x.string.value.length,
ltime, fuzzy_date) <= MYSQL_TIMESTAMP_ERROR)
goto null;
return 0;
case DYN_COL_DATETIME:
case DYN_COL_DATE:
case DYN_COL_TIME:
- *ltime= val.time_value;
+ *ltime= val.x.time_value;
return 0;
}
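
Every change in item_strfunc.cc above is the same mechanical rename: the per-type members of DYNAMIC_COLUMN_VALUE now live inside a union member named x, which lets compound payloads (string value plus charset, decimal digits plus their buffer) travel together. Reconstructed purely from the member accesses in these hunks, the layout is roughly the following; the exact field types and the buffer size are assumptions, not the real dynamic-columns header:

    // Declaration sketch, relying on server types (MYSQL_TIME, LEX_STRING,
    // CHARSET_INFO, decimal_t); sizes and spellings here are assumptions.
    struct DYNAMIC_COLUMN_VALUE
    {
      DYNAMIC_COLUMN_TYPE type;            // DYN_COL_NULL, DYN_COL_INT, ...
      union
      {
        long long long_value;              // DYN_COL_INT
        unsigned long long ulong_value;    // DYN_COL_UINT
        double double_value;               // DYN_COL_DOUBLE
        MYSQL_TIME time_value;             // DYN_COL_DATE/TIME/DATETIME
        struct
        {
          LEX_STRING value;                // DYN_COL_STRING: pointer + length
          CHARSET_INFO *charset;
        } string;
        struct
        {
          decimal_t value;                 // DYN_COL_DECIMAL
          decimal_digit_t buffer[9];       // backing store for value.buf
        } decimal;
      } x;
    };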
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 40f1e753d60..f25722ab30b 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -168,9 +168,11 @@ void Item_in_subselect::cleanup()
delete left_expr_cache;
left_expr_cache= NULL;
}
+ /*
+ TODO: This breaks the commented assert in add_strategy().
+ in_strategy&= ~SUBS_STRATEGY_CHOSEN;
+ */
first_execution= TRUE;
- if (in_strategy & SUBS_MATERIALIZATION)
- in_strategy= 0;
pushed_cond_guards= NULL;
Item_subselect::cleanup();
DBUG_VOID_RETURN;
@@ -186,10 +188,9 @@ void Item_allany_subselect::cleanup()
*/
for (SELECT_LEX *sl= unit->first_select();
sl; sl= sl->next_select())
- if (in_strategy & SUBS_MAXMIN_INJECTED)
+ if (test_strategy(SUBS_MAXMIN_INJECTED))
sl->with_sum_func= false;
Item_in_subselect::cleanup();
-
}
@@ -239,7 +240,7 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref)
if (check_stack_overrun(thd, STACK_MIN_SIZE, (uchar*)&res))
return TRUE;
-
+
if (!(res= engine->prepare()))
{
// all transformation is done (used by prepared statements)
@@ -502,6 +503,7 @@ void Item_subselect::recalc_used_tables(st_select_lex *new_parent,
upper->item->walk(&Item::enumerate_field_refs_processor, FALSE,
(uchar*)&fixer);
used_tables_cache |= fixer.used_tables;
+ upper->item->walk(&Item::update_table_bitmaps_processor, FALSE, NULL);
/*
if (after_pullout)
upper->item->fix_after_pullout(new_parent, &(upper->item));
@@ -524,6 +526,20 @@ void Item_subselect::recalc_used_tables(st_select_lex *new_parent,
bool Item_subselect::walk(Item_processor processor, bool walk_subquery,
uchar *argument)
{
+ if (!(unit->uncacheable & ~UNCACHEABLE_DEPENDENT) && engine->is_executed() &&
+ !unit->describe)
+ {
+ /*
+ The subquery has already been executed (for real, it wasn't EXPLAIN's
+ fake execution) so it should not matter what it has inside.
+
+ The actual reason for not walking inside is that parts of the subquery
+ (e.g. JTBM join nests and their IN-equality conditions may have been
+ invalidated by irreversible cleanups (those happen after an uncorrelated
+ subquery has been executed).
+ */
+ return (this->*processor)(argument);
+ }
if (walk_subquery)
{
@@ -596,7 +612,9 @@ bool Item_subselect::exec()
void Item_subselect::get_cache_parameters(List<Item> &parameters)
{
- Collect_deps_prm prm= { unit->first_select()->nest_level, &parameters };
+ Collect_deps_prm prm= {&parameters,
+ unit->first_select()->nest_level_base,
+ unit->first_select()->nest_level};
walk(&Item::collect_outer_ref_processor, TRUE, (uchar*)&prm);
}
@@ -717,7 +735,7 @@ bool Item_in_subselect::exec()
- on a cost-based basis, that takes into account the cost of a cache
lookup, the cache hit rate, and the savings per cache hit.
*/
- if (!left_expr_cache && (in_strategy & SUBS_MATERIALIZATION))
+ if (!left_expr_cache && (test_strategy(SUBS_MATERIALIZATION)))
init_left_expr_cache();
/*
@@ -834,7 +852,10 @@ Item_maxmin_subselect::Item_maxmin_subselect(THD *thd_param,
{
DBUG_ENTER("Item_maxmin_subselect::Item_maxmin_subselect");
max= max_arg;
- init(select_lex, new select_max_min_finder_subselect(this, max_arg));
+ init(select_lex,
+ new select_max_min_finder_subselect(this, max_arg,
+ parent->substype() ==
+ Item_subselect::ALL_SUBS));
max_columns= 1;
maybe_null= 1;
max_columns= 1;
@@ -1177,8 +1198,9 @@ bool Item_in_subselect::test_limit(st_select_lex_unit *unit_arg)
Item_in_subselect::Item_in_subselect(Item * left_exp,
st_select_lex *select_lex):
- Item_exists_subselect(), left_expr_cache(0), first_execution(TRUE),
- optimizer(0), pushed_cond_guards(NULL), in_strategy(0),
+ Item_exists_subselect(),
+ left_expr_cache(0), first_execution(TRUE), in_strategy(SUBS_NOT_TRANSFORMED),
+ optimizer(0), pushed_cond_guards(NULL), emb_on_expr_nest(NULL),
is_jtbm_merged(FALSE), is_flattenable_semijoin(FALSE),
is_registered_semijoin(FALSE),
upper_item(0)
@@ -1599,7 +1621,7 @@ Item_in_subselect::single_value_transformer(JOIN *join)
bool Item_allany_subselect::transform_into_max_min(JOIN *join)
{
DBUG_ENTER("Item_allany_subselect::transform_into_max_min");
- if (!(in_strategy & (SUBS_MAXMIN_INJECTED | SUBS_MAXMIN_ENGINE)))
+ if (!test_strategy(SUBS_MAXMIN_INJECTED | SUBS_MAXMIN_ENGINE))
DBUG_RETURN(false);
Item **place= optimizer->arguments() + 1;
THD *thd= join->thd;
@@ -1610,11 +1632,20 @@ bool Item_allany_subselect::transform_into_max_min(JOIN *join)
*/
DBUG_ASSERT(!substitution);
- if (!select_lex->group_list.elements &&
- !select_lex->having &&
- !select_lex->with_sum_func &&
- !(select_lex->next_select()) &&
- select_lex->table_list.elements)
+ /*
+ Check if optimization with aggregate min/max possible
+ 1 There is no aggregate in the subquery
+ 2 It is not UNION
+ 3 There is tables
+ 4 It is not ALL subquery with possible NULLs in the SELECT list
+ */
+ if (!select_lex->group_list.elements && /*1*/
+ !select_lex->having && /*1*/
+ !select_lex->with_sum_func && /*1*/
+ !(select_lex->next_select()) && /*2*/
+ select_lex->table_list.elements && /*3*/
+ (!select_lex->ref_pointer_array[0]->maybe_null || /*4*/
+ substype() != Item_subselect::ALL_SUBS)) /*4*/
{
Item_sum_hybrid *item;
nesting_map save_allow_sum_func;
@@ -1664,7 +1695,7 @@ bool Item_allany_subselect::transform_into_max_min(JOIN *join)
Remove other strategies if any (we already changed the query and
can't apply other strategy).
*/
- in_strategy= SUBS_MAXMIN_INJECTED;
+ set_strategy(SUBS_MAXMIN_INJECTED);
}
else
{
@@ -1676,13 +1707,13 @@ bool Item_allany_subselect::transform_into_max_min(JOIN *join)
Remove other strategies if any (we already changed the query and
can't apply other strategy).
*/
- in_strategy= SUBS_MAXMIN_ENGINE;
+ set_strategy(SUBS_MAXMIN_ENGINE);
}
/*
The swap is needed for expressions of type 'f1 < ALL ( SELECT ....)'
where we want to evaluate the sub query even if f1 would be null.
*/
- subs= func->create_swap(left_expr, subs);
+ subs= func->create_swap(*(optimizer->get_cache()), subs);
thd->change_item_tree(place, subs);
if (subs->fix_fields(thd, &subs))
DBUG_RETURN(true);
@@ -2263,7 +2294,12 @@ bool Item_in_subselect::inject_in_to_exists_cond(JOIN *join_arg)
{
/* The argument list of the top-level AND may change after fix fields. */
and_args= ((Item_cond*) join_arg->conds)->argument_list();
- and_args->concat((List<Item> *) &join_arg->cond_equal->current_level);
+ List_iterator<Item_equal> li(join_arg->cond_equal->current_level);
+ Item_equal *elem;
+ while ((elem= li++))
+ {
+ and_args->push_back(elem);
+ }
}
}
@@ -2391,7 +2427,7 @@ err:
void Item_in_subselect::print(String *str, enum_query_type query_type)
{
- if (in_strategy & SUBS_IN_TO_EXISTS)
+ if (test_strategy(SUBS_IN_TO_EXISTS))
str->append(STRING_WITH_LEN("<exists>"));
else
{
@@ -2407,7 +2443,7 @@ bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref)
uint outer_cols_num;
List<Item> *inner_cols;
- if (in_strategy & SUBS_SEMI_JOIN)
+ if (test_strategy(SUBS_SEMI_JOIN))
return !( (*ref)= new Item_int(1));
/*
@@ -2461,7 +2497,6 @@ bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref)
return TRUE;
if (Item_subselect::fix_fields(thd_arg, ref))
return TRUE;
-
fixed= TRUE;
return FALSE;
}
@@ -2471,6 +2506,7 @@ void Item_in_subselect::fix_after_pullout(st_select_lex *new_parent, Item **ref)
{
left_expr->fix_after_pullout(new_parent, &left_expr);
Item_subselect::fix_after_pullout(new_parent, ref);
+ used_tables_cache |= left_expr->used_tables();
}
void Item_in_subselect::update_used_tables()
@@ -2583,8 +2619,7 @@ Item_allany_subselect::select_transformer(JOIN *join)
{
DBUG_ENTER("Item_allany_subselect::select_transformer");
DBUG_ASSERT((in_strategy & ~(SUBS_MAXMIN_INJECTED | SUBS_MAXMIN_ENGINE |
- SUBS_IN_TO_EXISTS)) == 0);
- in_strategy|= SUBS_IN_TO_EXISTS;
+ SUBS_IN_TO_EXISTS | SUBS_STRATEGY_CHOSEN)) == 0);
if (upper_item)
upper_item->show= 1;
DBUG_RETURN(select_in_like_transformer(join));
@@ -2593,7 +2628,7 @@ Item_allany_subselect::select_transformer(JOIN *join)
void Item_allany_subselect::print(String *str, enum_query_type query_type)
{
- if (in_strategy & SUBS_IN_TO_EXISTS)
+ if (test_strategy(SUBS_IN_TO_EXISTS))
str->append(STRING_WITH_LEN("<exists>"));
else
{
@@ -2979,7 +3014,7 @@ int subselect_single_select_engine::exec()
executed= 1;
thd->where= save_where;
thd->lex->current_select= save_select;
- DBUG_RETURN(join->error||thd->is_fatal_error);
+ DBUG_RETURN(join->error || thd->is_fatal_error || thd->is_error());
}
thd->where= save_where;
thd->lex->current_select= save_select;
@@ -3917,6 +3952,8 @@ subselect_hash_sj_engine::get_strategy_using_data()
}
if (result_sink->get_null_count_of_col(i) == tmp_table->file->stats.records)
++count_null_only_columns;
+ if (result_sink->get_null_count_of_col(i))
+ ++count_columns_with_nulls;
}
/* If no column contains NULLs use regular hash index lookups. */
@@ -4395,7 +4432,13 @@ double get_fanout_with_deps(JOIN *join, table_map tset)
for (JOIN_TAB *tab= first_top_level_tab(join, WITHOUT_CONST_TABLES); tab;
tab= next_top_level_tab(join, tab))
{
- if ((tab->table->map & checked_deps) && !tab->emb_sj_nest &&
+ /*
+ Ignore SJM nests. They have tab->table==NULL. There is no point to walk
+ inside them, because GROUP BY clause cannot refer to tables from within
+ subquery.
+ */
+ if (!tab->is_sjm_nest() && (tab->table->map & checked_deps) &&
+ !tab->emb_sj_nest &&
tab->records_read != 0)
{
fanout *= rows2double(tab->records_read);
@@ -4585,7 +4628,8 @@ int subselect_hash_sj_engine::exec()
/* The subquery should be optimized, and materialized only once. */
DBUG_ASSERT(materialize_join->optimized && !is_materialized);
materialize_join->exec();
- if ((res= test(materialize_join->error || thd->is_fatal_error)))
+ if ((res= test(materialize_join->error || thd->is_fatal_error ||
+ thd->is_error())))
goto err;
/*
@@ -4681,6 +4725,7 @@ int subselect_hash_sj_engine::exec()
count_pm_keys,
has_covering_null_row,
has_covering_null_columns,
+ count_columns_with_nulls,
item, result,
semi_join_conds->argument_list());
if (!pm_engine ||
@@ -4706,7 +4751,8 @@ int subselect_hash_sj_engine::exec()
item, result,
semi_join_conds->argument_list(),
has_covering_null_row,
- has_covering_null_columns)))
+ has_covering_null_columns,
+ count_columns_with_nulls)))
{
/* This is an irrecoverable error. */
res= 1;
@@ -4825,14 +4871,14 @@ bool Ordered_key::init(MY_BITMAP *columns_to_index)
Item_func_lt *fn_less_than;
key_column_count= bitmap_bits_set(columns_to_index);
-
- // TIMOUR: check for mem allocation err, revert to scan
-
key_columns= (Item_field**) thd->alloc(key_column_count *
sizeof(Item_field*));
compare_pred= (Item_func_lt**) thd->alloc(key_column_count *
sizeof(Item_func_lt*));
+ if (!key_columns || !compare_pred)
+ return TRUE; /* Revert to table scan partial match. */
+
for (uint i= 0; i < columns_to_index->n_bits; i++)
{
if (!bitmap_is_set(columns_to_index, i))
@@ -5143,43 +5189,48 @@ subselect_partial_match_engine::subselect_partial_match_engine(
select_result_interceptor *result_arg,
List<Item> *equi_join_conds_arg,
bool has_covering_null_row_arg,
- bool has_covering_null_columns_arg)
+ bool has_covering_null_columns_arg,
+ uint count_columns_with_nulls_arg)
:subselect_engine(thd_arg, item_arg, result_arg),
tmp_table(tmp_table_arg), lookup_engine(engine_arg),
equi_join_conds(equi_join_conds_arg),
has_covering_null_row(has_covering_null_row_arg),
- has_covering_null_columns(has_covering_null_columns_arg)
+ has_covering_null_columns(has_covering_null_columns_arg),
+ count_columns_with_nulls(count_columns_with_nulls_arg)
{}
int subselect_partial_match_engine::exec()
{
Item_in_subselect *item_in= (Item_in_subselect *) item;
- int res;
+ int copy_res, lookup_res;
/* Try to find a matching row by index lookup. */
- res= lookup_engine->copy_ref_key_simple();
- if (res == -1)
+ copy_res= lookup_engine->copy_ref_key_simple();
+ if (copy_res == -1)
{
/* The result is FALSE based on the outer reference. */
item_in->value= 0;
item_in->null_value= 0;
return 0;
}
- else if (res == 0)
+ else if (copy_res == 0)
{
/* Search for a complete match. */
- if ((res= lookup_engine->index_lookup()))
+ if ((lookup_res= lookup_engine->index_lookup()))
{
/* An error occured during lookup(). */
item_in->value= 0;
item_in->null_value= 0;
- return res;
+ return lookup_res;
}
- else if (item_in->value)
+ else if (item_in->value || !count_columns_with_nulls)
{
/*
A complete match was found, the result of IN is TRUE.
+ If no match was found, and there are no NULLs in the materialized
+ subquery, then the result is guaranteed to be false because this
+ branch is executed when the outer reference has no NULLs as well.
Notice: (this->item == lookup_engine->item)
*/
return 0;
@@ -5287,10 +5338,13 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts,
merge_keys_count == 1 && non_null_key_parts));
/*
Allocate buffers to hold the merged keys and the mapping between rowids and
- row numbers.
+ row numbers. All small buffers are allocated in the runtime memroot. Big
+ buffers are allocated from the OS via malloc.
*/
if (!(merge_keys= (Ordered_key**) thd->alloc(merge_keys_count *
sizeof(Ordered_key*))) ||
+ !(null_bitmaps= (MY_BITMAP**) thd->alloc(merge_keys_count *
+ sizeof(MY_BITMAP*))) ||
!(row_num_to_rowid= (uchar*) my_malloc((size_t)(row_count * rowid_length),
MYF(MY_WME))))
return TRUE;
@@ -5508,6 +5562,56 @@ bool subselect_rowid_merge_engine::test_null_row(rownum_t row_num)
}
+/**
+ Test if a subset of NULL-able columns contains a row of NULLs.
+*/
+
+bool subselect_rowid_merge_engine::
+exists_complementing_null_row(MY_BITMAP *keys_to_complement)
+{
+ rownum_t highest_min_row= 0;
+ rownum_t lowest_max_row= UINT_MAX;
+ uint count_null_keys, i, j;
+ Ordered_key *cur_key;
+
+ count_null_keys= keys_to_complement->n_bits -
+ bitmap_bits_set(keys_to_complement);
+ if (count_null_keys == 1)
+ {
+ /*
+ The caller guarantees that the complement to keys_to_complement
+ contains only columns with NULLs. Therefore if there is only one column,
+ it is guaranteed to contain NULLs.
+ */
+ return TRUE;
+ }
+
+ for (i= (non_null_key ? 1 : 0), j= 0; i < merge_keys_count; i++)
+ {
+ cur_key= merge_keys[i];
+ if (bitmap_is_set(keys_to_complement, cur_key->get_keyid()))
+ continue;
+ DBUG_ASSERT(cur_key->get_null_count());
+ if (cur_key->get_min_null_row() > highest_min_row)
+ highest_min_row= cur_key->get_min_null_row();
+ if (cur_key->get_max_null_row() < lowest_max_row)
+ lowest_max_row= cur_key->get_max_null_row();
+ null_bitmaps[j++]= cur_key->get_null_key();
+ }
+ DBUG_ASSERT(count_null_keys == j);
+
+ if (lowest_max_row < highest_min_row)
+ {
+ /* The intersection of NULL rows is empty. */
+ return FALSE;
+ }
+
+ return bitmap_exists_intersection((const MY_BITMAP**) null_bitmaps,
+ count_null_keys,
+ highest_min_row, lowest_max_row);
+}
+
+
/*
@retval TRUE there is a partial match (UNKNOWN)
@retval FALSE there is no match at all (FALSE)
@@ -5520,6 +5624,8 @@ bool subselect_rowid_merge_engine::partial_match()
Ordered_key *cur_key;
rownum_t cur_row_num;
uint count_nulls_in_search_key= 0;
+ uint max_null_in_any_row=
+ ((select_materialize_with_stats *) result)->get_max_nulls_in_row();
bool res= FALSE;
/* If there is a non-NULL key, it must be the first key in the keys array. */
@@ -5571,22 +5677,53 @@ bool subselect_rowid_merge_engine::partial_match()
/*
If the outer reference consists of only NULLs, or if it has NULLs in all
- nullable columns, the result is UNKNOWN.
+ nullable columns (above we guarantee there is a match for the non-null
+    columns), the result is UNKNOWN.
*/
- if (count_nulls_in_search_key ==
- ((Item_in_subselect *) item)->left_expr->cols() -
- (non_null_key ? non_null_key->get_column_count() : 0))
+ if (count_nulls_in_search_key == merge_keys_count - test(non_null_key))
{
res= TRUE;
goto end;
}
/*
+ If the outer row has NULLs in some columns, and
+ there is no match for any of the remaining columns, and
+ there is a subquery row with NULLs in all unmatched columns,
+ then there is a partial match, otherwise the result is FALSE.
+ */
+ if (count_nulls_in_search_key && !pq.elements)
+ {
+ DBUG_ASSERT(!non_null_key);
+ /*
+ Check if the intersection of all NULL bitmaps of all keys that
+ are not in matching_outer_cols is non-empty.
+ */
+ res= exists_complementing_null_row(&matching_outer_cols);
+ goto end;
+ }
+
+ /*
If there is no NULL (sub)row that covers all NULL columns, and there is no
- single match for any of the NULL columns, the result is FALSE.
+ match for any of the NULL columns, the result is FALSE. Notice that if there
+ is a non-null key, and there is only one matching key, the non-null key is
+    the matching key. This is so because this method returns FALSE if the
+ non-null key doesn't have a match.
*/
- if (pq.elements - test(non_null_key) == 0)
+ if (!count_nulls_in_search_key &&
+ (!pq.elements ||
+ (pq.elements == 1 && non_null_key &&
+ max_null_in_any_row < merge_keys_count-1)))
{
+ if (!pq.elements)
+ {
+ DBUG_ASSERT(!non_null_key);
+ /*
+ The case of a covering null row is handled by
+ subselect_partial_match_engine::exec()
+ */
+ DBUG_ASSERT(max_null_in_any_row != tmp_table->s->fields);
+ }
res= FALSE;
goto end;
}
@@ -5630,6 +5767,7 @@ bool subselect_rowid_merge_engine::partial_match()
{
min_key= cur_key;
min_row_num= cur_row_num;
+ bitmap_clear_all(&matching_keys);
bitmap_set_bit(&matching_keys, min_key->get_keyid());
bitmap_union(&matching_keys, &matching_outer_cols);
}
@@ -5650,6 +5788,8 @@ bool subselect_rowid_merge_engine::partial_match()
DBUG_ASSERT(FALSE);
end:
+ if (!has_covering_null_columns)
+ bitmap_clear_all(&matching_keys);
queue_remove_all(&pq);
tmp_table->file->ha_rnd_end();
return res;
@@ -5663,11 +5803,13 @@ subselect_table_scan_engine::subselect_table_scan_engine(
select_result_interceptor *result_arg,
List<Item> *equi_join_conds_arg,
bool has_covering_null_row_arg,
- bool has_covering_null_columns_arg)
+ bool has_covering_null_columns_arg,
+ uint count_columns_with_nulls_arg)
:subselect_partial_match_engine(thd_arg, engine_arg, tmp_table_arg, item_arg,
result_arg, equi_join_conds_arg,
has_covering_null_row_arg,
- has_covering_null_columns_arg)
+ has_covering_null_columns_arg,
+ count_columns_with_nulls_arg)
{}
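
The rowid-merge hunks above are easier to follow with a stripped-down model of the new NULL-intersection test: for every merged key that the outer reference did not already match, take its per-row NULL bitmap, clamp the scan to the rows between the highest first-NULL row and the lowest last-NULL row, and check whether some row in that window is NULL in all of those keys. A minimal standalone sketch of that idea (std::vector<bool> stands in for MY_BITMAP, each bitmap is assumed to cover all rows, and none of these names are the server's):

#include <cstddef>
#include <cstdint>
#include <vector>

/* Illustrative stand-in for the per-column NULL data kept by Ordered_key. */
struct NullColumn
{
  std::vector<bool> null_rows;  /* null_rows[r] == true <=> row r is NULL here */
  size_t min_null_row;          /* first row that is NULL in this column       */
  size_t max_null_row;          /* last row that is NULL in this column        */
};

/*
  Is there a row that is NULL in *all* of the given columns?  Mirrors the idea
  behind exists_complementing_null_row(): intersect the NULL bitmaps, but only
  between the highest min_null_row and the lowest max_null_row - outside that
  window the intersection is empty by construction.
*/
static bool exists_all_null_row(const std::vector<NullColumn> &cols)
{
  size_t lo= 0, hi= SIZE_MAX;
  for (const NullColumn &c : cols)
  {
    if (c.min_null_row > lo) lo= c.min_null_row;
    if (c.max_null_row < hi) hi= c.max_null_row;
  }
  if (hi < lo)
    return false;                     /* the NULL windows do not even overlap */
  for (size_t r= lo; r <= hi; r++)
  {
    bool all_null= true;
    for (const NullColumn &c : cols)
      if (!c.null_rows[r]) { all_null= false; break; }
    if (all_null)
      return true;                    /* a partial match (UNKNOWN) is possible */
  }
  return false;
}

The single-column shortcut in the patch skips even this scan: when only one NULL-able key remains outside keys_to_complement, it is guaranteed to contain NULLs, so the answer is TRUE immediately.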
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index 5beabed9182..6007812fa7d 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -240,6 +240,7 @@ public:
const char *func_name() const { DBUG_ASSERT(0); return "subselect"; }
virtual bool expr_cache_is_needed(THD *);
virtual void get_cache_parameters(List<Item> &parameters);
+ virtual bool is_subquery_processor (uchar *opt_arg) { return 1; }
friend class select_result_interceptor;
friend class Item_in_optimizer;
@@ -367,17 +368,19 @@ TABLE_LIST * const NO_JOIN_NEST=(TABLE_LIST*)0x1;
based on user-set optimizer switches, semantic analysis and cost comparison.
*/
#define SUBS_NOT_TRANSFORMED 0 /* No execution method was chosen for this IN. */
-#define SUBS_SEMI_JOIN 1 /* IN was converted to semi-join. */
-#define SUBS_IN_TO_EXISTS 2 /* IN was converted to correlated EXISTS. */
-#define SUBS_MATERIALIZATION 4 /* Execute IN via subquery materialization. */
+/* The final decision about the strategy has been made. */
+#define SUBS_STRATEGY_CHOSEN 1
+#define SUBS_SEMI_JOIN 2 /* IN was converted to semi-join. */
+#define SUBS_IN_TO_EXISTS 4 /* IN was converted to correlated EXISTS. */
+#define SUBS_MATERIALIZATION 8 /* Execute IN via subquery materialization. */
/* Partial matching substrategies of MATERIALIZATION. */
-#define SUBS_PARTIAL_MATCH_ROWID_MERGE 8
-#define SUBS_PARTIAL_MATCH_TABLE_SCAN 16
+#define SUBS_PARTIAL_MATCH_ROWID_MERGE 16
+#define SUBS_PARTIAL_MATCH_TABLE_SCAN 32
/* ALL/ANY will be transformed with max/min optimization */
 /* The subquery has no aggregates, transform it into a MAX/MIN query. */
-#define SUBS_MAXMIN_INJECTED 32
+#define SUBS_MAXMIN_INJECTED 64
/* The subquery has aggregates, use a special max/min subselect engine. */
-#define SUBS_MAXMIN_ENGINE 64
+#define SUBS_MAXMIN_ENGINE 128
/**
@@ -412,6 +415,8 @@ protected:
Item *expr;
bool was_null;
bool abort_on_null;
+ /* A bitmap of possible execution strategies for an IN predicate. */
+ uchar in_strategy;
public:
Item_in_optimizer *optimizer;
protected:
@@ -442,7 +447,6 @@ public:
join nest pointer - the predicate is an AND-part of ON expression
of a join nest
NULL - for all other locations
- See also THD::emb_on_expr_nest.
*/
TABLE_LIST *emb_on_expr_nest;
/*
@@ -457,11 +461,7 @@ public:
*/
bool sjm_scan_allowed;
double jtbm_read_time;
- double jtbm_record_count;
-
- /* A bitmap of possible execution strategies for an IN predicate. */
- uchar in_strategy;
-
+ double jtbm_record_count;
bool is_jtbm_merged;
/*
@@ -473,7 +473,7 @@ public:
TRUE<=>registered in the list of semijoins in outer select
*/
bool is_registered_semijoin;
-
+
/*
Used to determine how this subselect item is represented in the item tree,
in case there is a need to locate it there and replace with something else.
@@ -502,8 +502,8 @@ public:
Item_in_subselect(Item * left_expr, st_select_lex *select_lex);
Item_in_subselect()
:Item_exists_subselect(), left_expr_cache(0), first_execution(TRUE),
- abort_on_null(0), optimizer(0),
- pushed_cond_guards(NULL), func(NULL), in_strategy(SUBS_NOT_TRANSFORMED),
+ abort_on_null(0), in_strategy(SUBS_NOT_TRANSFORMED), optimizer(0),
+ pushed_cond_guards(NULL), func(NULL), emb_on_expr_nest(NULL),
is_jtbm_merged(FALSE),
upper_item(0)
{}
@@ -547,6 +547,70 @@ public:
user.
*/
int get_identifier();
+
+ void mark_as_condition_AND_part(TABLE_LIST *embedding)
+ {
+ emb_on_expr_nest= embedding;
+ }
+
+ bool test_strategy(uchar strategy)
+ { return test(in_strategy & strategy); }
+
+ /**
+    Test whether the given strategy was chosen for execution. This is the case
+    when the CHOSEN flag is set and no other strategy bit remains.
+ */
+ bool test_set_strategy(uchar strategy)
+ {
+ DBUG_ASSERT(strategy == SUBS_SEMI_JOIN ||
+ strategy == SUBS_IN_TO_EXISTS ||
+ strategy == SUBS_MATERIALIZATION ||
+ strategy == SUBS_PARTIAL_MATCH_ROWID_MERGE ||
+ strategy == SUBS_PARTIAL_MATCH_TABLE_SCAN ||
+ strategy == SUBS_MAXMIN_INJECTED ||
+ strategy == SUBS_MAXMIN_ENGINE);
+ return ((in_strategy & SUBS_STRATEGY_CHOSEN) &&
+ (in_strategy & ~SUBS_STRATEGY_CHOSEN) == strategy);
+ }
+
+ bool is_set_strategy()
+ { return test(in_strategy & SUBS_STRATEGY_CHOSEN); }
+
+ bool has_strategy()
+ { return in_strategy != SUBS_NOT_TRANSFORMED; }
+
+ void add_strategy (uchar strategy)
+ {
+ DBUG_ASSERT(strategy != SUBS_NOT_TRANSFORMED);
+ DBUG_ASSERT(!(strategy & SUBS_STRATEGY_CHOSEN));
+ /*
+ TODO: PS re-execution breaks this condition, because
+ check_and_do_in_subquery_rewrites() is called for each reexecution
+ and re-adds the same strategies.
+ DBUG_ASSERT(!(in_strategy & SUBS_STRATEGY_CHOSEN));
+ */
+ in_strategy|= strategy;
+ }
+
+ void reset_strategy(uchar strategy)
+ {
+ DBUG_ASSERT(strategy != SUBS_NOT_TRANSFORMED);
+ in_strategy= strategy;
+ }
+
+ void set_strategy(uchar strategy)
+ {
+ /* Check that only one strategy is set for execution. */
+ DBUG_ASSERT(strategy == SUBS_SEMI_JOIN ||
+ strategy == SUBS_IN_TO_EXISTS ||
+ strategy == SUBS_MATERIALIZATION ||
+ strategy == SUBS_PARTIAL_MATCH_ROWID_MERGE ||
+ strategy == SUBS_PARTIAL_MATCH_TABLE_SCAN ||
+ strategy == SUBS_MAXMIN_INJECTED ||
+ strategy == SUBS_MAXMIN_ENGINE);
+ in_strategy= (SUBS_STRATEGY_CHOSEN | strategy);
+ }
+
friend class Item_ref_null_helper;
friend class Item_is_not_null_test;
friend class Item_in_optimizer;
@@ -893,7 +957,7 @@ public:
tmp_table(NULL), is_materialized(FALSE), materialize_engine(old_engine),
materialize_join(NULL), semi_join_conds(NULL), lookup_engine(NULL),
count_partial_match_columns(0), count_null_only_columns(0),
- strategy(UNDEFINED)
+ count_columns_with_nulls(0), strategy(UNDEFINED)
{}
~subselect_hash_sj_engine();
@@ -931,6 +995,7 @@ protected:
MY_BITMAP partial_match_key_parts;
uint count_partial_match_columns;
uint count_null_only_columns;
+ uint count_columns_with_nulls;
/* Possible execution strategies that can be used to compute hash semi-join.*/
enum exec_strategy {
UNDEFINED,
@@ -957,7 +1022,7 @@ protected:
/*
- Distinguish the type od (0-based) row numbers from the type of the index into
+ Distinguish the type of (0-based) row numbers from the type of the index into
an array of row numbers.
*/
typedef ha_rows rownum_t;
@@ -1033,9 +1098,9 @@ protected:
/* Count of NULLs per column. */
ha_rows null_count;
/* The row number that contains the first NULL in a column. */
- ha_rows min_null_row;
+ rownum_t min_null_row;
/* The row number that contains the last NULL in a column. */
- ha_rows max_null_row;
+ rownum_t max_null_row;
protected:
bool alloc_keys_buffers();
@@ -1068,6 +1133,10 @@ public:
DBUG_ASSERT(i < key_column_count);
return key_columns[i]->field->field_index;
}
+ rownum_t get_min_null_row() { return min_null_row; }
+ rownum_t get_max_null_row() { return max_null_row; }
+ MY_BITMAP * get_null_key() { return &null_key; }
+ ha_rows get_null_count() { return null_count; }
/*
Get the search key element that corresponds to the i-th key part of this
index.
@@ -1168,6 +1237,7 @@ protected:
guaranteed partial match.
*/
bool has_covering_null_columns;
+ uint count_columns_with_nulls;
protected:
virtual bool partial_match()= 0;
@@ -1178,7 +1248,8 @@ public:
select_result_interceptor *result_arg,
List<Item> *equi_join_conds_arg,
bool has_covering_null_row_arg,
- bool has_covering_null_columns_arg);
+ bool has_covering_null_columns_arg,
+ uint count_columns_with_nulls_arg);
int prepare() { return 0; }
int exec();
void fix_length_and_dec(Item_cache**) {}
@@ -1236,6 +1307,8 @@ protected:
Ordered_key **merge_keys;
/* The number of elements in merge_keys. */
uint merge_keys_count;
+ /* The NULL bitmaps of merge keys.*/
+ MY_BITMAP **null_bitmaps;
/*
An index on all non-NULL columns of 'tmp_table'. The index has the
logical form: <[v_i1 | ... | v_ik], rownum>. It allows to find the row
@@ -1261,6 +1334,7 @@ protected:
static int cmp_keys_by_cur_rownum(void *arg, uchar *k1, uchar *k2);
bool test_null_row(rownum_t row_num);
+ bool exists_complementing_null_row(MY_BITMAP *keys_to_complement);
bool partial_match();
public:
subselect_rowid_merge_engine(THD *thd_arg,
@@ -1268,13 +1342,15 @@ public:
TABLE *tmp_table_arg, uint merge_keys_count_arg,
bool has_covering_null_row_arg,
bool has_covering_null_columns_arg,
+ uint count_columns_with_nulls_arg,
Item_subselect *item_arg,
select_result_interceptor *result_arg,
List<Item> *equi_join_conds_arg)
:subselect_partial_match_engine(thd_arg, engine_arg, tmp_table_arg,
item_arg, result_arg, equi_join_conds_arg,
has_covering_null_row_arg,
- has_covering_null_columns_arg),
+ has_covering_null_columns_arg,
+ count_columns_with_nulls_arg),
merge_keys_count(merge_keys_count_arg), non_null_key(NULL)
{}
~subselect_rowid_merge_engine();
@@ -1295,7 +1371,8 @@ public:
select_result_interceptor *result_arg,
List<Item> *equi_join_conds_arg,
bool has_covering_null_row_arg,
- bool has_covering_null_columns_arg);
+ bool has_covering_null_columns_arg,
+ uint count_columns_with_nulls_arg);
void cleanup();
virtual enum_engine_type engine_type() { return TABLE_SCAN_ENGINE; }
};
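
The strategy flags above behave like a tiny state machine: add_strategy() ORs candidate bits in during the rewrite phase, set_strategy() commits exactly one of them together with SUBS_STRATEGY_CHOSEN, and test_set_strategy() checks that the committed bit is the only one left besides CHOSEN. A compact standalone sketch of that flag arithmetic (the values mirror the header; the little driver is illustrative only):

#include <cassert>

typedef unsigned char uchar;

enum
{
  SUBS_NOT_TRANSFORMED= 0,
  SUBS_STRATEGY_CHOSEN= 1,
  SUBS_SEMI_JOIN=       2,
  SUBS_IN_TO_EXISTS=    4,
  SUBS_MATERIALIZATION= 8
};

struct StrategySketch
{
  uchar in_strategy;

  StrategySketch() : in_strategy(SUBS_NOT_TRANSFORMED) {}
  void add(uchar s)    { in_strategy|= s; }                       /* collect candidates    */
  void choose(uchar s) { in_strategy= SUBS_STRATEGY_CHOSEN | s; } /* commit exactly one    */
  bool has_any() const { return in_strategy != SUBS_NOT_TRANSFORMED; }
  bool chosen(uchar s) const                                      /* ~ test_set_strategy() */
  {
    return (in_strategy & SUBS_STRATEGY_CHOSEN) &&
           (in_strategy & ~SUBS_STRATEGY_CHOSEN) == s;
  }
};

int main()
{
  StrategySketch st;
  st.add(SUBS_MATERIALIZATION);
  st.add(SUBS_IN_TO_EXISTS);      /* both still possible after the prepare phase */
  st.choose(SUBS_IN_TO_EXISTS);   /* the cost comparison picked IN->EXISTS       */
  assert(st.chosen(SUBS_IN_TO_EXISTS) && !st.chosen(SUBS_MATERIALIZATION));
  return 0;
}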
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 55bcf868690..9bf786dbbd4 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -372,7 +372,9 @@ bool Item_sum::collect_outer_ref_processor(uchar *param)
{
Collect_deps_prm *prm= (Collect_deps_prm *)param;
SELECT_LEX *ds;
- if ((ds= depended_from()) && ds->nest_level < prm->nest_level)
+ if ((ds= depended_from()) &&
+ ds->nest_level_base == prm->nest_level_base &&
+ ds->nest_level < prm->nest_level)
prm->parameters->add_unique(this, &cmp_items);
return FALSE;
}
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 37418e990a8..0d21d619421 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -1486,7 +1486,8 @@ longlong Item_temporal_func::val_int()
MYSQL_TIME ltime;
if (get_date(&ltime, TIME_FUZZY_DATE | sql_mode))
return 0;
- return (longlong)TIME_to_ulonglong(&ltime);
+ longlong v= TIME_to_ulonglong(&ltime);
+ return ltime.neg ? -v : v;
}
@@ -2568,7 +2569,6 @@ bool Item_func_add_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
long days, microseconds;
longlong seconds;
int l_sign= sign, was_cut= 0;
- uint dec= decimals;
if (is_date) // TIMESTAMP function
{
@@ -2610,10 +2610,6 @@ bool Item_func_add_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
ltime->time_type= is_time ? MYSQL_TIMESTAMP_TIME : MYSQL_TIMESTAMP_DATETIME;
- if (cached_field_type == MYSQL_TYPE_STRING &&
- (l_time1.second_part || l_time2.second_part))
- dec= TIME_SECOND_PART_DIGITS;
-
if (!is_time)
{
get_date_from_daynr(days,&ltime->year,&ltime->month,&ltime->day);
diff --git a/sql/lex.h b/sql/lex.h
index ea6e9fd9707..bb8f5825879 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -249,6 +249,7 @@ static SYMBOL symbols[] = {
{ "GRANTS", SYM(GRANTS)},
{ "GROUP", SYM(GROUP_SYM)},
{ "HANDLER", SYM(HANDLER_SYM)},
+ { "HARD", SYM(HARD_SYM)},
{ "HASH", SYM(HASH_SYM)},
{ "HAVING", SYM(HAVING)},
{ "HELP", SYM(HELP_SYM)},
@@ -515,6 +516,7 @@ static SYMBOL symbols[] = {
{ "SNAPSHOT", SYM(SNAPSHOT_SYM)},
{ "SMALLINT", SYM(SMALLINT)},
{ "SOCKET", SYM(SOCKET_SYM)},
+ { "SOFT", SYM(SOFT_SYM)},
{ "SOME", SYM(ANY_SYM)},
{ "SONAME", SYM(SONAME_SYM)},
{ "SOUNDS", SYM(SOUNDS_SYM)},
diff --git a/sql/log.cc b/sql/log.cc
index 6819a1e767d..a7698fd3ae6 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -81,7 +81,7 @@ static LEX_STRING const write_error_msg=
static my_bool opt_optimize_thread_scheduling= TRUE;
ulong binlog_checksum_options;
#ifndef DBUG_OFF
-static ulong opt_binlog_dbug_fsync_sleep= 0;
+ulong opt_binlog_dbug_fsync_sleep= 0;
#endif
mysql_mutex_t LOCK_prepare_ordered;
@@ -1256,17 +1256,6 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length,
query_length= command_name[thd->command].length;
}
- if (!query_length)
- {
- /*
- Not a real query; Reset counts for slow query logging
- (QQ: Wonder if this is really needed)
- */
- thd->sent_row_count= thd->examined_row_count= 0;
- thd->query_plan_flags= QPLAN_INIT;
- thd->query_plan_fsort_passes= 0;
- }
-
for (current_handler= slow_log_handler_list; *current_handler ;)
error= (*current_handler++)->log_slow(thd, current_time,
user_host_buff, user_host_len,
@@ -2088,7 +2077,7 @@ static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv)
log_query.append(thd->lex->ident.str, thd->lex->ident.length) ||
log_query.append("`"))
DBUG_RETURN(1);
- int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
+ int errcode= query_error_code(thd, thd->killed == NOT_KILLED);
Query_log_event qinfo(thd, log_query.ptr(), log_query.length(),
TRUE, FALSE, TRUE, errcode);
DBUG_RETURN(mysql_bin_log.write(&qinfo));
@@ -2112,7 +2101,7 @@ static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv)
log_query.append(thd->lex->ident.str, thd->lex->ident.length) ||
log_query.append("`"))
DBUG_RETURN(1);
- int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
+ int errcode= query_error_code(thd, thd->killed == NOT_KILLED);
Query_log_event qinfo(thd, log_query.ptr(), log_query.length(),
TRUE, FALSE, TRUE, errcode);
DBUG_RETURN(mysql_bin_log.write(&qinfo));
@@ -5614,7 +5603,7 @@ int query_error_code(THD *thd, bool not_killed)
{
int error;
- if (not_killed || (thd->killed == THD::KILL_BAD_DATA))
+ if (not_killed || (killed_mask_hard(thd->killed) == KILL_BAD_DATA))
{
error= thd->is_error() ? thd->stmt_da->sql_errno() : 0;
@@ -5623,7 +5612,8 @@ int query_error_code(THD *thd, bool not_killed)
is not set to these errors when specified not_killed by the
caller.
*/
- if (error == ER_SERVER_SHUTDOWN || error == ER_QUERY_INTERRUPTED)
+ if (error == ER_SERVER_SHUTDOWN || error == ER_QUERY_INTERRUPTED ||
+ error == ER_NEW_ABORTING_CONNECTION || error == ER_CONNECTION_KILLED)
error= 0;
}
else
@@ -7569,27 +7559,10 @@ static MYSQL_SYSVAR_ENUM(
BINLOG_CHECKSUM_ALG_OFF,
&binlog_checksum_typelib);
-#ifndef DBUG_OFF
-static MYSQL_SYSVAR_ULONG(
- dbug_fsync_sleep,
- opt_binlog_dbug_fsync_sleep,
- PLUGIN_VAR_RQCMDARG,
- "Extra sleep (in microseconds) to add to binlog fsync(), for debugging",
- NULL,
- NULL,
- 0,
- 0,
- ULONG_MAX,
- 0);
-#endif
-
static struct st_mysql_sys_var *binlog_sys_vars[]=
{
MYSQL_SYSVAR(optimize_thread_scheduling),
MYSQL_SYSVAR(checksum),
-#ifndef DBUG_OFF
- MYSQL_SYSVAR(dbug_fsync_sleep),
-#endif
NULL
};
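
The query_error_code() change above relies on the new thd->killed encoding in which every kill reason has a soft value and a hard twin that differ only in one bit, so a single mask comparison covers both variants. A hedged sketch of that layout (the constants and the mask helper here are illustrative; the server's own enum and killed_mask_hard() live in its THD headers):

#include <cassert>

/* Illustrative layout only: the hard variant of a reason sets the low bit. */
enum killed_sketch
{
  NOT_KILLED_SK=         0,
  KILL_HARD_BIT_SK=      1,   /* set => the kill cannot be ignored or downgraded */
  KILL_BAD_DATA_SK=      2,
  KILL_BAD_DATA_HARD_SK= 3,
  KILL_QUERY_SK=         4,
  KILL_QUERY_HARD_SK=    5
};

static inline killed_sketch mask_hard(killed_sketch k)
{
  return (killed_sketch) (k & ~KILL_HARD_BIT_SK);   /* strip the hard bit */
}

int main()
{
  /* Both variants collapse to the same base reason, as in query_error_code(). */
  assert(mask_hard(KILL_BAD_DATA_HARD_SK) == KILL_BAD_DATA_SK);
  assert(mask_hard(KILL_QUERY_SK) == KILL_QUERY_SK);
  return 0;
}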
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 37245e6eb21..dddd24bc299 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -457,6 +457,7 @@ inline bool unexpected_error_code(int unexpected_error)
case ER_NET_READ_ERROR:
case ER_NET_ERROR_ON_WRITE:
case ER_QUERY_INTERRUPTED:
+ case ER_CONNECTION_KILLED:
case ER_SERVER_SHUTDOWN:
case ER_NEW_ABORTING_CONNECTION:
return(TRUE);
@@ -3793,7 +3794,7 @@ Default database: '%s'. Query: '%s'",
{
DBUG_PRINT("info",("error ignored"));
clear_all_errors(thd, const_cast<Relay_log_info*>(rli));
- thd->killed= THD::NOT_KILLED;
+ thd->killed= NOT_KILLED;
}
/*
Other cases: mostly we expected no error and get one.
diff --git a/sql/log_event.h b/sql/log_event.h
index 0383e746d94..c9810cbb140 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -3145,7 +3145,7 @@ char *str_to_hex(char *to, const char *from, uint len);
/**
@class Annotate_rows_log_event
- In row-based mode, if binlog_annotate_rows_events = ON, each group of
+ In row-based mode, if binlog_annotate_row_events = ON, each group of
Table_map_log_events is preceded by an Annotate_rows_log_event which
contains the query which caused the subsequent rows operations.
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc
index 130ab676e7b..d8dc45d38c7 100644
--- a/sql/multi_range_read.cc
+++ b/sql/multi_range_read.cc
@@ -440,10 +440,10 @@ void Mrr_ordered_index_reader::interrupt_read()
{
DBUG_ASSERT(support_scan_interruptions);
TABLE *table= file->get_table();
+ KEY *used_index= &table->key_info[file->active_index];
/* Save the current key value */
key_copy(saved_key_tuple, table->record[0],
- &table->key_info[file->active_index],
- keypar.key_tuple_length);
+ used_index, used_index->key_length);
if (saved_primary_key)
{
@@ -468,9 +468,9 @@ void Mrr_ordered_index_reader::position()
void Mrr_ordered_index_reader::resume_read()
{
TABLE *table= file->get_table();
+ KEY *used_index= &table->key_info[file->active_index];
key_restore(table->record[0], saved_key_tuple,
- &table->key_info[file->active_index],
- keypar.key_tuple_length);
+ used_index, used_index->key_length);
if (saved_primary_key)
{
key_restore(table->record[0], saved_primary_key,
@@ -547,7 +547,7 @@ int Mrr_ordered_index_reader::init(handler *h_arg, RANGE_SEQ_IF *seq_funcs,
mrr_funcs= *seq_funcs;
source_exhausted= FALSE;
if (support_scan_interruptions)
- bzero(saved_key_tuple, keypar.key_tuple_length);
+ bzero(saved_key_tuple, key_info->key_length);
have_saved_rowid= FALSE;
return 0;
}
@@ -864,12 +864,14 @@ int DsMrr_impl::dsmrr_init(handler *h_arg, RANGE_SEQ_IF *seq_funcs,
if (h_idx->primary_key_is_clustered())
{
uint pk= h_idx->get_table()->s->primary_key;
- saved_pk_length= h_idx->get_table()->key_info[pk].key_length;
+ if (pk != MAX_KEY)
+ saved_pk_length= h_idx->get_table()->key_info[pk].key_length;
}
-
+
+ KEY *used_index= &h_idx->get_table()->key_info[h_idx->active_index];
if (reader_factory.ordered_index_reader.
set_interruption_temp_buffer(primary_file->ref_length,
- keypar.key_tuple_length,
+ used_index->key_length,
saved_pk_length,
&full_buf, full_buf_end))
goto use_default_impl;
@@ -1651,7 +1653,8 @@ bool DsMrr_impl::get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
uint *buffer_size, COST_VECT *cost)
{
ulong max_buff_entries, elem_size;
- ha_rows rows_in_full_step, rows_in_last_step;
+ ha_rows rows_in_full_step;
+ ha_rows rows_in_last_step;
uint n_full_steps;
double index_read_cost;
@@ -1676,7 +1679,7 @@ bool DsMrr_impl::get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
/* Adjust buffer size if we expect to use only part of the buffer */
if (n_full_steps)
{
- get_sort_and_sweep_cost(table, rows, cost);
+ get_sort_and_sweep_cost(table, rows_in_full_step, cost);
cost->multiply(n_full_steps);
}
else
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index b6801d92ca8..46b6ec3978a 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -409,7 +409,7 @@ my_bool opt_local_infile, opt_slave_compressed_protocol;
my_bool opt_safe_user_create = 0;
my_bool opt_show_slave_auth_info;
my_bool opt_log_slave_updates= 0;
-my_bool opt_replicate_annotate_rows_events= 0;
+my_bool opt_replicate_annotate_row_events= 0;
char *opt_slave_skip_errors;
/*
@@ -500,6 +500,11 @@ my_decimal decimal_zero;
*/
ulong max_long_data_size;
+/* Limits for internal temporary tables (MyISAM or Aria) */
+uint internal_tmp_table_max_key_length;
+uint internal_tmp_table_max_key_segments;
+
+bool max_user_connections_checking=0;
/**
Limit of the total number of prepared statements in the server.
Is necessary to protect the server against out-of-memory attacks.
@@ -1389,7 +1394,7 @@ static void close_connections(void)
if (tmp->slave_thread)
continue;
- tmp->killed= THD::KILL_CONNECTION;
+ tmp->killed= KILL_SERVER_HARD;
MYSQL_CALLBACK(thread_scheduler, post_kill_notification, (tmp));
mysql_mutex_lock(&tmp->LOCK_thd_data);
if (tmp->mysys_var)
@@ -1449,7 +1454,7 @@ static void close_connections(void)
tmp->thread_id,
(tmp->main_security_ctx.user ?
tmp->main_security_ctx.user : ""));
- close_connection(tmp);
+ close_connection(tmp,ER_SERVER_SHUTDOWN);
}
#endif
DBUG_PRINT("quit",("Unlocking LOCK_thread_count"));
@@ -2335,6 +2340,16 @@ void close_connection(THD *thd, uint sql_errno)
if (sql_errno)
net_send_error(thd, sql_errno, ER_DEFAULT(sql_errno), NULL);
+ if (global_system_variables.log_warnings > 3)
+ {
+ Security_context *sctx= &thd->main_security_ctx;
+ sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION),
+ thd->thread_id,(thd->db ? thd->db : "unconnected"),
+ sctx->user ? sctx->user : "unauthenticated",
+ sctx->host_or_ip,
+ (sql_errno ? ER(sql_errno) : "CLOSE_CONNECTION"));
+ }
+
thd->disconnect();
MYSQL_CONNECTION_DONE((int) sql_errno, thd->thread_id);
@@ -2503,6 +2518,7 @@ static bool cache_thread()
*/
thd->mysys_var->abort= 0;
thd->thr_create_utime= microsecond_interval_timer();
+ thd->start_utime= thd->thr_create_utime;
threads.append(thd);
return(1);
}
@@ -2567,24 +2583,6 @@ void flush_thread_cache()
}
-#ifdef THREAD_SPECIFIC_SIGPIPE
-/**
- Aborts a thread nicely. Comes here on SIGPIPE.
-
- @todo
- One should have to fix that thr_alarm know about this thread too.
-*/
-extern "C" sig_handler abort_thread(int sig __attribute__((unused)))
-{
- THD *thd=current_thd;
- DBUG_ENTER("abort_thread");
- if (thd)
- thd->killed= THD::KILL_CONNECTION;
- DBUG_VOID_RETURN;
-}
-#endif
-
-
/******************************************************************************
Setup a signal thread with handles all signals.
Because Linux doesn't support schemas use a mutex to check that
@@ -2798,6 +2796,8 @@ or misconfigured. This error can also be caused by malfunctioning hardware.\n",
We will try our best to scrape up some info that will hopefully help diagnose\n\
the problem, but since we have already crashed, something is definitely wrong\n\
and this may fail.\n\n");
+ set_server_version();
+ fprintf(stderr, "Server version: %s\n", server_version);
fprintf(stderr, "key_buffer_size=%lu\n",
(ulong) dflt_key_cache->key_cache_mem_size);
fprintf(stderr, "read_buffer_size=%ld\n", (long) global_system_variables.read_buff_size);
@@ -2842,20 +2842,29 @@ the thread stack. Please read http://dev.mysql.com/doc/mysql/en/linux.html\n\n",
{
const char *kreason= "UNKNOWN";
switch (thd->killed) {
- case THD::NOT_KILLED:
+ case NOT_KILLED:
+ case KILL_HARD_BIT:
kreason= "NOT_KILLED";
break;
- case THD::KILL_BAD_DATA:
+ case KILL_BAD_DATA:
+ case KILL_BAD_DATA_HARD:
kreason= "KILL_BAD_DATA";
break;
- case THD::KILL_CONNECTION:
+ case KILL_CONNECTION:
+ case KILL_CONNECTION_HARD:
kreason= "KILL_CONNECTION";
break;
- case THD::KILL_QUERY:
+ case KILL_QUERY:
+ case KILL_QUERY_HARD:
kreason= "KILL_QUERY";
break;
- case THD::KILLED_NO_VALUE:
- kreason= "KILLED_NO_VALUE";
+ case KILL_SYSTEM_THREAD:
+ case KILL_SYSTEM_THREAD_HARD:
+ kreason= "KILL_SYSTEM_THREAD";
+ break;
+ case KILL_SERVER:
+ case KILL_SERVER_HARD:
+ kreason= "KILL_SERVER";
break;
}
fprintf(stderr, "\nTrying to get some variables.\n"
@@ -2865,8 +2874,7 @@ the thread stack. Please read http://dev.mysql.com/doc/mysql/en/linux.html\n\n",
fprintf(stderr, "\nConnection ID (thread ID): %lu\n", (ulong) thd->thread_id);
fprintf(stderr, "Status: %s\n", kreason);
fprintf(stderr, "Optimizer switch: ");
-
- ulonglong optsw= global_system_variables.optimizer_switch;
+ ulonglong optsw= thd->variables.optimizer_switch;
for (uint i= 0; optimizer_switch_names[i+1]; i++, optsw >>= 1)
{
if (i)
@@ -3218,7 +3226,7 @@ void my_message_sql(uint error, const char *str, myf MyFlags)
sql_print_message_func func;
DBUG_ENTER("my_message_sql");
- DBUG_PRINT("error", ("error: %u message: '%s'", error, str));
+ DBUG_PRINT("error", ("error: %u message: '%s' Flag: %d", error, str, MyFlags));
DBUG_ASSERT(str != NULL);
DBUG_ASSERT(error != 0);
@@ -3250,7 +3258,7 @@ void my_message_sql(uint error, const char *str, myf MyFlags)
/* When simulating OOM, skip writing to error log to avoid mtr errors */
DBUG_EXECUTE_IF("simulate_out_of_memory", DBUG_VOID_RETURN;);
- if (!thd || (MyFlags & ME_NOREFRESH))
+ if (!thd || thd->log_all_errors || (MyFlags & ME_NOREFRESH))
(*func)("%s: %s", my_progname_short, str); /* purecov: inspected */
DBUG_VOID_RETURN;
}
@@ -4336,11 +4344,13 @@ a file name for --log-bin-index option", opt_binlog_index_name);
require a name. But as we don't want to break many existing setups, we
only give warning, not error.
*/
- sql_print_warning("No argument was provided to --log-bin, and "
- "--log-bin-index was not used; so replication "
- "may break when this MySQL server acts as a "
- "master and has his hostname changed!! Please "
- "use '--log-bin=%s' to avoid this problem.", ln);
+ sql_print_warning("No argument was provided to --log-bin and "
+                      "neither --log-basename nor --log-bin-index were "
+                      "used; this may cause replication to break when this "
+ "server acts as a master and has its hostname "
+ "changed! Please use '--log-basename=%s' or "
+ "'--log-bin=%s' to avoid this problem.",
+ opt_log_basename, ln);
}
if (ln == buf)
{
@@ -4497,6 +4507,11 @@ a file name for --log-bin-index option", opt_binlog_index_name);
sql_print_error("Aria engine is not enabled or did not start. The Aria engine must be enabled to continue as mysqld was configured with --with-aria-tmp-tables");
unireg_abort(1);
}
+ internal_tmp_table_max_key_length= maria_max_key_length();
+ internal_tmp_table_max_key_segments= maria_max_key_segments();
+#else
+ internal_tmp_table_max_key_length= myisam_max_key_length();
+ internal_tmp_table_max_key_segments= myisam_max_key_segments();
#endif
tc_log= (total_ha_2pc > 1 ? (opt_bin_log ?
@@ -5446,7 +5461,7 @@ void create_thread_to_handle_connection(THD *thd)
thread_created++;
threads.append(thd);
DBUG_PRINT("info",(("creating thread %lu"), thd->thread_id));
- thd->prior_thr_create_utime= thd->start_utime= microsecond_interval_timer();
+ thd->prior_thr_create_utime= microsecond_interval_timer();
if ((error= mysql_thread_create(key_thread_one_connection,
&thd->real_id, &connection_attrib,
handle_one_connection,
@@ -5457,7 +5472,7 @@ void create_thread_to_handle_connection(THD *thd)
("Can't create thread to handle request (error %d)",
error));
thread_count--;
- thd->killed= THD::KILL_CONNECTION; // Safety
+ thd->killed= KILL_CONNECTION; // Safety
mysql_mutex_unlock(&LOCK_thread_count);
mysql_mutex_lock(&LOCK_connection_count);
@@ -5469,7 +5484,7 @@ void create_thread_to_handle_connection(THD *thd)
my_snprintf(error_message_buff, sizeof(error_message_buff),
ER_THD(thd, ER_CANT_CREATE_THREAD), error);
net_send_error(thd, ER_CANT_CREATE_THREAD, error_message_buff, NULL);
- close_connection(thd);
+ close_connection(thd, ER_OUT_OF_RESOURCES);
mysql_mutex_lock(&LOCK_thread_count);
delete thd;
mysql_mutex_unlock(&LOCK_thread_count);
@@ -6543,6 +6558,10 @@ struct my_option my_long_options[]=
&table_cache_size, &table_cache_size, 0, GET_ULONG,
REQUIRED_ARG, TABLE_OPEN_CACHE_DEFAULT, 1, 512*1024L, 0, 1, 0},
#ifndef DBUG_OFF
+ {"debug-assert-on-error", 0,
+ "Do an assert in various functions if we get a fatal error",
+ &my_assert_on_error, &my_assert_on_error,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"debug-assert-if-crashed-table", 0,
"Do an assert in handler::print_error() if we get a crashed table",
&debug_assert_if_crashed_table, &debug_assert_if_crashed_table,
@@ -7969,6 +7988,19 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
my_disable_thr_alarm= opt_thread_alarm == 0;
my_default_record_cache_size=global_system_variables.read_buff_size;
+ /*
+    Log mysys errors to the error log when we don't have a thd or when
+    thd->log_all_errors is set (recovery). This is mainly useful for debugging
+    strange system errors.
+ */
+ if (global_system_variables.log_warnings >= 10)
+ my_global_flags= MY_WME | ME_JUST_INFO;
+ /* Log all errors not handled by thd->handle_error() to my_message_sql() */
+ if (global_system_variables.log_warnings >= 11)
+ my_global_flags|= ME_NOREFRESH;
+ if (my_assert_on_error)
+ debug_assert_if_crashed_table= 1;
+
global_system_variables.long_query_time= (ulonglong)
(global_system_variables.long_query_time_double * 1e6);
@@ -8012,6 +8044,8 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
if (!max_long_data_size_used)
max_long_data_size= global_system_variables.max_allowed_packet;
+  /* Remember whether max_user_connections was non-zero at startup */
+ max_user_connections_checking= global_system_variables.max_user_connections != 0;
return 0;
}
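
The get_options() hunk above effectively turns log_warnings into a verbosity dial for low-level (mysys) errors: level 10 forwards them to the error log with extra info, level 11 additionally routes errors that a statement would normally absorb, and --debug-assert-on-error also implies the crashed-table assert. A small sketch of that threshold logic, with made-up flag names standing in for MY_WME / ME_JUST_INFO / ME_NOREFRESH:

/* Hypothetical flag bits; the real ones come from the mysys headers. */
enum { F_WRITE_ERRORS= 1, F_EXTRA_INFO= 2, F_ALSO_HANDLED_ERRORS= 4 };

struct DebugOpts { bool assert_on_error; bool assert_if_crashed_table; };

static unsigned mysys_flags_for(unsigned log_warnings, DebugOpts *opts)
{
  unsigned flags= 0;
  if (log_warnings >= 10)
    flags= F_WRITE_ERRORS | F_EXTRA_INFO;   /* log mysys errors to the error log */
  if (log_warnings >= 11)
    flags|= F_ALSO_HANDLED_ERRORS;          /* even errors the THD would handle  */
  if (opts->assert_on_error)
    opts->assert_if_crashed_table= true;    /* implied, as in get_options()      */
  return flags;
}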
diff --git a/sql/mysqld.h b/sql/mysqld.h
index 7b760c911d0..658d578a995 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -161,7 +161,7 @@ extern my_bool allow_slave_start;
extern LEX_CSTRING reason_slave_blocked;
extern ulong slave_trans_retries;
extern uint slave_net_timeout;
-extern uint max_user_connections;
+extern int max_user_connections;
extern ulong what_to_log,flush_time;
extern ulong max_prepared_stmt_count, prepared_stmt_count;
extern ulong open_files_limit;
@@ -534,6 +534,11 @@ extern char *opt_log_basename;
extern my_bool opt_master_verify_checksum;
extern my_bool opt_slave_sql_verify_checksum;
extern ulong binlog_checksum_options;
+extern bool max_user_connections_checking;
+extern ulong opt_binlog_dbug_fsync_sleep;
+
+extern uint internal_tmp_table_max_key_length;
+extern uint internal_tmp_table_max_key_segments;
extern uint volatile global_disable_checkpoint;
extern my_bool opt_help, opt_thread_alarm;
diff --git a/sql/opt_index_cond_pushdown.cc b/sql/opt_index_cond_pushdown.cc
index cee96d88438..12a732176a5 100644
--- a/sql/opt_index_cond_pushdown.cc
+++ b/sql/opt_index_cond_pushdown.cc
@@ -83,15 +83,44 @@ bool uses_index_fields_only(Item *item, TABLE *tbl, uint keyno,
case Item::FIELD_ITEM:
{
Item_field *item_field= (Item_field*)item;
- if (item_field->field->table != tbl)
+ Field *field= item_field->field;
+ if (field->table != tbl)
return TRUE;
/*
The below is probably a repetition - the first part checks the
other two, but let's play it safe:
*/
- return item_field->field->part_of_key.is_set(keyno) &&
- item_field->field->type() != MYSQL_TYPE_GEOMETRY &&
- item_field->field->type() != MYSQL_TYPE_BLOB;
+      if (!field->part_of_key.is_set(keyno) ||
+ field->type() == MYSQL_TYPE_GEOMETRY ||
+ field->type() == MYSQL_TYPE_BLOB)
+ return FALSE;
+ KEY *key_info= tbl->key_info + keyno;
+ KEY_PART_INFO *key_part= key_info->key_part;
+ KEY_PART_INFO *key_part_end= key_part + key_info->key_parts;
+ for ( ; key_part < key_part_end; key_part++)
+ {
+ if (field->eq(key_part->field))
+ return !(key_part->key_part_flag & HA_PART_KEY_SEG);
+ }
+ if ((tbl->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
+ tbl->s->primary_key != MAX_KEY &&
+ tbl->s->primary_key != keyno)
+ {
+ key_info= tbl->key_info + tbl->s->primary_key;
+ key_part= key_info->key_part;
+ key_part_end= key_part + key_info->key_parts;
+ for ( ; key_part < key_part_end; key_part++)
+ {
+ /*
+ It does not make sense to use the fact that the engine can read in
+          a full field if the index is built only over a part
+ of this field.
+ */
+ if (field->eq(key_part->field))
+ return !(key_part->key_part_flag & HA_PART_KEY_SEG);
+ }
+ }
+ return FALSE;
}
case Item::REF_ITEM:
return uses_index_fields_only(item->real_item(), tbl, keyno,
@@ -210,11 +239,9 @@ Item *make_cond_for_index(Item *cond, TABLE *table, uint keyno,
}
-Item *make_cond_remainder(Item *cond, bool exclude_index)
+Item *make_cond_remainder(Item *cond, TABLE *table, uint keyno,
+ bool other_tbls_ok, bool exclude_index)
{
- if (exclude_index && cond->marker == ICP_COND_USES_INDEX_ONLY)
- return 0; /* Already checked */
-
if (cond->type() == Item::COND_ITEM)
{
table_map tbl_map= 0;
@@ -228,7 +255,8 @@ Item *make_cond_remainder(Item *cond, bool exclude_index)
Item *item;
while ((item=li++))
{
- Item *fix= make_cond_remainder(item, exclude_index);
+ Item *fix= make_cond_remainder(item, table, keyno,
+ other_tbls_ok, exclude_index);
if (fix)
{
new_cond->argument_list()->push_back(fix);
@@ -255,7 +283,8 @@ Item *make_cond_remainder(Item *cond, bool exclude_index)
Item *item;
while ((item=li++))
{
- Item *fix= make_cond_remainder(item, FALSE);
+ Item *fix= make_cond_remainder(item, table, keyno,
+ other_tbls_ok, FALSE);
if (!fix)
return (COND*) 0;
new_cond->argument_list()->push_back(fix);
@@ -267,7 +296,14 @@ Item *make_cond_remainder(Item *cond, bool exclude_index)
return new_cond;
}
}
- return cond;
+ else
+ {
+ if (exclude_index &&
+ uses_index_fields_only(cond, table, keyno, other_tbls_ok))
+ return 0;
+ else
+ return cond;
+ }
}
@@ -288,30 +324,13 @@ void push_index_cond(JOIN_TAB *tab, uint keyno)
{
DBUG_ENTER("push_index_cond");
Item *idx_cond;
- bool do_index_cond_pushdown=
- ((tab->table->file->index_flags(keyno, 0, 1) &
- HA_DO_INDEX_COND_PUSHDOWN) &&
- optimizer_flag(tab->join->thd, OPTIMIZER_SWITCH_INDEX_COND_PUSHDOWN));
-
- /*
- Do not try index condition pushdown on indexes which have partially-covered
- columns. Unpacking from a column prefix into index tuple is not a supported
- operation in some engines, see e.g. MySQL BUG#42991.
- TODO: a better solution would be not to consider partially-covered columns
- as parts of the index and still produce/check index condition for
- fully-covered index columns.
- */
- KEY *key_info= tab->table->key_info + keyno;
- for (uint kp= 0; kp < key_info->key_parts; kp++)
- {
- if ((key_info->key_part[kp].key_part_flag & HA_PART_KEY_SEG))
- {
- do_index_cond_pushdown= FALSE;
- break;
- }
- }
- if (do_index_cond_pushdown)
+ if ((tab->table->file->index_flags(keyno, 0, 1) &
+ HA_DO_INDEX_COND_PUSHDOWN) &&
+ optimizer_flag(tab->join->thd, OPTIMIZER_SWITCH_INDEX_COND_PUSHDOWN) &&
+ tab->join->thd->lex->sql_command != SQLCOM_UPDATE_MULTI &&
+ tab->join->thd->lex->sql_command != SQLCOM_DELETE_MULTI &&
+ tab->type != JT_CONST && tab->type != JT_SYSTEM)
{
DBUG_EXECUTE("where",
print_where(tab->select_cond, "full cond", QT_ORDINARY););
@@ -356,7 +375,8 @@ void push_index_cond(JOIN_TAB *tab, uint keyno)
tab->ref.disable_cache= TRUE;
Item *row_cond= tab->idx_cond_fact_out ?
- make_cond_remainder(tab->select_cond, TRUE) :
+ make_cond_remainder(tab->select_cond, tab->table, keyno,
+ tab->icp_other_tables_ok, TRUE) :
tab->pre_idx_push_select_cond;
DBUG_EXECUTE("where",
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 9a61317b1a7..2a922541f72 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -1164,10 +1164,10 @@ int SEL_IMERGE::and_sel_tree(RANGE_OPT_PARAM *param, SEL_TREE *tree,
2. In the second mode, when is_first_check_pass==FALSE :
2.1. For each rt_j in the imerge that can be ored (see the function
- sel_trees_can_be_ored), but not must be ored, with rt the function
- replaces rt_j for a range tree such that for each index for which
- ranges are defined in both in rt_j and rt the tree contains the
- result of oring of these ranges.
+ sel_trees_can_be_ored) with rt the function replaces rt_j for a
+ range tree such that for each index for which ranges are defined
+ in both in rt_j and rt the tree contains the result of oring of
+ these ranges.
2.2. In other cases the function does not produce any imerge.
When is_first_check==TRUE the function returns FALSE in the parameter
@@ -1191,7 +1191,7 @@ int SEL_IMERGE::or_sel_tree_with_checks(RANGE_OPT_PARAM *param,
bool *is_last_check_pass)
{
bool was_ored= FALSE;
- *is_last_check_pass= TRUE;
+ *is_last_check_pass= is_first_check_pass;
SEL_TREE** or_tree = trees;
for (uint i= 0; i < n_trees; i++, or_tree++)
{
@@ -1202,7 +1202,7 @@ int SEL_IMERGE::or_sel_tree_with_checks(RANGE_OPT_PARAM *param,
{
bool must_be_ored= sel_trees_must_be_ored(param, *or_tree, tree,
ored_keys);
- if (must_be_ored || !is_first_check_pass)
+ if (must_be_ored || !is_first_check_pass)
{
result_keys.clear_all();
result= *or_tree;
@@ -1238,22 +1238,19 @@ int SEL_IMERGE::or_sel_tree_with_checks(RANGE_OPT_PARAM *param,
{
if (result_keys.is_clear_all())
result->type= SEL_TREE::ALWAYS;
- *is_last_check_pass= TRUE;
if ((result->type == SEL_TREE::MAYBE) ||
(result->type == SEL_TREE::ALWAYS))
return 1;
/* SEL_TREE::IMPOSSIBLE is impossible here */
result->keys_map= result_keys;
*or_tree= result;
- if (is_first_check_pass)
- return 0;
was_ored= TRUE;
}
}
if (was_ored)
return 0;
- if (!*is_last_check_pass &&
+ if (is_first_check_pass && !*is_last_check_pass &&
!(tree= new SEL_TREE(tree, FALSE, param)))
return (-1);
return or_sel_tree(param, tree);
@@ -8473,9 +8470,9 @@ tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
/* Build the imerge part of the tree for the formula (1) */
SEL_TREE *rt1= tree1;
SEL_TREE *rt2= tree2;
- if (!no_merges1)
+ if (no_merges1)
rt1= new SEL_TREE(tree1, TRUE, param);
- if (!no_merges2)
+ if (no_merges2)
rt2= new SEL_TREE(tree2, TRUE, param);
if (!rt1 || !rt2 ||
result->merges.push_back(imerge_from_ranges) ||
@@ -9170,6 +9167,13 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2)
key2: [---]
tmp: [---------]
*/
+ if (key2->use_count)
+ {
+ SEL_ARG *key2_cpy= new SEL_ARG(*key2);
+        if (!key2_cpy)
+ return 0;
+ key2= key2_cpy;
+ }
key2->copy_max_to_min(tmp);
continue;
}
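
The key_or() hunk guards a shared SEL_ARG: when key2 is still referenced elsewhere (use_count != 0) it has to be cloned before its minimum bound is rewritten, otherwise the other users of the tree would observe the mutation. A generic copy-on-shared-use sketch of that pattern (the structure here is illustrative, not the server's SEL_ARG):

#include <new>

struct RangeNode
{
  int min_value, max_value;
  unsigned use_count;            /* how many other trees reference this node */
};

/* Return a node that is safe to mutate, or NULL if the clone allocation failed. */
static RangeNode *writable_copy(RangeNode *node)
{
  if (node->use_count == 0)
    return node;                 /* sole owner: mutate in place */
  return new (std::nothrow) RangeNode(*node);
}

key_or() then applies copy_max_to_min(tmp) to the private copy, leaving the shared original untouched.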
diff --git a/sql/opt_range.h b/sql/opt_range.h
index a52f3c2cd3a..cf8da1acb0d 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -375,6 +375,12 @@ public:
Table record buffer used by this quick select.
*/
uchar *record;
+
+ virtual void replace_handler(handler *new_file)
+ {
+ DBUG_ASSERT(0); /* Only supported in QUICK_RANGE_SELECT */
+ }
+
#ifndef DBUG_OFF
/*
Print quick select information to DBUG_FILE. Caller is responsible
@@ -475,6 +481,7 @@ public:
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
+ virtual void replace_handler(handler *new_file) { file= new_file; }
QUICK_SELECT_I *make_reverse(uint used_key_parts_arg);
private:
/* Default copy ctor used by QUICK_SELECT_DESC */
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index c1fe8de51a4..0aa6fb7e913 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -209,6 +209,74 @@ end_sj_materialize(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
/*
+  Check if the Materialization strategy is allowed for a given subquery predicate.
+
+ @param thd Thread handle
+ @param in_subs The subquery predicate
+ @param child_select The select inside predicate (the function will
+ check it is the only one)
+
+ @return TRUE - Materialization is applicable
+ FALSE - Otherwise
+*/
+
+bool is_materialization_applicable(THD *thd, Item_in_subselect *in_subs,
+ st_select_lex *child_select)
+{
+ st_select_lex_unit* parent_unit= child_select->master_unit();
+ /*
+ Check if the subquery predicate can be executed via materialization.
+ The required conditions are:
+ 0. The materialization optimizer switch was set.
+ 1. Subquery is a single SELECT (not a UNION).
+ TODO: this is a limitation that can be fixed
+ 2. Subquery is not a table-less query. In this case there is no
+ point in materializing.
+ 2A The upper query is not a table-less SELECT ... FROM DUAL. We
+ can't do materialization for SELECT .. FROM DUAL because it
+ does not call setup_subquery_materialization(). We could make
+ SELECT ... FROM DUAL call that function but that doesn't seem
+      to be a case worth handling.
+ 3. Either the subquery predicate is a top-level predicate, or at
+ least one partial match strategy is enabled. If no partial match
+ strategy is enabled, then materialization cannot be used for
+ non-top-level queries because it cannot handle NULLs correctly.
+ 4. Subquery is non-correlated
+ TODO:
+ This condition is too restrictive (limitation). It can be extended to:
+ (Subquery is non-correlated ||
+ Subquery is correlated to any query outer to IN predicate ||
+ (Subquery is correlated to the immediate outer query &&
+ Subquery !contains {GROUP BY, ORDER BY [LIMIT],
+ aggregate functions}) && subquery predicate is not under "NOT IN"))
+
+ (*) The subquery must be part of a SELECT statement. The current
+ condition also excludes multi-table update statements.
+ A note about prepared statements: we want the if-branch to be taken on
+ PREPARE and each EXECUTE. The rewrites are only done once, but we need
+ select_lex->sj_subselects list to be populated for every EXECUTE.
+
+ */
+ if (optimizer_flag(thd, OPTIMIZER_SWITCH_MATERIALIZATION) && // 0
+ !child_select->is_part_of_union() && // 1
+ parent_unit->first_select()->leaf_tables.elements && // 2
+ thd->lex->sql_command == SQLCOM_SELECT && // *
+ child_select->outer_select()->leaf_tables.elements && // 2A
+ subquery_types_allow_materialization(in_subs) &&
+ (in_subs->is_top_level_item() || //3
+ optimizer_flag(thd,
+ OPTIMIZER_SWITCH_PARTIAL_MATCH_ROWID_MERGE) || //3
+ optimizer_flag(thd,
+ OPTIMIZER_SWITCH_PARTIAL_MATCH_TABLE_SCAN)) && //3
+ !in_subs->is_correlated) //4
+ {
+ return TRUE;
+ }
+ return FALSE;
+}
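
The numbered conditions above collapse into a single boolean test: materialization needs the optimizer switch on, a single non-table-less SELECT inside a SELECT statement whose outer query also has tables, types that can be stored in a temporary table, a non-correlated predicate, and either a top-level predicate or at least one partial-match strategy to cope with NULLs. A compressed sketch of that decision, with plain bools standing in for the THD/LEX/SELECT_LEX checks:

/*
  Illustrative condensation of conditions 0-4, 2A and (*) above; every input is
  a plain bool here, whereas the server derives them from THD, LEX and the
  SELECT_LEX tree.
*/
struct MatCheckSketch
{
  bool materialization_switch_on;   /* 0  */
  bool subquery_is_union_part;      /* 1  */
  bool subquery_has_tables;         /* 2  */
  bool outer_query_has_tables;      /* 2A */
  bool statement_is_select;         /* *  */
  bool types_allow_materialization;
  bool predicate_is_top_level;      /* 3  */
  bool partial_match_enabled;       /* 3  */
  bool subquery_is_correlated;      /* 4  */
};

static bool materialization_applicable(const MatCheckSketch &c)
{
  return c.materialization_switch_on && !c.subquery_is_union_part &&
         c.subquery_has_tables && c.outer_query_has_tables &&
         c.statement_is_select && c.types_allow_materialization &&
         (c.predicate_is_top_level || c.partial_match_enabled) &&
         !c.subquery_is_correlated;
}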
+
+
+/*
Check if we need JOIN::prepare()-phase subquery rewrites and if yes, do them
SYNOPSIS
@@ -339,10 +407,10 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
!select_lex->is_part_of_union() && // 2
!select_lex->group_list.elements && !join->order && // 3
!join->having && !select_lex->with_sum_func && // 4
- thd->thd_marker.emb_on_expr_nest && // 5
+ in_subs->emb_on_expr_nest && // 5
select_lex->outer_select()->join && // 6
parent_unit->first_select()->leaf_tables.elements && // 7
- !in_subs->in_strategy && // 8
+ !in_subs->has_strategy() && // 8
select_lex->outer_select()->leaf_tables.elements && // 9
!((join->select_options | // 10
select_lex->outer_select()->join->select_options) // 10
@@ -352,7 +420,6 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
(void)subquery_types_allow_materialization(in_subs);
- in_subs->emb_on_expr_nest= thd->thd_marker.emb_on_expr_nest;
in_subs->is_flattenable_semijoin= TRUE;
/* Register the subquery for further processing in flatten_subqueries() */
@@ -381,62 +448,17 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
*/
if (in_subs)
{
- /*
- Check if the subquery predicate can be executed via materialization.
- The required conditions are:
- 0. The materialization optimizer switch was set.
- 1. Subquery is a single SELECT (not a UNION).
- TODO: this is a limitation that can be fixed
- 2. Subquery is not a table-less query. In this case there is no
- point in materializing.
- 2A The upper query is not a table-less SELECT ... FROM DUAL. We
- can't do materialization for SELECT .. FROM DUAL because it
- does not call setup_subquery_materialization(). We could make
- SELECT ... FROM DUAL call that function but that doesn't seem
- to be the case that is worth handling.
- 3. Either the subquery predicate is a top-level predicate, or at
- least one partial match strategy is enabled. If no partial match
- strategy is enabled, then materialization cannot be used for
- non-top-level queries because it cannot handle NULLs correctly.
- 4. Subquery is non-correlated
- TODO:
- This condition is too restrictive (limitation). It can be extended to:
- (Subquery is non-correlated ||
- Subquery is correlated to any query outer to IN predicate ||
- (Subquery is correlated to the immediate outer query &&
- Subquery !contains {GROUP BY, ORDER BY [LIMIT],
- aggregate functions}) && subquery predicate is not under "NOT IN"))
-
- (*) The subquery must be part of a SELECT statement. The current
- condition also excludes multi-table update statements.
- A note about prepared statements: we want the if-branch to be taken on
- PREPARE and each EXECUTE. The rewrites are only done once, but we need
- select_lex->sj_subselects list to be populated for every EXECUTE.
-
- */
- if (optimizer_flag(thd, OPTIMIZER_SWITCH_MATERIALIZATION) && // 0
- !select_lex->is_part_of_union() && // 1
- parent_unit->first_select()->leaf_tables.elements && // 2
- thd->lex->sql_command == SQLCOM_SELECT && // *
- select_lex->outer_select()->leaf_tables.elements && // 2A
- subquery_types_allow_materialization(in_subs) &&
- (in_subs->is_top_level_item() || //3
- optimizer_flag(thd,
- OPTIMIZER_SWITCH_PARTIAL_MATCH_ROWID_MERGE) || //3
- optimizer_flag(thd,
- OPTIMIZER_SWITCH_PARTIAL_MATCH_TABLE_SCAN)) && //3
- !in_subs->is_correlated) //4
- {
- in_subs->in_strategy|= SUBS_MATERIALIZATION;
+ if (is_materialization_applicable(thd, in_subs, select_lex))
+ {
+ in_subs->add_strategy(SUBS_MATERIALIZATION);
/*
If the subquery is an AND-part of WHERE register for being processed
with jtbm strategy
*/
- if (thd->thd_marker.emb_on_expr_nest == NO_JOIN_NEST &&
+ if (in_subs->emb_on_expr_nest == NO_JOIN_NEST &&
optimizer_flag(thd, OPTIMIZER_SWITCH_SEMIJOIN))
{
- in_subs->emb_on_expr_nest= thd->thd_marker.emb_on_expr_nest;
in_subs->is_flattenable_semijoin= FALSE;
if (!in_subs->is_registered_semijoin)
{
@@ -456,17 +478,18 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
possible.
*/
if (optimizer_flag(thd, OPTIMIZER_SWITCH_IN_TO_EXISTS) ||
- !in_subs->in_strategy)
- {
- in_subs->in_strategy|= SUBS_IN_TO_EXISTS;
- }
+ !in_subs->has_strategy())
+ in_subs->add_strategy(SUBS_IN_TO_EXISTS);
}
/* Check if max/min optimization applicable */
- if (allany_subs)
- allany_subs->in_strategy|= (allany_subs->is_maxmin_applicable(join) ?
- (SUBS_MAXMIN_INJECTED | SUBS_MAXMIN_ENGINE) :
- SUBS_IN_TO_EXISTS);
+ if (allany_subs && !allany_subs->is_set_strategy())
+ {
+ uchar strategy= (allany_subs->is_maxmin_applicable(join) ?
+ (SUBS_MAXMIN_INJECTED | SUBS_MAXMIN_ENGINE) :
+ SUBS_IN_TO_EXISTS);
+ allany_subs->add_strategy(strategy);
+ }
/*
Transform each subquery predicate according to its overloaded
@@ -560,6 +583,16 @@ bool subquery_types_allow_materialization(Item_in_subselect *in_subs)
if (inner->field_type() == MYSQL_TYPE_BLOB ||
inner->field_type() == MYSQL_TYPE_GEOMETRY)
DBUG_RETURN(FALSE);
+ /*
+ Materialization also is unable to work when create_tmp_table() will
+ create a blob column because item->max_length is too big.
+ The following check is copied from Item::make_string_field():
+ */
+ if (inner->max_length / inner->collation.collation->mbmaxlen >
+ CONVERT_IF_BIGGER_TO_BLOB)
+ {
+ DBUG_RETURN(FALSE);
+ }
break;
case TIME_RESULT:
if (mysql_type_to_time_type(outer->field_type()) !=
@@ -825,14 +858,14 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
in_subq->unit->first_select()->join->table_count >= MAX_TABLES)
break;
if (convert_subq_to_sj(join, in_subq))
- DBUG_RETURN(TRUE);
+ goto restore_arena_and_fail;
}
else
{
if (join->table_count + 1 >= MAX_TABLES)
break;
if (convert_subq_to_jtbm(join, in_subq, &remove_item))
- DBUG_RETURN(TRUE);
+ goto restore_arena_and_fail;
}
if (remove_item)
{
@@ -841,7 +874,7 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
Item *replace_me= in_subq->original_item();
if (replace_where_subcondition(join, tree, replace_me, new Item_int(1),
FALSE))
- DBUG_RETURN(TRUE); /* purecov: inspected */
+ goto restore_arena_and_fail;
}
}
//skip_conversion:
@@ -877,12 +910,6 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
do_fix_fields))
DBUG_RETURN(TRUE);
in_subq->substitution= NULL;
-#if 0
- /*
- Don't do the following, because the simplify_join() call is after this
- call, and that call will save to prep_wher/prep_on_expr.
- */
-
/*
If this is a prepared statement, repeat the above operation for
prep_where (or prep_on_expr). Subquery-to-semijoin conversion is
@@ -893,21 +920,26 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
tree= (in_subq->emb_on_expr_nest == NO_JOIN_NEST)?
&join->select_lex->prep_where :
&(in_subq->emb_on_expr_nest->prep_on_expr);
-
- if (replace_where_subcondition(join, tree, replace_me, substitute,
+ /*
+        prep_on_expr/prep_where may be NULL in some cases.
+ If that is the case, do nothing - simplify_joins() will copy
+ ON/WHERE expression into prep_on_expr/prep_where.
+ */
+ if (*tree && replace_where_subcondition(join, tree, replace_me, substitute,
FALSE))
DBUG_RETURN(TRUE);
}
-#endif
/*
Revert to the IN->EXISTS strategy in the rare case when the subquery could
not be flattened.
- TODO: This is a limitation done for simplicity. Such subqueries could also
- be executed via materialization. In order to determine this, we should
- re-run the test for materialization that was done in
- check_and_do_in_subquery_rewrites.
*/
- in_subq->in_strategy= SUBS_IN_TO_EXISTS;
+ in_subq->reset_strategy(SUBS_IN_TO_EXISTS);
+ if (is_materialization_applicable(thd, in_subq,
+ in_subq->unit->first_select()))
+ {
+ in_subq->add_strategy(SUBS_MATERIALIZATION);
+ }
+
in_subq= li++;
}
@@ -915,6 +947,11 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
thd->restore_active_arena(arena, &backup);
join->select_lex->sj_subselects.empty();
DBUG_RETURN(FALSE);
+
+restore_arena_and_fail:
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ DBUG_RETURN(TRUE);
}
@@ -985,7 +1022,6 @@ static bool replace_where_subcondition(JOIN *join, Item **expr,
Item *old_cond, Item *new_cond,
bool do_fix_fields)
{
- //Item **expr= (emb_nest == (TABLE_LIST*)1)? &join->conds : &emb_nest->on_expr;
if (*expr == old_cond)
{
*expr= new_cond;
@@ -1009,9 +1045,15 @@ static bool replace_where_subcondition(JOIN *join, Item **expr,
}
}
}
- // If we came here it means there were an error during prerequisites check.
- DBUG_ASSERT(0);
- return TRUE;
+ /*
+    We can get here when
+    - we're doing replace operations on both on_expr and prep_on_expr, and
+    - on_expr is the same as prep_on_expr, or they share a sub-tree
+      (so when we do the replace in on_expr we also replace it in prep_on_expr,
+      and when we then try to do the replace in prep_on_expr, the item we
+      wanted to replace has already been replaced)
+ */
+ return FALSE;
}
static int subq_sj_candidate_cmp(Item_in_subselect* el1, Item_in_subselect* el2,
@@ -1214,7 +1256,7 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred)
/* 3. Remove the original subquery predicate from the WHERE/ON */
// The subqueries were replaced for Item_int(1) earlier
- subq_pred->in_strategy= SUBS_SEMI_JOIN; // for subsequent executions
+ subq_pred->reset_strategy(SUBS_SEMI_JOIN); // for subsequent executions
/*TODO: also reset the 'with_subselect' there. */
/* n. Adjust the parent_join->table_count counter */
@@ -1388,8 +1430,9 @@ static bool convert_subq_to_jtbm(JOIN *parent_join,
double read_time;
DBUG_ENTER("convert_subq_to_jtbm");
- subq_pred->in_strategy &= ~SUBS_IN_TO_EXISTS;
- subq_pred->optimize(&rows, &read_time);
+ subq_pred->set_strategy(SUBS_MATERIALIZATION);
+ if (subq_pred->optimize(&rows, &read_time))
+ DBUG_RETURN(TRUE);
subq_pred->jtbm_read_time= read_time;
subq_pred->jtbm_record_count=rows;
@@ -1760,6 +1803,9 @@ int pull_out_semijoin_tables(JOIN *join)
All obtained information is saved and will be used by the main join
optimization pass.
+
+ NOTES
+    Because of JOIN::reoptimize(), this function may be called multiple times.
RETURN
FALSE Ok
@@ -2134,6 +2180,17 @@ void advance_sj_state(JOIN *join, table_map remaining_tables,
pos->sj_strategy= SJ_OPT_NONE;
pos->prefix_dups_producing_tables= join->cur_dups_producing_tables;
+
+ /* We're performing optimization inside SJ-Materialization nest */
+ if (join->emb_sjm_nest)
+ {
+ pos->invalidate_firstmatch_prefix();
+ pos->first_loosescan_table= MAX_TABLES;
+ pos->dupsweedout_tables= 0;
+ pos->sjm_scan_need_tables= 0;
+ return;
+ }
+
/* Initialize the state or copy it from prev. tables */
if (idx == join->const_tables)
{
@@ -2640,6 +2697,7 @@ ulonglong get_bound_sj_equalities(TABLE_LIST *sj_nest,
{
res |= 1ULL << i;
}
+ i++;
}
return res;
}
@@ -2936,6 +2994,11 @@ bool setup_sj_materialization_part1(JOIN_TAB *sjm_tab)
DBUG_ENTER("setup_sj_materialization");
JOIN_TAB *tab= sjm_tab->bush_children->start;
TABLE_LIST *emb_sj_nest= tab->table->pos_in_table_list->embedding;
+
+ /* Walk out of outer join nests until we reach the semi-join nest we're in */
+ while (!emb_sj_nest->sj_mat_info)
+ emb_sj_nest= emb_sj_nest->embedding;
+
SJ_MATERIALIZATION_INFO *sjm= emb_sj_nest->sj_mat_info;
THD *thd= tab->join->thd;
/* First the calls come to the materialization function */
@@ -2984,6 +3047,9 @@ bool setup_sj_materialization_part2(JOIN_TAB *sjm_tab)
DBUG_ENTER("setup_sj_materialization_part2");
JOIN_TAB *tab= sjm_tab->bush_children->start;
TABLE_LIST *emb_sj_nest= tab->table->pos_in_table_list->embedding;
+ /* Walk out of outer join nests until we reach the semi-join nest we're in */
+ while (!emb_sj_nest->sj_mat_info)
+ emb_sj_nest= emb_sj_nest->embedding;
SJ_MATERIALIZATION_INFO *sjm= emb_sj_nest->sj_mat_info;
THD *thd= tab->join->thd;
uint i;
@@ -3320,7 +3386,7 @@ TABLE *create_duplicate_weedout_tmp_table(THD *thd,
bool using_unique_constraint=FALSE;
bool use_packed_rows= FALSE;
Field *field, *key_field;
- uint blob_count, null_pack_length, null_count;
+ uint null_pack_length, null_count;
uchar *null_flags;
uchar *pos;
DBUG_ENTER("create_duplicate_weedout_tmp_table");
@@ -3401,8 +3467,6 @@ TABLE *create_duplicate_weedout_tmp_table(THD *thd,
share->keys_for_keyread.init();
share->keys_in_use.init();
- blob_count= 0;
-
/* Create the field */
{
/*
@@ -3819,6 +3883,8 @@ int setup_semijoin_dups_elimination(JOIN *join, ulonglong options,
{
/* We jump from the last table to the first one */
tab->loosescan_match_tab= tab + pos->n_sj_tables - 1;
+ for (uint j= i; j < i + pos->n_sj_tables; j++)
+ join->join_tab[j].inside_loosescan_range= TRUE;
/* Calculate key length */
keylen= 0;
@@ -3854,7 +3920,7 @@ int setup_semijoin_dups_elimination(JOIN *join, ulonglong options,
if (j != join->const_tables && js_tab->use_quick != 2 &&
j <= no_jbuf_after &&
((js_tab->type == JT_ALL && join_cache_level != 0) ||
- (join_cache_level > 4 && (tab->type == JT_REF ||
+ (join_cache_level > 2 && (tab->type == JT_REF ||
tab->type == JT_EQ_REF))))
{
/* Looks like we'll be using join buffer */
@@ -4444,8 +4510,8 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
}
else
return false;
-
- DBUG_ASSERT(in_subs->in_strategy); /* A strategy must be chosen earlier. */
+ /* A strategy must be chosen earlier. */
+ DBUG_ASSERT(in_subs->has_strategy());
DBUG_ASSERT(in_to_exists_where || in_to_exists_having);
DBUG_ASSERT(!in_to_exists_where || in_to_exists_where->fixed);
DBUG_ASSERT(!in_to_exists_having || in_to_exists_having->fixed);
@@ -4455,8 +4521,8 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
strategies are possible and allowed by the user (checked during the prepare
phase.
*/
- if (in_subs->in_strategy & SUBS_MATERIALIZATION &&
- in_subs->in_strategy & SUBS_IN_TO_EXISTS)
+ if (in_subs->test_strategy(SUBS_MATERIALIZATION) &&
+ in_subs->test_strategy(SUBS_IN_TO_EXISTS))
{
JOIN *outer_join;
JOIN *inner_join= this;
@@ -4480,20 +4546,6 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
if (outer_join && outer_join->table_count > 0)
{
/*
- The index of the last JOIN_TAB in the outer JOIN where in_subs is
- attached (pushed to).
- */
- uint max_outer_join_tab_idx;
- /*
- Make_cond_for_table is called for predicates only in the WHERE/ON
- clauses. In all other cases, predicates are not pushed to any
- JOIN_TAB, and their join_tab_idx remains MAX_TABLES. Such predicates
- are evaluated for each complete row of the outer join.
- */
- max_outer_join_tab_idx= (in_subs->get_join_tab_idx() == MAX_TABLES) ?
- outer_join->table_count - 1:
- in_subs->get_join_tab_idx();
- /*
TODO:
Currently outer_lookup_keys is computed as the number of rows in
the partial join including the JOIN_TAB where the IN predicate is
@@ -4505,7 +4557,7 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
If the join order: t1, t2, the number of unique lookup keys is ~ to
the number of unique values t2.c2 in the partial join t1 join t2.
*/
- outer_join->get_partial_cost_and_fanout(max_outer_join_tab_idx,
+ outer_join->get_partial_cost_and_fanout(in_subs->get_join_tab_idx(),
table_map(-1),
&dummy,
&outer_lookup_keys);
@@ -4574,9 +4626,9 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
/* C.3 Compare the costs and choose the cheaper strategy. */
if (materialize_strategy_cost >= in_exists_strategy_cost)
- in_subs->in_strategy&= ~SUBS_MATERIALIZATION;
+ in_subs->set_strategy(SUBS_IN_TO_EXISTS);
else
- in_subs->in_strategy&= ~SUBS_IN_TO_EXISTS;
+ in_subs->set_strategy(SUBS_MATERIALIZATION);
DBUG_PRINT("info",
("mat_strategy_cost: %.2f, mat_cost: %.2f, write_cost: %.2f, lookup_cost: %.2f",
@@ -4597,7 +4649,7 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
otherwise
use materialization.
*/
- if (in_subs->in_strategy & SUBS_MATERIALIZATION &&
+ if (in_subs->test_strategy(SUBS_MATERIALIZATION) &&
in_subs->setup_mat_engine())
{
/*
@@ -4605,11 +4657,10 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
but it is not possible to execute it due to limitations in the
implementation, fall back to IN-TO-EXISTS.
*/
- in_subs->in_strategy&= ~SUBS_MATERIALIZATION;
- in_subs->in_strategy|= SUBS_IN_TO_EXISTS;
+ in_subs->set_strategy(SUBS_IN_TO_EXISTS);
}
- if (in_subs->in_strategy & SUBS_MATERIALIZATION)
+ if (in_subs->test_strategy(SUBS_MATERIALIZATION))
{
/* Restore the original query plan used for materialization. */
if (reopt_result == REOPT_NEW_PLAN)
@@ -4634,23 +4685,18 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
*/
select_limit= in_subs->unit->select_limit_cnt;
}
- else if (in_subs->in_strategy & SUBS_IN_TO_EXISTS)
+ else if (in_subs->test_strategy(SUBS_IN_TO_EXISTS))
{
if (reopt_result == REOPT_NONE && in_to_exists_where &&
const_tables != table_count)
{
/*
- The subquery was not reoptimized either because the user allowed only
- the IN-EXISTS strategy, or because materialization was not possible
- based on semantic analysis. Cleanup the original plan and reoptimize.
+ The subquery was not reoptimized with the newly injected IN-EXISTS
+ conditions either because the user allowed only the IN-EXISTS strategy,
+ or because materialization was not possible based on semantic analysis.
*/
- for (uint i= 0; i < table_count; i++)
- {
- join_tab[i].keyuse= NULL;
- join_tab[i].checked_keys.clear_all();
- }
- if ((reopt_result= reoptimize(in_to_exists_where, join_tables, NULL)) ==
- REOPT_ERROR)
+ reopt_result= reoptimize(in_to_exists_where, join_tables, NULL);
+ if (reopt_result == REOPT_ERROR)
return TRUE;
}
@@ -4720,7 +4766,7 @@ bool JOIN::choose_tableless_subquery_plan()
{
Item_in_subselect *in_subs;
in_subs= (Item_in_subselect*) subs_predicate;
- in_subs->in_strategy= SUBS_IN_TO_EXISTS;
+ in_subs->set_strategy(SUBS_IN_TO_EXISTS);
if (in_subs->create_in_to_exists_cond(this) ||
in_subs->inject_in_to_exists_cond(this))
return TRUE;
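
Illustration, not part of the patch: the hunks above replace direct bit-twiddling of Item_in_subselect::in_strategy with the has_strategy()/test_strategy()/set_strategy() accessors and keep only the cheaper of the two subquery strategies. A minimal standalone C++ sketch of that decision step, with invented names and made-up costs (the real accessors live in item_subselect.h and may differ):

    #include <cstdio>

    /* Assumed flag values; the server defines its own SUBS_* constants. */
    enum { SUBS_MATERIALIZATION = 1, SUBS_IN_TO_EXISTS = 2 };

    class Strategy_holder
    {
      unsigned strategy;                   /* bitmap of still-allowed strategies */
    public:
      Strategy_holder() : strategy(SUBS_MATERIALIZATION | SUBS_IN_TO_EXISTS) {}
      bool has_strategy() const { return strategy != 0; }
      bool test_strategy(unsigned s) const { return (strategy & s) != 0; }
      void set_strategy(unsigned s) { strategy= s; }   /* collapse to one choice */
    };

    int main()
    {
      Strategy_holder in_subs;
      double materialize_cost= 120.0, in_exists_cost= 80.0;   /* made-up numbers */
      /* Same shape as step C.3 above: keep only the cheaper strategy. */
      if (materialize_cost >= in_exists_cost)
        in_subs.set_strategy(SUBS_IN_TO_EXISTS);
      else
        in_subs.set_strategy(SUBS_MATERIALIZATION);
      printf("materialization kept: %d\n",
             (int) in_subs.test_strategy(SUBS_MATERIALIZATION));   /* prints 0 */
      return 0;
    }
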
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index bcc8a42efc6..5962e7de706 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -631,6 +631,8 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo,
/* Condition doesn't restrict the used table */
DBUG_RETURN(!cond->const_item());
}
+ else if (cond->is_expensive())
+ DBUG_RETURN(FALSE);
if (cond->type() == Item::COND_ITEM)
{
if (((Item_cond*) cond)->functype() == Item_func::COND_OR_FUNC)
@@ -677,6 +679,8 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo,
case Item_func::GE_FUNC:
break;
case Item_func::BETWEEN:
+ if (((Item_func_between*) cond)->negated)
+ DBUG_RETURN(FALSE);
between= 1;
break;
case Item_func::MULT_EQUAL_FUNC:
diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc
index 9ab6e0e84d2..3de73c3d4cc 100644
--- a/sql/opt_table_elimination.cc
+++ b/sql/opt_table_elimination.cc
@@ -587,10 +587,8 @@ void eliminate_tables(JOIN *join)
if (!join->outer_join)
DBUG_VOID_RETURN;
-#ifndef DBUG_OFF
if (!optimizer_flag(thd, OPTIMIZER_SWITCH_TABLE_ELIMINATION))
DBUG_VOID_RETURN; /* purecov: inspected */
-#endif
/* Find the tables that are referred to from WHERE/HAVING */
used_tables= (join->conds? join->conds->used_tables() : 0) |
@@ -694,6 +692,8 @@ eliminate_tables_for_list(JOIN *join, List<TABLE_LIST> *join_list,
{
table_map outside_used_tables= tables_used_elsewhere |
tables_used_on_left;
+ if (on_expr)
+ outside_used_tables |= on_expr->used_tables();
if (tbl->nested_join)
{
/* This is "... LEFT JOIN (join_nest) ON cond" */
diff --git a/sql/records.cc b/sql/records.cc
index a2bb49f9792..1e74e6d7f30 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -71,7 +71,6 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
bzero((char*) info,sizeof(*info));
info->thd= thd;
info->table= table;
- info->file= table->file;
info->record= table->record[0];
info->print_error= print_error;
info->unlock_row= rr_unlock_row;
@@ -181,7 +180,6 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
bzero((char*) info,sizeof(*info));
info->thd=thd;
info->table=table;
- info->file= table->file;
info->forms= &info->table; /* Only one table */
if (table->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE &&
@@ -304,9 +302,9 @@ void end_read_record(READ_RECORD *info)
{
filesort_free_buffers(info->table,0);
if (info->table->created)
- (void) info->file->extra(HA_EXTRA_NO_CACHE);
+ (void) info->table->file->extra(HA_EXTRA_NO_CACHE);
if (info->read_record != rr_quick) // otherwise quick_range does it
- (void) info->file->ha_index_or_rnd_end();
+ (void) info->table->file->ha_index_or_rnd_end();
info->table=0;
}
}
@@ -365,7 +363,7 @@ static int rr_quick(READ_RECORD *info)
static int rr_index_first(READ_RECORD *info)
{
- int tmp= info->file->ha_index_first(info->record);
+ int tmp= info->table->file->ha_index_first(info->record);
info->read_record= rr_index;
if (tmp)
tmp= rr_handle_error(info, tmp);
@@ -388,7 +386,7 @@ static int rr_index_first(READ_RECORD *info)
static int rr_index_last(READ_RECORD *info)
{
- int tmp= info->file->ha_index_last(info->record);
+ int tmp= info->table->file->ha_index_last(info->record);
info->read_record= rr_index_desc;
if (tmp)
tmp= rr_handle_error(info, tmp);
@@ -414,7 +412,7 @@ static int rr_index_last(READ_RECORD *info)
static int rr_index(READ_RECORD *info)
{
- int tmp= info->file->ha_index_next(info->record);
+ int tmp= info->table->file->ha_index_next(info->record);
if (tmp)
tmp= rr_handle_error(info, tmp);
return tmp;
@@ -439,7 +437,7 @@ static int rr_index(READ_RECORD *info)
static int rr_index_desc(READ_RECORD *info)
{
- int tmp= info->file->ha_index_prev(info->record);
+ int tmp= info->table->file->ha_index_prev(info->record);
if (tmp)
tmp= rr_handle_error(info, tmp);
return tmp;
@@ -449,7 +447,7 @@ static int rr_index_desc(READ_RECORD *info)
int rr_sequential(READ_RECORD *info)
{
int tmp;
- while ((tmp= info->file->ha_rnd_next(info->record)))
+ while ((tmp= info->table->file->ha_rnd_next(info->record)))
{
/*
rnd_next can return RECORD_DELETED for MyISAM when one thread is
@@ -474,7 +472,7 @@ static int rr_from_tempfile(READ_RECORD *info)
{
if (my_b_read(info->io_cache,info->ref_pos,info->ref_length))
return -1; /* End of file */
- if (!(tmp= info->file->ha_rnd_pos(info->record,info->ref_pos)))
+ if (!(tmp= info->table->file->ha_rnd_pos(info->record,info->ref_pos)))
break;
/* The following is extremely unlikely to happen */
if (tmp == HA_ERR_RECORD_DELETED ||
@@ -525,7 +523,7 @@ static int rr_from_pointers(READ_RECORD *info)
cache_pos= info->cache_pos;
info->cache_pos+= info->ref_length;
- if (!(tmp= info->file->ha_rnd_pos(info->record,cache_pos)))
+ if (!(tmp= info->table->file->ha_rnd_pos(info->record,cache_pos)))
break;
/* The following is extremely unlikely to happen */
@@ -658,7 +656,7 @@ static int rr_from_cache(READ_RECORD *info)
record=uint3korr(position);
position+=3;
record_pos=info->cache+record*info->reclength;
- if ((error=(int16) info->file->ha_rnd_pos(record_pos,info->ref_pos)))
+ if ((error=(int16) info->table->file->ha_rnd_pos(record_pos,info->ref_pos)))
{
record_pos[info->error_offset]=1;
shortstore(record_pos,error);
@@ -689,7 +687,7 @@ static int rr_cmp(uchar *a,uchar *b)
if (a[4] != b[4])
return (int) a[4] - (int) b[4];
if (a[5] != b[5])
- return (int) a[1] - (int) b[5];
+ return (int) a[5] - (int) b[5];
if (a[6] != b[6])
return (int) a[6] - (int) b[6];
return (int) a[7] - (int) b[7];
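
Illustration, not part of the patch: the one-byte change above fixes a copy-paste slip where byte 5 of the first key was read as a[1]. A standalone sketch of the comparator as it behaves after the fix (plain C++, independent of the server sources):

    #include <cstdio>

    typedef unsigned char uchar;

    /* Position-by-position comparison of two 8-byte row references;
       equivalent to the corrected rr_cmp() above. */
    static int rr_cmp_sketch(const uchar *a, const uchar *b)
    {
      for (int i= 0; i < 8; i++)
        if (a[i] != b[i])
          return (int) a[i] - (int) b[i];   /* first differing byte decides */
      return 0;
    }

    int main()
    {
      uchar x[8]= {0,0,0,0,0,9,0,0};
      uchar y[8]= {0,0,0,0,0,7,0,0};
      /* Before the fix the code compared a[1] (0) with b[5] (7) and wrongly
         reported x < y; with the fix x compares greater. */
      printf("%d\n", rr_cmp_sketch(x, y) > 0);   /* prints 1 */
      return 0;
    }
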
diff --git a/sql/records.h b/sql/records.h
index 27e861cbd1e..57467d665d4 100644
--- a/sql/records.h
+++ b/sql/records.h
@@ -48,7 +48,7 @@ struct READ_RECORD
typedef int (*Setup_func)(struct st_join_table*);
TABLE *table; /* Head-form */
- handler *file;
+ //handler *file;
TABLE **forms; /* head and ref forms */
Unlock_row_func unlock_row;
Read_func read_record;
diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h
index 3aa9a4f3488..8ff70dff825 100644
--- a/sql/rpl_rli.h
+++ b/sql/rpl_rli.h
@@ -455,14 +455,14 @@ public:
/**
Save pointer to Annotate_rows event and switch on the
- binlog_annotate_rows_events for this sql thread.
+ binlog_annotate_row_events for this sql thread.
    To be called when sql thread receives an Annotate_rows event.
*/
inline void set_annotate_event(Annotate_rows_log_event *event)
{
free_annotate_event();
m_annotate_event= event;
- sql_thd->variables.binlog_annotate_rows_events= 1;
+ sql_thd->variables.binlog_annotate_row_events= 1;
}
/**
@@ -476,7 +476,7 @@ public:
/**
Delete saved Annotate_rows event (if any) and switch off the
- binlog_annotate_rows_events for this sql thread.
+ binlog_annotate_row_events for this sql thread.
To be called when sql thread has applied the last (i.e. with
STMT_END_F flag) rbr event.
*/
@@ -484,7 +484,7 @@ public:
{
if (m_annotate_event)
{
- sql_thd->variables.binlog_annotate_rows_events= 0;
+ sql_thd->variables.binlog_annotate_row_events= 0;
delete m_annotate_event;
m_annotate_event= 0;
}
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index ed9a05f3d5f..e1c16b31b14 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -6554,3 +6554,7 @@ ER_QUERY_CACHE_IS_GLOBALY_DISABLED
eng "Query cache is globally disabled and you can't enable it only for this session"
ER_VIEW_ORDERBY_IGNORED
eng "View '%-.192s'.'%-.192s' ORDER BY clause ignored because there is other ORDER BY clause already."
+ER_CONNECTION_KILLED 70100
+ eng "Connection was killed"
+ER_INTERNAL_ERROR
+ eng "Internal error: '%-.192s'"
diff --git a/sql/slave.cc b/sql/slave.cc
index 96e9beedc5b..bd6e9aa69f9 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -650,7 +650,8 @@ terminate_slave_thread(THD *thd,
int err __attribute__((unused))= pthread_kill(thd->real_id, thr_client_alarm);
DBUG_ASSERT(err != EINVAL);
#endif
- thd->awake(THD::NOT_KILLED);
+ thd->awake(NOT_KILLED);
+
mysql_mutex_unlock(&thd->LOCK_thd_data);
/*
@@ -1189,6 +1190,8 @@ bool is_network_error(uint errorno)
errorno == CR_SERVER_GONE_ERROR ||
errorno == CR_SERVER_LOST ||
errorno == ER_CON_COUNT_ERROR ||
+ errorno == ER_CONNECTION_KILLED ||
+ errorno == ER_NEW_ABORTING_CONNECTION ||
errorno == ER_SERVER_SHUTDOWN)
return TRUE;
@@ -2266,7 +2269,7 @@ static int request_dump(THD *thd, MYSQL* mysql, Master_info* mi,
*suppress_warnings= FALSE;
- if (opt_log_slave_updates && opt_replicate_annotate_rows_events)
+ if (opt_log_slave_updates && opt_replicate_annotate_row_events)
binlog_flags|= BINLOG_SEND_ANNOTATE_ROWS_EVENT;
if (RUN_HOOK(binlog_relay_io,
@@ -3363,11 +3366,11 @@ pthread_handler_t handle_slave_sql(void *arg)
thd->temporary_tables = rli->save_temporary_tables; // restore temp tables
set_thd_in_use_temporary_tables(rli); // (re)set sql_thd in use for saved temp tables
/*
- binlog_annotate_rows_events must be TRUE only after an Annotate_rows event
+ binlog_annotate_row_events must be TRUE only after an Annotate_rows event
    has been received and only until the last corresponding rbr event has been
applied. In all other cases it must be FALSE.
*/
- thd->variables.binlog_annotate_rows_events= 0;
+ thd->variables.binlog_annotate_row_events= 0;
mysql_mutex_lock(&LOCK_thread_count);
threads.append(thd);
mysql_mutex_unlock(&LOCK_thread_count);
diff --git a/sql/slave.h b/sql/slave.h
index 6c1305e2ee0..7bee83af744 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -122,7 +122,7 @@ extern char *opt_relay_logname, *opt_relaylog_index_name;
extern my_bool opt_skip_slave_start, opt_reckless_slave;
extern my_bool opt_log_slave_updates;
extern char *opt_slave_skip_errors;
-extern my_bool opt_replicate_annotate_rows_events;
+extern my_bool opt_replicate_annotate_row_events;
extern ulonglong relay_log_space_limit;
/*
diff --git a/sql/sp.cc b/sql/sp.cc
index af864989abd..56827d410b7 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -34,7 +34,7 @@
static bool
create_string(THD *thd, String *buf,
- int sp_type,
+ stored_procedure_type sp_type,
const char *db, ulong dblen,
const char *name, ulong namelen,
const char *params, ulong paramslen,
@@ -46,7 +46,8 @@ create_string(THD *thd, String *buf,
ulonglong sql_mode);
static int
-db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
+db_load_routine(THD *thd, stored_procedure_type type, sp_name *name,
+ sp_head **sphp,
ulonglong sql_mode, const char *params, const char *returns,
const char *body, st_sp_chistics &chistics,
const char *definer, longlong created, longlong modified,
@@ -475,7 +476,8 @@ static TABLE *open_proc_table_for_update(THD *thd)
*/
static int
-db_find_routine_aux(THD *thd, int type, sp_name *name, TABLE *table)
+db_find_routine_aux(THD *thd, stored_procedure_type type, sp_name *name,
+ TABLE *table)
{
uchar key[MAX_KEY_LENGTH]; // db, name, optional key length type
DBUG_ENTER("db_find_routine_aux");
@@ -528,7 +530,8 @@ db_find_routine_aux(THD *thd, int type, sp_name *name, TABLE *table)
*/
static int
-db_find_routine(THD *thd, int type, sp_name *name, sp_head **sphp)
+db_find_routine(THD *thd, stored_procedure_type type, sp_name *name,
+ sp_head **sphp)
{
TABLE *table;
const char *params, *returns, *body;
@@ -797,7 +800,8 @@ Bad_db_error_handler::handle_condition(THD *thd,
static int
-db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
+db_load_routine(THD *thd, stored_procedure_type type,
+ sp_name *name, sp_head **sphp,
ulonglong sql_mode, const char *params, const char *returns,
const char *body, st_sp_chistics &chistics,
const char *definer, longlong created, longlong modified,
@@ -966,7 +970,7 @@ sp_returns_type(THD *thd, String &result, sp_head *sp)
*/
int
-sp_create_routine(THD *thd, int type, sp_head *sp)
+sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
{
int ret;
TABLE *table;
@@ -984,7 +988,8 @@ sp_create_routine(THD *thd, int type, sp_head *sp)
bool save_binlog_row_based;
DBUG_ENTER("sp_create_routine");
- DBUG_PRINT("enter", ("type: %d name: %.*s",type, (int) sp->m_name.length,
+ DBUG_PRINT("enter", ("type: %d name: %.*s", (int) type,
+ (int) sp->m_name.length,
sp->m_name.str));
String retstr(64);
retstr.set_charset(system_charset_info);
@@ -1236,7 +1241,7 @@ done:
*/
int
-sp_drop_routine(THD *thd, int type, sp_name *name)
+sp_drop_routine(THD *thd, stored_procedure_type type, sp_name *name)
{
TABLE *table;
int ret;
@@ -1317,7 +1322,8 @@ sp_drop_routine(THD *thd, int type, sp_name *name)
*/
int
-sp_update_routine(THD *thd, int type, sp_name *name, st_sp_chistics *chistics)
+sp_update_routine(THD *thd, stored_procedure_type type, sp_name *name,
+ st_sp_chistics *chistics)
{
TABLE *table;
int ret;
@@ -1326,7 +1332,8 @@ sp_update_routine(THD *thd, int type, sp_name *name, st_sp_chistics *chistics)
MDL_key::FUNCTION : MDL_key::PROCEDURE;
DBUG_ENTER("sp_update_routine");
DBUG_PRINT("enter", ("type: %d name: %.*s",
- type, (int) name->m_name.length, name->m_name.str));
+ (int) type,
+ (int) name->m_name.length, name->m_name.str));
DBUG_ASSERT(type == TYPE_ENUM_PROCEDURE ||
type == TYPE_ENUM_FUNCTION);
@@ -1590,7 +1597,7 @@ err:
*/
bool
-sp_show_create_routine(THD *thd, int type, sp_name *name)
+sp_show_create_routine(THD *thd, stored_procedure_type type, sp_name *name)
{
sp_head *sp;
@@ -1646,8 +1653,8 @@ sp_show_create_routine(THD *thd, int type, sp_name *name)
*/
sp_head *
-sp_find_routine(THD *thd, int type, sp_name *name, sp_cache **cp,
- bool cache_only)
+sp_find_routine(THD *thd, stored_procedure_type type, sp_name *name,
+ sp_cache **cp, bool cache_only)
{
sp_head *sp;
ulong depth= (type == TYPE_ENUM_PROCEDURE ?
@@ -1876,7 +1883,7 @@ bool sp_add_used_routine(Query_tables_list *prelocking_ctx, Query_arena *arena,
*/
void sp_add_used_routine(Query_tables_list *prelocking_ctx, Query_arena *arena,
- sp_name *rt, char rt_type)
+ sp_name *rt, enum stored_procedure_type rt_type)
{
MDL_key key((rt_type == TYPE_ENUM_FUNCTION) ? MDL_key::FUNCTION :
MDL_key::PROCEDURE,
@@ -2014,7 +2021,7 @@ int sp_cache_routine(THD *thd, Sroutine_hash_entry *rt,
char qname_buff[NAME_LEN*2+1+1];
sp_name name(&rt->mdl_request.key, qname_buff);
MDL_key::enum_mdl_namespace mdl_type= rt->mdl_request.key.mdl_namespace();
- int type= ((mdl_type == MDL_key::FUNCTION) ?
+ stored_procedure_type type= ((mdl_type == MDL_key::FUNCTION) ?
TYPE_ENUM_FUNCTION : TYPE_ENUM_PROCEDURE);
/*
@@ -2049,7 +2056,7 @@ int sp_cache_routine(THD *thd, Sroutine_hash_entry *rt,
  @retval non-0 Error while loading routine from mysql.proc table.
*/
-int sp_cache_routine(THD *thd, int type, sp_name *name,
+int sp_cache_routine(THD *thd, enum stored_procedure_type type, sp_name *name,
bool lookup_only, sp_head **sp)
{
int ret= 0;
@@ -2060,7 +2067,6 @@ int sp_cache_routine(THD *thd, int type, sp_name *name,
DBUG_ASSERT(type == TYPE_ENUM_FUNCTION || type == TYPE_ENUM_PROCEDURE);
-
*sp= sp_cache_lookup(spc, name);
if (lookup_only)
@@ -2128,7 +2134,7 @@ int sp_cache_routine(THD *thd, int type, sp_name *name,
*/
static bool
create_string(THD *thd, String *buf,
- int type,
+ stored_procedure_type type,
const char *db, ulong dblen,
const char *name, ulong namelen,
const char *params, ulong paramslen,
@@ -2221,7 +2227,7 @@ create_string(THD *thd, String *buf,
sp_head *
sp_load_for_information_schema(THD *thd, TABLE *proc_table, String *db,
- String *name, ulong sql_mode, int type,
+ String *name, ulong sql_mode, stored_procedure_type type,
const char *returns, const char *params,
bool *free_sp_head)
{
diff --git a/sql/sp.h b/sql/sp.h
index a06759663a7..3353132346b 100644
--- a/sql/sp.h
+++ b/sql/sp.h
@@ -36,6 +36,17 @@ struct TABLE_LIST;
typedef struct st_hash HASH;
template <typename T> class SQL_I_List;
+/*
+ Values for the type enum. This reflects the order of the enum declaration
+ in the CREATE TABLE command.
+*/
+enum stored_procedure_type
+{
+ TYPE_ENUM_FUNCTION=1,
+ TYPE_ENUM_PROCEDURE=2,
+ TYPE_ENUM_TRIGGER=3,
+ TYPE_ENUM_PROXY=4
+};
/* Tells what SP_DEFAULT_ACCESS should be mapped to */
#define SP_DEFAULT_ACCESS_MAPPING SP_CONTAINS_SQL
@@ -97,7 +108,7 @@ sp_drop_db_routines(THD *thd, char *db);
bool lock_db_routines(THD *thd, char *db);
sp_head *
-sp_find_routine(THD *thd, int type, sp_name *name,
+sp_find_routine(THD *thd, stored_procedure_type type, sp_name *name,
sp_cache **cp, bool cache_only);
int
@@ -106,23 +117,24 @@ sp_cache_routine(THD *thd, Sroutine_hash_entry *rt,
int
-sp_cache_routine(THD *thd, int type, sp_name *name,
+sp_cache_routine(THD *thd, stored_procedure_type type, sp_name *name,
bool lookup_only, sp_head **sp);
bool
sp_exist_routines(THD *thd, TABLE_LIST *procs, bool any);
bool
-sp_show_create_routine(THD *thd, int type, sp_name *name);
+sp_show_create_routine(THD *thd, stored_procedure_type type, sp_name *name);
int
-sp_create_routine(THD *thd, int type, sp_head *sp);
+sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp);
int
-sp_update_routine(THD *thd, int type, sp_name *name, st_sp_chistics *chistics);
+sp_update_routine(THD *thd, stored_procedure_type type, sp_name *name,
+ st_sp_chistics *chistics);
int
-sp_drop_routine(THD *thd, int type, sp_name *name);
+sp_drop_routine(THD *thd, stored_procedure_type type, sp_name *name);
/**
@@ -166,7 +178,7 @@ public:
Procedures for handling sets of stored routines used by statement or routine.
*/
void sp_add_used_routine(Query_tables_list *prelocking_ctx, Query_arena *arena,
- sp_name *rt, char rt_type);
+ sp_name *rt, stored_procedure_type rt_type);
bool sp_add_used_routine(Query_tables_list *prelocking_ctx, Query_arena *arena,
const MDL_key *key, TABLE_LIST *belong_to_view);
void sp_remove_not_own_routines(Query_tables_list *prelocking_ctx);
@@ -188,7 +200,7 @@ TABLE *open_proc_table_for_read(THD *thd, Open_tables_backup *backup);
sp_head *
sp_load_for_information_schema(THD *thd, TABLE *proc_table, String *db,
- String *name, ulong sql_mode, int type,
+ String *name, ulong sql_mode, stored_procedure_type type,
const char *returns, const char *params,
bool *free_sp_head);
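
Illustration, not part of the patch: the sp.cc/sp.h hunks above thread a stored_procedure_type enum through interfaces that previously took a bare int (or char). A standalone sketch of the benefit; type_name() is an invented helper, not server API:

    #include <cstdio>

    /* Mirrors the values added to sql/sp.h; they follow the order of the
       'type' column in the mysql.proc table definition. */
    enum stored_procedure_type
    {
      TYPE_ENUM_FUNCTION= 1,
      TYPE_ENUM_PROCEDURE= 2,
      TYPE_ENUM_TRIGGER= 3,
      TYPE_ENUM_PROXY= 4
    };

    /* With an enum parameter the compiler can warn about unhandled values
       in switches, which a plain 'int type' argument never could. */
    static const char *type_name(stored_procedure_type type)
    {
      switch (type) {
      case TYPE_ENUM_FUNCTION:  return "FUNCTION";
      case TYPE_ENUM_PROCEDURE: return "PROCEDURE";
      case TYPE_ENUM_TRIGGER:   return "TRIGGER";
      case TYPE_ENUM_PROXY:     return "PROXY";
      }
      return "UNKNOWN";
    }

    int main()
    {
      printf("%s\n", type_name(TYPE_ENUM_PROCEDURE));   /* prints PROCEDURE */
      return 0;
    }
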
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index f8802acb75b..e82f1b92312 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -1505,7 +1505,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
If the DB has changed, the pointer has changed too, but the
original thd->db will then have been freed
*/
- if (cur_db_changed && thd->killed != THD::KILL_CONNECTION)
+ if (cur_db_changed && thd->killed != KILL_CONNECTION)
{
/*
Force switching back to the saved current database, because it may be
@@ -1940,7 +1940,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
thd->variables.option_bits= binlog_save_options;
if (thd->binlog_evt_union.unioned_events)
{
- int errcode = query_error_code(thd, thd->killed == THD::NOT_KILLED);
+ int errcode = query_error_code(thd, thd->killed == NOT_KILLED);
Query_log_event qinfo(thd, binlog_buf.ptr(), binlog_buf.length(),
thd->binlog_evt_union.unioned_events_trans, FALSE, FALSE, errcode);
if (mysql_bin_log.write(&qinfo) &&
diff --git a/sql/sp_head.h b/sql/sp_head.h
index 5442d5d84b4..96e119f23bc 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -29,7 +29,7 @@
#include "my_global.h" /* NO_EMBEDDED_ACCESS_CHECKS */
#include "sql_class.h" // THD, set_var.h: THD
#include "set_var.h" // Item
-
+#include "sp.h"
#include <stddef.h>
/**
@@ -37,12 +37,6 @@
@ingroup Runtime_Environment
@{
*/
-// Values for the type enum. This reflects the order of the enum declaration
-// in the CREATE TABLE command.
-#define TYPE_ENUM_FUNCTION 1
-#define TYPE_ENUM_PROCEDURE 2
-#define TYPE_ENUM_TRIGGER 3
-#define TYPE_ENUM_PROXY 4
Item_result
sp_map_result_type(enum enum_field_types type);
@@ -164,8 +158,7 @@ public:
HAS_SQLCOM_FLUSH= 2048
};
- /** TYPE_ENUM_FUNCTION, TYPE_ENUM_PROCEDURE or TYPE_ENUM_TRIGGER */
- int m_type;
+ stored_procedure_type m_type;
uint m_flags; // Boolean attributes of a stored routine
Create_field m_return_field_def; /**< This is used for FUNCTIONs only. */
diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc
index 74bee6e856c..6fe4be989db 100644
--- a/sql/sp_rcontext.cc
+++ b/sql/sp_rcontext.cc
@@ -419,7 +419,7 @@ sp_rcontext::activate_handler(THD *thd,
/* Reset error state. */
thd->clear_error();
- thd->killed= THD::NOT_KILLED; // Some errors set thd->killed
+ thd->killed= NOT_KILLED; // Some errors set thd->killed
// (e.g. "bad data").
/* Return IP of the activated SQL handler. */
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 7a453bc6090..e4aa41b30ab 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -2420,7 +2420,7 @@ static int replace_user_table(THD *thd, TABLE *table, LEX_USER &combo,
table->field[next_field+2]->store((longlong) mqh.conn_per_hour, TRUE);
if (table->s->fields >= 36 &&
(mqh.specified_limits & USER_RESOURCES::USER_CONNECTIONS))
- table->field[next_field+3]->store((longlong) mqh.user_conn, TRUE);
+ table->field[next_field+3]->store((longlong) mqh.user_conn, FALSE);
mqh_used= mqh_used || mqh.questions || mqh.updates || mqh.conn_per_hour;
next_field+= 4;
@@ -5169,7 +5169,8 @@ ulong get_column_grant(THD *thd, GRANT_INFO *grant,
/* Help function for mysql_show_grants */
-static void add_user_option(String *grant, ulong value, const char *name)
+static void add_user_option(String *grant, long value, const char *name,
+ my_bool is_signed)
{
if (value)
{
@@ -5177,7 +5178,7 @@ static void add_user_option(String *grant, ulong value, const char *name)
grant->append(' ');
grant->append(name, strlen(name));
grant->append(' ');
- p=int10_to_str(value, buff, 10);
+ p=int10_to_str(value, buff, is_signed ? -10 : 10);
grant->append(buff,p-buff);
}
}
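
Illustration, not part of the patch: add_user_option() now receives the value as a signed long plus an is_signed flag, and in the server the sign is handled by passing a negative radix to int10_to_str(). A standalone sketch of the same idea with standard C formatting; append_user_option() is an invented stand-in:

    #include <cstdio>
    #include <cstring>

    /* Append " <NAME> <value>" to 'out', printing the value as signed or
       unsigned as requested; a zero value means "no limit" and is skipped. */
    static void append_user_option(char *out, size_t size, long value,
                                   const char *name, bool is_signed)
    {
      if (!value)
        return;
      size_t len= strlen(out);
      if (is_signed)
        snprintf(out + len, size - len, " %s %ld", name, value);
      else
        snprintf(out + len, size - len, " %s %lu", name, (unsigned long) value);
    }

    int main()
    {
      char grant[128]= "GRANT USAGE ON *.* TO 'u'@'%' WITH";
      append_user_option(grant, sizeof(grant), -1, "MAX_USER_CONNECTIONS", true);
      append_user_option(grant, sizeof(grant), 10, "MAX_QUERIES_PER_HOUR", false);
      puts(grant);   /* ... WITH MAX_USER_CONNECTIONS -1 MAX_QUERIES_PER_HOUR 10 */
      return 0;
    }
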
@@ -5359,13 +5360,13 @@ bool mysql_show_grants(THD *thd,LEX_USER *lex_user)
if (want_access & GRANT_ACL)
global.append(STRING_WITH_LEN(" GRANT OPTION"));
add_user_option(&global, acl_user->user_resource.questions,
- "MAX_QUERIES_PER_HOUR");
+ "MAX_QUERIES_PER_HOUR", 0);
add_user_option(&global, acl_user->user_resource.updates,
- "MAX_UPDATES_PER_HOUR");
+ "MAX_UPDATES_PER_HOUR", 0);
add_user_option(&global, acl_user->user_resource.conn_per_hour,
- "MAX_CONNECTIONS_PER_HOUR");
+ "MAX_CONNECTIONS_PER_HOUR", 0);
add_user_option(&global, acl_user->user_resource.user_conn,
- "MAX_USER_CONNECTIONS");
+ "MAX_USER_CONNECTIONS", 1);
}
protocol->prepare_for_resend();
protocol->store(global.ptr(),global.length(),global.charset());
@@ -9097,11 +9098,16 @@ bool acl_authenticate(THD *thd, uint connect_errors,
DBUG_RETURN(1);
}
- /* Don't allow the user to connect if he has done too many queries */
- if ((acl_user->user_resource.questions || acl_user->user_resource.updates ||
+ /*
+ Don't allow the user to connect if he has done too many queries.
+ As we are testing max_user_connections == 0 here, it means that we
+ can't let the user change max_user_connections from 0 in the server
+ without a restart as it would lead to wrong connect counting.
+ */
+ if ((acl_user->user_resource.questions ||
+ acl_user->user_resource.updates ||
acl_user->user_resource.conn_per_hour ||
- acl_user->user_resource.user_conn ||
- global_system_variables.max_user_connections) &&
+ acl_user->user_resource.user_conn || max_user_connections_checking) &&
get_or_create_user_conn(thd,
(opt_old_style_user_limits ? sctx->user : sctx->priv_user),
(opt_old_style_user_limits ? sctx->host_or_ip : sctx->priv_host),
@@ -9114,7 +9120,7 @@ bool acl_authenticate(THD *thd, uint connect_errors,
if (thd->user_connect &&
(thd->user_connect->user_resources.conn_per_hour ||
thd->user_connect->user_resources.user_conn ||
- global_system_variables.max_user_connections) &&
+ max_user_connections_checking) &&
check_for_max_user_connections(thd, thd->user_connect))
{
/* Ensure we don't decrement thd->user_connections->connections twice */
@@ -9340,39 +9346,6 @@ static struct st_mysql_auth old_password_handler=
old_password_authenticate
};
-mysql_declare_plugin(mysql_password)
-{
- MYSQL_AUTHENTICATION_PLUGIN, /* type constant */
- &native_password_handler, /* type descriptor */
- native_password_plugin_name.str, /* Name */
- "R.J.Silk, Sergei Golubchik", /* Author */
- "Native MySQL authentication", /* Description */
- PLUGIN_LICENSE_GPL, /* License */
- NULL, /* Init function */
- NULL, /* Deinit function */
- 0x0100, /* Version (1.0) */
- NULL, /* status variables */
- NULL, /* system variables */
- NULL, /* config options */
- 0, /* flags */
-},
-{
- MYSQL_AUTHENTICATION_PLUGIN, /* type constant */
- &old_password_handler, /* type descriptor */
- old_password_plugin_name.str, /* Name */
- "R.J.Silk, Sergei Golubchik", /* Author */
- "Old MySQL-4.0 authentication", /* Description */
- PLUGIN_LICENSE_GPL, /* License */
- NULL, /* Init function */
- NULL, /* Deinit function */
- 0x0100, /* Version (1.0) */
- NULL, /* status variables */
- NULL, /* system variables */
- NULL, /* config options */
- 0, /* flags */
-}
-mysql_declare_plugin_end;
-
maria_declare_plugin(mysql_password)
{
MYSQL_AUTHENTICATION_PLUGIN, /* type constant */
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 892757d77fe..2411c9c33ea 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -65,7 +65,7 @@ bool
No_such_table_error_handler::handle_condition(THD *,
uint sql_errno,
const char*,
- MYSQL_ERROR::enum_warning_level,
+ MYSQL_ERROR::enum_warning_level level,
const char*,
MYSQL_ERROR ** cond_hdl)
{
@@ -76,7 +76,8 @@ No_such_table_error_handler::handle_condition(THD *,
return TRUE;
}
- m_unhandled_errors++;
+ if (level == MYSQL_ERROR::WARN_LEVEL_ERROR)
+ m_unhandled_errors++;
return FALSE;
}
@@ -982,7 +983,7 @@ static void kill_delayed_threads_for_table(TABLE_SHARE *share)
if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
! in_use->killed)
{
- in_use->killed= THD::KILL_CONNECTION;
+ in_use->killed= KILL_SYSTEM_THREAD;
mysql_mutex_lock(&in_use->mysys_var->mutex);
if (in_use->mysys_var->current_cond)
{
@@ -6718,11 +6719,25 @@ find_field_in_tables(THD *thd, Item_ident *item,
{
SELECT_LEX *current_sel= thd->lex->current_select;
SELECT_LEX *last_select= table_ref->select_lex;
+ bool all_merged= TRUE;
+ for (SELECT_LEX *sl= current_sel; sl && sl!=last_select;
+ sl=sl->outer_select())
+ {
+ Item *subs= sl->master_unit()->item;
+ if (subs->type() == Item::SUBSELECT_ITEM &&
+ ((Item_subselect*)subs)->substype() == Item_subselect::IN_SUBS &&
+ ((Item_in_subselect*)subs)->test_strategy(SUBS_SEMI_JOIN))
+ {
+ continue;
+ }
+ all_merged= FALSE;
+ break;
+ }
/*
      If the field was an outer reference, mark all selects using this
sub query as dependent on the outer query
*/
- if (current_sel != last_select)
+ if (!all_merged && current_sel != last_select)
{
mark_select_range_as_dependent(thd, last_select, current_sel,
found, *ref, item);
@@ -8544,7 +8559,6 @@ int setup_conds(THD *thd, TABLE_LIST *tables, List<TABLE_LIST> &leaves,
{
SELECT_LEX *select_lex= thd->lex->current_select;
TABLE_LIST *table= NULL; // For HP compilers
- TABLE_LIST *save_emb_on_expr_nest= thd->thd_marker.emb_on_expr_nest;
List_iterator<TABLE_LIST> ti(leaves);
/*
it_is_update set to TRUE when tables of primary SELECT_LEX (SELECT_LEX
@@ -8581,7 +8595,6 @@ int setup_conds(THD *thd, TABLE_LIST *tables, List<TABLE_LIST> &leaves,
goto err_no_arena;
}
- thd->thd_marker.emb_on_expr_nest= NO_JOIN_NEST;
if (*conds)
{
thd->where="where clause";
@@ -8595,11 +8608,11 @@ int setup_conds(THD *thd, TABLE_LIST *tables, List<TABLE_LIST> &leaves,
*/
if ((*conds)->type() == Item::FIELD_ITEM && !derived)
wrap_ident(thd, conds);
+ (*conds)->mark_as_condition_AND_part(NO_JOIN_NEST);
if ((!(*conds)->fixed && (*conds)->fix_fields(thd, conds)) ||
(*conds)->check_cols(1))
goto err_no_arena;
}
- thd->thd_marker.emb_on_expr_nest= save_emb_on_expr_nest;
/*
Apply fix_fields() to all ON clauses at all levels of nesting,
@@ -8615,8 +8628,8 @@ int setup_conds(THD *thd, TABLE_LIST *tables, List<TABLE_LIST> &leaves,
if (embedded->on_expr)
{
        /* Make a join an expression */
- thd->thd_marker.emb_on_expr_nest= embedded;
thd->where="on clause";
+ embedded->on_expr->mark_as_condition_AND_part(embedded);
if ((!embedded->on_expr->fixed &&
embedded->on_expr->fix_fields(thd, &embedded->on_expr)) ||
embedded->on_expr->check_cols(1))
@@ -8640,7 +8653,6 @@ int setup_conds(THD *thd, TABLE_LIST *tables, List<TABLE_LIST> &leaves,
}
}
}
- thd->thd_marker.emb_on_expr_nest= save_emb_on_expr_nest;
if (!thd->stmt_arena->is_conventional())
{
@@ -9093,7 +9105,7 @@ bool mysql_notify_thread_having_shared_lock(THD *thd, THD *in_use,
if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
!in_use->killed)
{
- in_use->killed= THD::KILL_CONNECTION;
+ in_use->killed= KILL_SYSTEM_THREAD;
mysql_mutex_lock(&in_use->mysys_var->mutex);
if (in_use->mysys_var->current_cond)
mysql_cond_broadcast(in_use->mysys_var->current_cond);
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 452cc0b02b8..e3c9ecc1a36 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -498,11 +498,12 @@ static void make_base_query(String *new_query,
continue; // Continue with next symbol
case '/': // Start of comment ?
/*
- Comment of format /#!number #/, must be skipped.
+ Comment of format /#!number #/ or /#M!number #/, must be skipped.
These may include '"' and other comments, but it should
be safe to parse the content as a normal string.
*/
- if (query[0] != '*' || query[1] == '!')
+ if (query[0] != '*' || query[1] == '!' ||
+ (query[1] == 'M' && query[2] == '!'))
break;
query++; // skip "/"
@@ -4453,7 +4454,7 @@ void Query_cache::wreck(uint line, const char *message)
DBUG_PRINT("warning", ("%5d QUERY CACHE WRECK => DISABLED",line));
DBUG_PRINT("warning", ("=================================="));
if (thd)
- thd->killed= THD::KILL_CONNECTION;
+ thd->killed= KILL_CONNECTION;
cache_dump();
/* check_integrity(0); */ /* Can't call it here because of locks */
bins_dump();
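
Illustration, not part of the patch: make_base_query() strips plain /* ... */ comments when normalizing a query for the cache, but executable version comments carry real SQL and must be kept; the hunk above extends that test to MariaDB-style /*M!number ... */ comments. A standalone sketch of just the recognition step; is_version_comment() is an invented name and, as in the hunk, 'q' points just past the opening '/':

    #include <cstdio>

    static bool is_version_comment(const char *q)
    {
      if (q[0] != '*')
        return false;                  /* not a comment at all */
      if (q[1] == '!')
        return true;                   /* "!number ..." form */
      if (q[1] == 'M' && q[2] == '!')
        return true;                   /* "M!number ..." form (MariaDB-only) */
      return false;
    }

    int main()
    {
      printf("%d %d %d\n",
             (int) is_version_comment("*!50100 STRAIGHT_JOIN */"),
             (int) is_version_comment("*M!50300 ..."),
             (int) is_version_comment("* plain comment */"));   /* 1 1 0 */
      return 0;
    }
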
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index e1ff29e0b24..259e5f2fd4f 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -282,7 +282,7 @@ bool Foreign_key::validate(List<Create_field> &table_fields)
*/
void thd_set_killed(THD *thd)
{
- thd->killed= THD::KILL_CONNECTION;
+ thd->killed= KILL_CONNECTION;
}
/**
@@ -726,7 +726,7 @@ THD::THD()
:Statement(&main_lex, &main_mem_root, STMT_CONVENTIONAL_EXECUTION,
/* statement id */ 0),
rli_fake(0),
- in_sub_stmt(0),
+ in_sub_stmt(0), log_all_errors(0),
binlog_unsafe_warning_flags(0),
binlog_table_maps(0),
table_map_for_update(0),
@@ -1058,7 +1058,7 @@ MYSQL_ERROR* THD::raise_condition(uint sql_errno,
push_warning and strict SQL_MODE case.
*/
level= MYSQL_ERROR::WARN_LEVEL_ERROR;
- killed= THD::KILL_BAD_DATA;
+ killed= KILL_BAD_DATA;
}
switch (level)
@@ -1521,17 +1521,27 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
@note Do always call this while holding LOCK_thd_data.
*/
-void THD::awake(THD::killed_state state_to_set)
+void THD::awake(killed_state state_to_set)
{
DBUG_ENTER("THD::awake");
DBUG_PRINT("enter", ("this: %p current_thd: %p", this, current_thd));
THD_CHECK_SENTRY(this);
mysql_mutex_assert_owner(&LOCK_thd_data);
+ if (global_system_variables.log_warnings > 3)
+ {
+ Security_context *sctx= security_ctx;
+ sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION),
+ thread_id,(db ? db : "unconnected"),
+ sctx->user ? sctx->user : "unauthenticated",
+ sctx->host_or_ip,
+ "KILLED");
+ }
+
/* Set the 'killed' flag of 'this', which is the target THD object. */
killed= state_to_set;
- if (state_to_set != THD::KILL_QUERY)
+ if (state_to_set >= KILL_CONNECTION || state_to_set == NOT_KILLED)
{
#ifdef SIGNAL_WITH_VIO_CLOSE
if (this != current_thd)
@@ -1652,7 +1662,7 @@ void THD::disconnect()
mysql_mutex_lock(&LOCK_thd_data);
- killed= THD::KILL_CONNECTION;
+ killed= KILL_CONNECTION;
#ifdef SIGNAL_WITH_VIO_CLOSE
/*
@@ -1673,6 +1683,37 @@ void THD::disconnect()
/*
+ Get error number for killed state
+ Note that the error message can't have any parameters.
+ See thd::kill_message()
+*/
+
+int killed_errno(killed_state killed)
+{
+ switch (killed) {
+ case NOT_KILLED:
+ case KILL_HARD_BIT:
+ return 0; // Probably wrong usage
+ case KILL_BAD_DATA:
+ case KILL_BAD_DATA_HARD:
+ return 0; // Not a real error
+ case KILL_CONNECTION:
+ case KILL_CONNECTION_HARD:
+ case KILL_SYSTEM_THREAD:
+ case KILL_SYSTEM_THREAD_HARD:
+ return ER_CONNECTION_KILLED;
+ case KILL_QUERY:
+ case KILL_QUERY_HARD:
+ return ER_QUERY_INTERRUPTED;
+ case KILL_SERVER:
+ case KILL_SERVER_HARD:
+ return ER_SERVER_SHUTDOWN;
+ }
+ return 0; // Keep compiler happy
+}
+
+
+/*
Remember the location of thread info, the structure needed for
sql_alloc() and the structure for the net buffer
*/
@@ -2912,26 +2953,32 @@ bool select_max_min_finder_subselect::cmp_real()
{
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
double val1= cache->val_real(), val2= maxmin->val_real();
+
+ /* Ignore NULLs for ANY and keep them for ALL subqueries */
+ if (cache->null_value)
+ return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
+ if (maxmin->null_value)
+ return !is_all;
+
if (fmax)
- return (cache->null_value && !maxmin->null_value) ||
- (!cache->null_value && !maxmin->null_value &&
- val1 > val2);
- return (maxmin->null_value && !cache->null_value) ||
- (!cache->null_value && !maxmin->null_value &&
- val1 < val2);
+ return(val1 > val2);
+ return (val1 < val2);
}
bool select_max_min_finder_subselect::cmp_int()
{
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
longlong val1= cache->val_int(), val2= maxmin->val_int();
+
+ /* Ignore NULLs for ANY and keep them for ALL subqueries */
+ if (cache->null_value)
+ return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
+ if (maxmin->null_value)
+ return !is_all;
+
if (fmax)
- return (cache->null_value && !maxmin->null_value) ||
- (!cache->null_value && !maxmin->null_value &&
- val1 > val2);
- return (maxmin->null_value && !cache->null_value) ||
- (!cache->null_value && !maxmin->null_value &&
- val1 < val2);
+ return(val1 > val2);
+ return (val1 < val2);
}
bool select_max_min_finder_subselect::cmp_decimal()
@@ -2939,13 +2986,16 @@ bool select_max_min_finder_subselect::cmp_decimal()
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
my_decimal cval, *cvalue= cache->val_decimal(&cval);
my_decimal mval, *mvalue= maxmin->val_decimal(&mval);
+
+ /* Ignore NULLs for ANY and keep them for ALL subqueries */
+ if (cache->null_value)
+ return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
+ if (maxmin->null_value)
+ return !is_all;
+
if (fmax)
- return (cache->null_value && !maxmin->null_value) ||
- (!cache->null_value && !maxmin->null_value &&
- my_decimal_cmp(cvalue, mvalue) > 0) ;
- return (maxmin->null_value && !cache->null_value) ||
- (!cache->null_value && !maxmin->null_value &&
- my_decimal_cmp(cvalue,mvalue) < 0);
+ return (my_decimal_cmp(cvalue, mvalue) > 0) ;
+ return (my_decimal_cmp(cvalue,mvalue) < 0);
}
bool select_max_min_finder_subselect::cmp_str()
@@ -2958,13 +3008,16 @@ bool select_max_min_finder_subselect::cmp_str()
*/
val1= cache->val_str(&buf1);
val2= maxmin->val_str(&buf1);
+
+ /* Ignore NULLs for ANY and keep them for ALL subqueries */
+ if (cache->null_value)
+ return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
+ if (maxmin->null_value)
+ return !is_all;
+
if (fmax)
- return (cache->null_value && !maxmin->null_value) ||
- (!cache->null_value && !maxmin->null_value &&
- sortcmp(val1, val2, cache->collation.collation) > 0) ;
- return (maxmin->null_value && !cache->null_value) ||
- (!cache->null_value && !maxmin->null_value &&
- sortcmp(val1, val2, cache->collation.collation) < 0);
+ return (sortcmp(val1, val2, cache->collation.collation) > 0) ;
+ return (sortcmp(val1, val2, cache->collation.collation) < 0);
}
int select_exists_subselect::send_data(List<Item> &items)
@@ -3697,11 +3750,15 @@ void THD::restore_backup_open_tables_state(Open_tables_backup *backup)
@param thd user thread
@retval 0 the user thread is active
@retval 1 the user thread has been killed
+
+ This is used to signal a storage engine if it should be killed.
*/
extern "C" int thd_killed(const MYSQL_THD thd)
{
- return(thd->killed);
+ if (!(thd->killed & KILL_HARD_BIT))
+ return 0;
+ return thd->killed;
}
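
Illustration, not part of the patch: thd_killed() is the accessor visible to storage engines, and with the change above it reports a kill only when the hard bit is set, so a plain (soft) KILL lets the engine finish its current call cleanly. A standalone sketch of the bit test, using the same numeric values as the enum added to sql_class.h below:

    #include <cstdio>

    enum killed_state_sketch
    {
      NOT_KILLED= 0,
      KILL_HARD_BIT= 1,
      KILL_QUERY= 4,
      KILL_QUERY_HARD= 5,
      KILL_CONNECTION= 6,
      KILL_CONNECTION_HARD= 7
    };

    /* Storage engines are told to abort only for "hard" kills; for soft
       kills this returns 0 and the current handler call runs to completion. */
    static int engine_visible_kill(killed_state_sketch killed)
    {
      if (!(killed & KILL_HARD_BIT))
        return 0;
      return killed;
    }

    int main()
    {
      printf("%d %d %d\n",
             engine_visible_kill(KILL_QUERY),              /* 0 (soft) */
             engine_visible_kill(KILL_QUERY_HARD),         /* 5        */
             engine_visible_kill(KILL_CONNECTION_HARD));   /* 7        */
      return 0;
    }
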
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 284a9d9e42e..d842aa39045 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -398,6 +398,36 @@ typedef enum enum_diag_condition_item_name
*/
extern const LEX_STRING Diag_condition_item_names[];
+/* Note: these states are actually bit coded with HARD */
+enum killed_state
+{
+ NOT_KILLED= 0,
+ KILL_HARD_BIT= 1, /* Bit for HARD KILL */
+ KILL_BAD_DATA= 2,
+ KILL_BAD_DATA_HARD= 3,
+ KILL_QUERY= 4,
+ KILL_QUERY_HARD= 5,
+ /*
+ All of the following killed states will kill the connection
+ KILL_CONNECTION must be the first of these!
+ */
+ KILL_CONNECTION= 6,
+ KILL_CONNECTION_HARD= 7,
+ KILL_SYSTEM_THREAD= 8,
+ KILL_SYSTEM_THREAD_HARD= 9,
+ KILL_SERVER= 10,
+ KILL_SERVER_HARD= 11
+};
+
+extern int killed_errno(killed_state killed);
+#define killed_mask_hard(killed) ((killed_state) ((killed) & ~KILL_HARD_BIT))
+
+enum killed_type
+{
+ KILL_TYPE_ID,
+ KILL_TYPE_USER
+};
+
#include "sql_lex.h" /* Must be here */
class Delayed_insert;
@@ -483,7 +513,7 @@ typedef struct system_variables
ulong log_slow_rate_limit;
ulong binlog_format; ///< binlog format for this thd (see enum_binlog_format)
ulong progress_report_time;
- my_bool binlog_annotate_rows_events;
+ my_bool binlog_annotate_row_events;
my_bool binlog_direct_non_trans_update;
my_bool sql_log_bin;
ulong completion_type;
@@ -1664,19 +1694,12 @@ public:
uint in_sub_stmt;
/* True when opt_userstat_running is set at start of query */
bool userstat_running;
+ /* True if we want to log all errors */
+ bool log_all_errors;
/* container for handler's private per-connection data */
Ha_data ha_data[MAX_HA];
- /* Place to store various things */
- union
- {
- /*
- Used by subquery optimizations, see Item_in_subselect::emb_on_expr_nest.
- */
- TABLE_LIST *emb_on_expr_nest;
- } thd_marker;
-
bool prepare_derived_at_open;
/*
@@ -2128,14 +2151,6 @@ public:
condition. For details see the implementation of awake(),
especially the "broadcast" part.
*/
- enum killed_state
- {
- NOT_KILLED=0,
- KILL_BAD_DATA=1,
- KILL_CONNECTION=ER_SERVER_SHUTDOWN,
- KILL_QUERY=ER_QUERY_INTERRUPTED,
- KILLED_NO_VALUE /* means neither of the states */
- };
killed_state volatile killed;
/* scramble - random string sent to client on handshake */
@@ -2334,7 +2349,7 @@ public:
}
void close_active_vio();
#endif
- void awake(THD::killed_state state_to_set);
+ void awake(killed_state state_to_set);
/** Disconnect the associated communication endpoint. */
void disconnect();
@@ -2646,18 +2661,13 @@ public:
void end_statement();
inline int killed_errno() const
{
- killed_state killed_val; /* to cache the volatile 'killed' */
- return (killed_val= killed) != KILL_BAD_DATA ? killed_val : 0;
+ return ::killed_errno(killed);
}
inline void send_kill_message() const
{
int err= killed_errno();
if (err)
- {
- if ((err == KILL_CONNECTION) && !shutdown_in_progress)
- err = KILL_QUERY;
my_message(err, ER(err), MYF(0));
- }
}
/* return TRUE if we will abort query if we make a warning now */
inline bool really_abort_on_warning()
@@ -3587,9 +3597,11 @@ class select_max_min_finder_subselect :public select_subselect
Item_cache *cache;
bool (select_max_min_finder_subselect::*op)();
bool fmax;
+ bool is_all;
public:
- select_max_min_finder_subselect(Item_subselect *item_arg, bool mx)
- :select_subselect(item_arg), cache(0), fmax(mx)
+ select_max_min_finder_subselect(Item_subselect *item_arg, bool mx,
+ bool all)
+ :select_subselect(item_arg), cache(0), fmax(mx), is_all(all)
{}
void cleanup();
int send_data(List<Item> &items);
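
Illustration, not part of the patch: the ordering of the new killed_state enum is load-bearing — every value from KILL_CONNECTION upwards closes the connection, which is what comparisons such as state_to_set >= KILL_CONNECTION in THD::awake() and thd->killed < KILL_CONNECTION in thd_is_connection_alive() rely on, and the low bit marks a hard kill. A standalone sketch of those two properties:

    #include <cstdio>

    enum killed_state_sketch          /* same values as the new killed_state */
    {
      NOT_KILLED= 0,
      KILL_HARD_BIT= 1,
      KILL_QUERY= 4,
      KILL_QUERY_HARD= 5,
      KILL_CONNECTION= 6,             /* first state that kills the connection */
      KILL_SYSTEM_THREAD= 8,
      KILL_SERVER= 10
    };

    #define killed_mask_hard(k) ((killed_state_sketch) ((k) & ~KILL_HARD_BIT))

    static bool kills_connection(killed_state_sketch k)
    {
      return k >= KILL_CONNECTION;    /* relies purely on the enum ordering */
    }

    int main()
    {
      printf("%d %d\n", (int) kills_connection(KILL_QUERY),
                        (int) kills_connection(KILL_SYSTEM_THREAD));        /* 0 1 */
      printf("%d\n", killed_mask_hard(KILL_QUERY_HARD) == KILL_QUERY);      /* 1 */
      return 0;
    }
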
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index 5dde1818c27..6ab6f3237d7 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -126,9 +126,12 @@ int check_for_max_user_connections(THD *thd, USER_CONN *uc)
DBUG_ENTER("check_for_max_user_connections");
mysql_mutex_lock(&LOCK_user_conn);
+
+ /* Root is not affected by the value of max_user_connections */
if (global_system_variables.max_user_connections &&
!uc->user_resources.user_conn &&
- global_system_variables.max_user_connections < (uint) uc->connections)
+ global_system_variables.max_user_connections < (uint) uc->connections &&
+ !(thd->security_ctx->master_access & SUPER_ACL))
{
my_error(ER_TOO_MANY_USER_CONNECTIONS, MYF(0), uc->user);
goto end;
@@ -224,7 +227,7 @@ void time_out_user_resource_limits(THD *thd, USER_CONN *uc)
  /* If more than an hour since last check, reset resource checking */
if (check_time - uc->reset_utime >= LL(3600000000))
{
- uc->questions=1;
+ uc->questions=0;
uc->updates=0;
uc->conn_per_hour=0;
uc->reset_utime= check_time;
@@ -253,7 +256,7 @@ bool check_mqh(THD *thd, uint check_command)
if (uc->user_resources.questions &&
uc->questions++ >= uc->user_resources.questions)
{
- my_error(ER_USER_LIMIT_REACHED, MYF(0), uc->user, "max_questions",
+ my_error(ER_USER_LIMIT_REACHED, MYF(0), uc->user, "max_queries_per_hour",
(long) uc->user_resources.questions);
error=1;
goto end;
@@ -265,7 +268,7 @@ bool check_mqh(THD *thd, uint check_command)
(sql_command_flags[check_command] & CF_CHANGES_DATA) &&
uc->updates++ >= uc->user_resources.updates)
{
- my_error(ER_USER_LIMIT_REACHED, MYF(0), uc->user, "max_updates",
+ my_error(ER_USER_LIMIT_REACHED, MYF(0), uc->user, "max_updates_per_hour",
(long) uc->user_resources.updates);
error=1;
goto end;
@@ -1112,7 +1115,7 @@ void prepare_new_connection_state(THD* thd)
execute_init_command(thd, &opt_init_connect, &LOCK_sys_init_connect);
if (thd->is_error())
{
- thd->killed= THD::KILL_CONNECTION;
+ thd->killed= KILL_CONNECTION;
sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION),
thd->thread_id,(thd->db ? thd->db : "unconnected"),
sctx->user ? sctx->user : "unauthenticated",
@@ -1174,7 +1177,7 @@ bool thd_is_connection_alive(THD *thd)
NET *net= &thd->net;
if (!net->error &&
net->vio != 0 &&
- !(thd->killed == THD::KILL_CONNECTION))
+ thd->killed < KILL_CONNECTION)
return TRUE;
return FALSE;
}
@@ -1184,6 +1187,8 @@ void do_handle_one_connection(THD *thd_arg)
THD *thd= thd_arg;
thd->thr_create_utime= microsecond_interval_timer();
+ /* We need to set this because of time_out_user_resource_limits */
+ thd->start_utime= thd->thr_create_utime;
if (MYSQL_CALLBACK_ELSE(thread_scheduler, init_new_connection_thread, (), 0))
{
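
Illustration, not part of the patch: time_out_user_resource_limits() now resets the hourly question counter to 0 instead of 1, because check_mqh() already counts the statement that is being checked; with the old value the hour effectively started one statement short. A standalone sketch of that counting; the names are invented:

    #include <cstdio>

    struct User_conn { unsigned long questions; unsigned long limit; };

    /* Sketch of the check_mqh() test: count the statement and fail once the
       hourly limit is exceeded.  With the reset now putting questions back
       to 0, a limit of 3 really allows 3 statements per hour. */
    static bool over_query_limit(User_conn *uc)
    {
      return uc->limit && uc->questions++ >= uc->limit;
    }

    int main()
    {
      User_conn uc= {0, 3};           /* MAX_QUERIES_PER_HOUR 3, fresh hour */
      int allowed= 0;
      for (int i= 0; i < 5; i++)
        if (!over_query_limit(&uc))
          allowed++;
      printf("%d\n", allowed);        /* prints 3 */
      return 0;
    }
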
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 8013d2bd6e2..2301dae5fdf 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -32,6 +32,7 @@
#include "sql_base.h" // lock_table_names, tdc_remove_table
#include "sql_handler.h" // mysql_ha_rm_tables
#include <mysys_err.h>
+#include "sp_head.h"
#include "sp.h"
#include "events.h"
#include "sql_handler.h"
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index a70f00a439c..efce55cc18a 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -63,7 +63,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
order_list->first : NULL);
uint usable_index= MAX_KEY;
SELECT_LEX *select_lex= &thd->lex->select_lex;
- THD::killed_state killed_status= THD::NOT_KILLED;
+ killed_state killed_status= NOT_KILLED;
THD::enum_binlog_query_type query_type= THD::ROW_QUERY_TYPE;
DBUG_ENTER("mysql_delete");
@@ -379,7 +379,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
table->file->unlock_row(); // Row failed selection, release lock on it
}
killed_status= thd->killed;
- if (killed_status != THD::NOT_KILLED || thd->is_error())
+ if (killed_status != NOT_KILLED || thd->is_error())
error= 1; // Aborted
if (will_batch && (loc_error= table->file->end_bulk_delete()))
{
@@ -424,8 +424,8 @@ cleanup:
if (error < 0)
thd->clear_error();
else
- errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
-
+ errcode= query_error_code(thd, killed_status == NOT_KILLED);
+
/*
[binlog]: If 'handler::delete_all_rows()' was called and the
storage engine does not inject the rows itself, we replicate
@@ -870,7 +870,7 @@ void multi_delete::abort_result_set()
*/
if (mysql_bin_log.is_open())
{
- int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
+ int errcode= query_error_code(thd, thd->killed == NOT_KILLED);
/* possible error of writing binary log is ignored deliberately */
(void) thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query(), thd->query_length(),
@@ -1019,7 +1019,7 @@ int multi_delete::do_table_deletes(TABLE *table, bool ignore)
bool multi_delete::send_eof()
{
- THD::killed_state killed_status= THD::NOT_KILLED;
+ killed_state killed_status= NOT_KILLED;
thd_proc_info(thd, "deleting from reference tables");
/* Does deletes for the last n - 1 tables, returns 0 if ok */
@@ -1027,7 +1027,7 @@ bool multi_delete::send_eof()
/* compute a total error to know if something failed */
local_error= local_error || error;
- killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
+ killed_status= (local_error == 0)? NOT_KILLED : thd->killed;
/* reset used flags */
thd_proc_info(thd, "end");
@@ -1050,7 +1050,7 @@ bool multi_delete::send_eof()
if (local_error == 0)
thd->clear_error();
else
- errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
+ errcode= query_error_code(thd, killed_status == NOT_KILLED);
if (thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query(), thd->query_length(),
transactional_tables, FALSE, FALSE, errcode) &&
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index f8327ccee04..70c66d6cf29 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1026,7 +1026,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->clear_error();
}
else
- errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
+ errcode= query_error_code(thd, thd->killed == NOT_KILLED);
/* bug#22725:
@@ -1040,7 +1040,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
routines did not result in any error due to the KILLED. In
such case the flag is ignored for constructing binlog event.
*/
- DBUG_ASSERT(thd->killed != THD::KILL_BAD_DATA || error > 0);
+ DBUG_ASSERT(thd->killed != KILL_BAD_DATA || error > 0);
if (was_insert_delayed && table_list->lock_type == TL_WRITE)
{
/* Binlog INSERT DELAYED as INSERT without DELAYED. */
@@ -2136,7 +2136,7 @@ bool delayed_get_table(THD *thd, MDL_request *grl_protection_request,
/*
Annotating delayed inserts is not supported.
*/
- di->thd.variables.binlog_annotate_rows_events= 0;
+ di->thd.variables.binlog_annotate_row_events= 0;
di->thd.set_db(table_list->db, (uint) strlen(table_list->db));
di->thd.set_query(my_strdup(table_list->table_name,
@@ -2522,7 +2522,7 @@ void kill_delayed_threads(void)
Delayed_insert *di;
while ((di= it++))
{
- di->thd.killed= THD::KILL_CONNECTION;
+ di->thd.killed= KILL_CONNECTION;
mysql_mutex_lock(&di->thd.LOCK_thd_data);
if (di->thd.mysys_var)
{
@@ -2660,7 +2660,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
thd->set_current_time();
threads.append(thd);
- thd->killed=abort_loop ? THD::KILL_CONNECTION : THD::NOT_KILLED;
+ thd->killed=abort_loop ? KILL_CONNECTION : NOT_KILLED;
mysql_mutex_unlock(&LOCK_thread_count);
mysql_thread_set_psi_id(thd->thread_id);
@@ -2782,7 +2782,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
#endif
#endif
if (error == ETIMEDOUT || error == ETIME)
- thd->killed= THD::KILL_CONNECTION;
+ thd->killed= KILL_CONNECTION;
}
/* We can't lock di->mutex and mysys_var->mutex at the same time */
mysql_mutex_unlock(&di->mutex);
@@ -2809,7 +2809,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
if (! (thd->lock= mysql_lock_tables(thd, &di->table, 1, 0)))
{
/* Fatal error */
- thd->killed= THD::KILL_CONNECTION;
+ thd->killed= KILL_CONNECTION;
}
mysql_cond_broadcast(&di->cond_client);
}
@@ -2818,7 +2818,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
if (di->handle_inserts())
{
/* Some fatal error */
- thd->killed= THD::KILL_CONNECTION;
+ thd->killed= KILL_CONNECTION;
}
}
di->status=0;
@@ -2851,7 +2851,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
}
di->table=0;
- thd->killed= THD::KILL_CONNECTION; // If error
+ thd->killed= KILL_CONNECTION; // If error
mysql_mutex_unlock(&di->mutex);
close_thread_tables(thd); // Free the table
@@ -2937,7 +2937,7 @@ bool Delayed_insert::handle_inserts(void)
max_rows= delayed_insert_limit;
if (thd.killed || table->s->has_old_version())
{
- thd.killed= THD::KILL_CONNECTION;
+ thd.killed= KILL_SYSTEM_THREAD;
max_rows= ULONG_MAX; // Do as much as possible
}
@@ -3550,7 +3550,7 @@ bool select_insert::send_eof()
bool const trans_table= table->file->has_transactions();
ulonglong id, row_count;
bool changed;
- THD::killed_state killed_status= thd->killed;
+ killed_state killed_status= thd->killed;
DBUG_ENTER("select_insert::send_eof");
DBUG_PRINT("enter", ("trans_table=%d, table_type='%s'",
trans_table, table->file->table_type()));
@@ -3591,7 +3591,7 @@ bool select_insert::send_eof()
if (!error)
thd->clear_error();
else
- errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
+ errcode= query_error_code(thd, killed_status == NOT_KILLED);
if (thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query(), thd->query_length(),
trans_table, FALSE, FALSE, errcode))
@@ -3670,7 +3670,7 @@ void select_insert::abort_result_set() {
if (mysql_bin_log.is_open())
{
- int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
+ int errcode= query_error_code(thd, thd->killed == NOT_KILLED);
/* error of writing binary log is ignored */
(void) thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query(),
thd->query_length(),
@@ -4061,7 +4061,7 @@ select_create::binlog_show_create_table(TABLE **tables, uint count)
if (mysql_bin_log.is_open())
{
- int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
+ int errcode= query_error_code(thd, thd->killed == NOT_KILLED);
result= thd->binlog_query(THD::STMT_QUERY_TYPE,
query.ptr(), query.length(),
/* is_trans */ TRUE,
diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc
index b0fdb7a1c42..76d01b076f0 100644
--- a/sql/sql_join_cache.cc
+++ b/sql/sql_join_cache.cc
@@ -226,8 +226,6 @@ void JOIN_CACHE::calc_record_fields()
flag_fields+= test(tab->table->maybe_null);
fields+= tab->used_fields;
blobs+= tab->used_blobs;
-
- fields+= tab->check_rowid_field();
}
if ((with_match_flag= join_tab->use_match_flag()))
flag_fields++;
@@ -621,7 +619,12 @@ void JOIN_CACHE::create_remaining_fields()
copy->type= CACHE_ROWID;
copy->field= 0;
copy->referenced_field_no= 0;
- length+= copy->length;
+ /*
+ Note: this may seem odd, but at this point we have
+ table->file->ref==NULL while table->file->ref_length is already set
+      to the correct value.
+ */
+ length += table->file->ref_length;
data_field_count++;
copy++;
}
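
Illustration, not part of the patch: as the comment above notes, when the CACHE_ROWID field is laid out table->file->ref is still NULL while ref_length is already known, so the length accounting must use ref_length directly and the copy step later has to tolerate a NULL pointer. A tiny standalone sketch of that rule; the function name is invented:

    #include <cstdio>
    #include <cstring>

    typedef unsigned char uchar;

    /* Reserve 'ref_length' bytes for a row id in the join buffer; if the row
       id itself is not available yet (ref == NULL) the bytes are skipped but
       still counted, as in write_record_data() above. */
    static size_t store_rowid(uchar *cp, const uchar *ref, size_t ref_length)
    {
      if (ref)
        memcpy(cp, ref, ref_length);
      return ref_length;              /* the record length includes it either way */
    }

    int main()
    {
      uchar buf[16];
      printf("%u\n", (unsigned) store_rowid(buf, NULL, 6));   /* prints 6 */
      return 0;
    }
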
@@ -1298,6 +1301,7 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
if (with_length)
{
rec_len_ptr= cp;
+ DBUG_ASSERT(cp + size_of_rec_len <= buff + buff_size);
cp+= size_of_rec_len;
}
@@ -1307,6 +1311,7 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
*/
if (prev_cache)
{
+ DBUG_ASSERT(cp + prev_cache->get_size_of_rec_offset() <= buff + buff_size);
cp+= prev_cache->get_size_of_rec_offset();
prev_cache->store_rec_ref(cp, link);
}
@@ -1323,6 +1328,7 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
flags_pos= cp;
for ( ; copy < copy_end; copy++)
{
+ DBUG_ASSERT(cp + copy->length <= buff + buff_size);
memcpy(cp, copy->str, copy->length);
cp+= copy->length;
}
@@ -1349,6 +1355,7 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
{
last_rec_blob_data_is_in_rec_buff= 1;
/* Put down the length of the blob and the pointer to the data */
+ DBUG_ASSERT(cp + copy->length + sizeof(char*) <= buff + buff_size);
blob_field->get_image(cp, copy->length+sizeof(char*),
blob_field->charset());
cp+= copy->length+sizeof(char*);
@@ -1358,6 +1365,7 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
/* First put down the length of the blob and then copy the data */
blob_field->get_image(cp, copy->length,
blob_field->charset());
+ DBUG_ASSERT(cp + copy->length + copy->blob_length <= buff + buff_size);
memcpy(cp+copy->length, copy->str, copy->blob_length);
cp+= copy->length+copy->blob_length;
}
@@ -1368,12 +1376,14 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
case CACHE_VARSTR1:
/* Copy the significant part of the short varstring field */
len= (uint) copy->str[0] + 1;
+ DBUG_ASSERT(cp + len <= buff + buff_size);
memcpy(cp, copy->str, len);
cp+= len;
break;
case CACHE_VARSTR2:
/* Copy the significant part of the long varstring field */
len= uint2korr(copy->str) + 2;
+ DBUG_ASSERT(cp + len <= buff + buff_size);
memcpy(cp, copy->str, len);
cp+= len;
break;
@@ -1388,6 +1398,7 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
end > str && end[-1] == ' ';
end--) ;
len=(uint) (end-str);
+ DBUG_ASSERT(cp + len + 2 <= buff + buff_size);
int2store(cp, len);
memcpy(cp+2, str, len);
cp+= len+2;
@@ -1403,11 +1414,22 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
TABLE *table= (TABLE *) copy->str;
copy->str= table->file->ref;
copy->length= table->file->ref_length;
+ if (!copy->str)
+ {
+ /*
+ If table is an empty inner table of an outer join and it is
+ a materialized derived table then table->file->ref == NULL.
+ */
+ cp+= copy->length;
+ break;
+ }
}
/* fall through */
default:
/* Copy the entire image of the field from the record buffer */
- memcpy(cp, copy->str, copy->length);
+ DBUG_ASSERT(cp + copy->length <= buff + buff_size);
+ if (copy->str)
+ memcpy(cp, copy->str, copy->length);
cp+= copy->length;
}
}
@@ -1426,6 +1448,7 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
cnt++;
}
}
+ DBUG_ASSERT(cp + size_of_fld_ofs*cnt <= buff + buff_size);
cp+= size_of_fld_ofs*cnt;
}
@@ -1799,6 +1822,13 @@ uint JOIN_CACHE::read_record_field(CACHE_FIELD *copy, bool blob_in_rec_buff)
memset(copy->str+len, ' ', copy->length-len);
len+= 2;
break;
+ case CACHE_ROWID:
+ if (!copy->str)
+ {
+ len= copy->length;
+ break;
+ }
+ /* fall through */
default:
/* Copy the entire image of the field from the record buffer */
len= copy->length;
@@ -2022,6 +2052,7 @@ enum_nested_loop_state JOIN_CACHE::join_records(bool skip_last)
JOIN_TAB *tab;
enum_nested_loop_state rc= NESTED_LOOP_OK;
bool outer_join_first_inner= join_tab->is_first_inner_for_outer_join();
+ DBUG_ENTER("JOIN_CACHE::join_records");
if (outer_join_first_inner && !join_tab->first_unmatched)
join_tab->not_null_compl= TRUE;
@@ -2103,7 +2134,8 @@ enum_nested_loop_state JOIN_CACHE::join_records(bool skip_last)
finish:
restore_last_record();
reset(TRUE);
- return rc;
+ DBUG_PRINT("exit", ("rc: %d", rc));
+ DBUG_RETURN(rc);
}
@@ -2165,10 +2197,11 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last)
join_tab->table->null_row= 0;
bool check_only_first_match= join_tab->check_only_first_match();
bool outer_join_first_inner= join_tab->is_first_inner_for_outer_join();
+ DBUG_ENTER("JOIN_CACHE::join_matching_records");
/* Return at once if there are no records in the join buffer */
if (!records)
- return NESTED_LOOP_OK;
+ DBUG_RETURN(NESTED_LOOP_OK);
/*
When joining we read records from the join buffer back into record buffers.
@@ -2242,7 +2275,7 @@ finish:
rc= error < 0 ? NESTED_LOOP_NO_MORE_ROWS: NESTED_LOOP_ERROR;
finish2:
join_tab_scan->close();
- return rc;
+ DBUG_RETURN(rc);
}
@@ -2324,6 +2357,7 @@ bool JOIN_CACHE::set_match_flag_if_none(JOIN_TAB *first_inner,
enum_nested_loop_state JOIN_CACHE::generate_full_extensions(uchar *rec_ptr)
{
enum_nested_loop_state rc= NESTED_LOOP_OK;
+ DBUG_ENTER("JOIN_CACHE::generate_full_extensions");
/*
Check whether the extended partial join record meets
@@ -2341,16 +2375,18 @@ enum_nested_loop_state JOIN_CACHE::generate_full_extensions(uchar *rec_ptr)
if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
{
reset(TRUE);
- return rc;
+ DBUG_RETURN(rc);
}
}
if (res == -1)
{
rc= NESTED_LOOP_ERROR;
- return rc;
+ DBUG_RETURN(rc);
}
}
- return rc;
+ else if (join->thd->is_error())
+ rc= NESTED_LOOP_ERROR;
+ DBUG_RETURN(rc);
}
@@ -2375,16 +2411,20 @@ enum_nested_loop_state JOIN_CACHE::generate_full_extensions(uchar *rec_ptr)
RETURN VALUE
TRUE there is a match
FALSE there is no match
+ In this case the caller must also check thd->is_error() to see
+ if there was a fatal error for the query.
*/
inline bool JOIN_CACHE::check_match(uchar *rec_ptr)
{
/* Check whether pushdown conditions are satisfied */
+  DBUG_ENTER("JOIN_CACHE::check_match");
+
if (join_tab->select && join_tab->select->skip_record(join->thd) <= 0)
- return FALSE;
+ DBUG_RETURN(FALSE);
if (!join_tab->is_last_inner_table())
- return TRUE;
+ DBUG_RETURN(TRUE);
/*
This is the last inner table of an outer join,
@@ -2397,7 +2437,7 @@ inline bool JOIN_CACHE::check_match(uchar *rec_ptr)
set_match_flag_if_none(first_inner, rec_ptr);
if (first_inner->check_only_first_match() &&
!join_tab->first_inner)
- return TRUE;
+ DBUG_RETURN(TRUE);
/*
This is the first match for the outer table row.
The function set_match_flag_if_none has turned the flag
@@ -2411,13 +2451,12 @@ inline bool JOIN_CACHE::check_match(uchar *rec_ptr)
for (JOIN_TAB *tab= first_inner; tab <= join_tab; tab++)
{
if (tab->select && tab->select->skip_record(join->thd) <= 0)
- return FALSE;
+ DBUG_RETURN(FALSE);
}
}
while ((first_inner= first_inner->first_upper) &&
first_inner->last_inner == join_tab);
-
- return TRUE;
+ DBUG_RETURN(TRUE);
}
@@ -2452,10 +2491,11 @@ enum_nested_loop_state JOIN_CACHE::join_null_complements(bool skip_last)
ulonglong cnt;
enum_nested_loop_state rc= NESTED_LOOP_OK;
bool is_first_inner= join_tab == join_tab->first_unmatched;
+ DBUG_ENTER("JOIN_CACHE::join_null_complements");
/* Return at once if there are no records in the join buffer */
if (!records)
- return NESTED_LOOP_OK;
+ DBUG_RETURN(NESTED_LOOP_OK);
cnt= records - (is_key_access() ? 0 : test(skip_last));
@@ -2485,7 +2525,7 @@ enum_nested_loop_state JOIN_CACHE::join_null_complements(bool skip_last)
}
finish:
- return rc;
+ DBUG_RETURN(rc);
}
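
A note on the checks added throughout JOIN_CACHE::write_record_data(): every new DBUG_ASSERT enforces the same invariant, namely that a copy into the join buffer must end no later than buff + buff_size before the write pointer cp is advanced. Below is a minimal self-contained sketch of that invariant; the helper and its name are illustrative, only buff, buff_size and cp are taken from the hunks above.

    #include <assert.h>
    #include <string.h>

    /* Copy len bytes into the join buffer and advance the write pointer,
       asserting the same bound the new DBUG_ASSERTs check. */
    static unsigned char *copy_to_join_buffer(unsigned char *cp,
                                              const void *src, size_t len,
                                              const unsigned char *buff,
                                              size_t buff_size)
    {
      assert(cp + len <= buff + buff_size);  /* write must stay inside buffer */
      memcpy(cp, src, len);
      return cp + len;                       /* next field is written here */
    }
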
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index f3dfd95815e..a9d542756fb 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -25,8 +25,8 @@
#include "item_create.h"
#include <m_ctype.h>
#include <hash.h>
-#include "sp.h"
#include "sp_head.h"
+#include "sp.h"
#include "sql_select.h"
static int lex_one_token(void *arg, void *yythd);
@@ -503,6 +503,7 @@ void lex_start(THD *thd)
lex->event_parse_data= NULL;
lex->profile_options= PROFILE_NONE;
lex->nest_level=0 ;
+ lex->select_lex.nest_level_base= &lex->unit;
lex->allow_sum_func= 0;
lex->in_sum_func= NULL;
/*
@@ -1499,39 +1500,39 @@ int lex_one_token(void *arg, void *yythd)
lip->save_in_comment_state();
+ if (lip->yyPeekn(2) == 'M' && lip->yyPeekn(3) == '!')
+ {
+ /* Skip MariaDB unique marker */
+ lip->set_echo(FALSE);
+ lip->yySkip();
+ /* The following if will be true */
+ }
if (lip->yyPeekn(2) == '!')
{
lip->in_comment= DISCARD_COMMENT;
/* Accept '/' '*' '!', but do not keep this marker. */
lip->set_echo(FALSE);
- lip->yySkip();
- lip->yySkip();
- lip->yySkip();
+ lip->yySkipn(3);
/*
The special comment format is very strict:
- '/' '*' '!', followed by exactly
+ '/' '*' '!', followed by an optional 'M' and exactly
1 digit (major), 2 digits (minor), then 2 digits (dot).
32302 -> 3.23.02
50032 -> 5.0.32
50114 -> 5.1.14
*/
- char version_str[6];
- version_str[0]= lip->yyPeekn(0);
- version_str[1]= lip->yyPeekn(1);
- version_str[2]= lip->yyPeekn(2);
- version_str[3]= lip->yyPeekn(3);
- version_str[4]= lip->yyPeekn(4);
- version_str[5]= 0;
- if ( my_isdigit(cs, version_str[0])
- && my_isdigit(cs, version_str[1])
- && my_isdigit(cs, version_str[2])
- && my_isdigit(cs, version_str[3])
- && my_isdigit(cs, version_str[4])
+ if ( my_isdigit(cs, lip->yyPeekn(0))
+ && my_isdigit(cs, lip->yyPeekn(1))
+ && my_isdigit(cs, lip->yyPeekn(2))
+ && my_isdigit(cs, lip->yyPeekn(3))
+ && my_isdigit(cs, lip->yyPeekn(4))
)
{
ulong version;
- version=strtol(version_str, NULL, 10);
+ char *end_ptr= (char*) lip->get_ptr()+5;
+ int error;
+ version= (ulong) my_strtoll10(lip->get_ptr(), &end_ptr, &error);
if (version <= MYSQL_VERSION_ID)
{
@@ -3455,18 +3456,19 @@ void st_select_lex::append_table_to_list(TABLE_LIST *TABLE_LIST::*link,
tl->*link= table;
}
+
/*
@brief
- Remove given table from the leaf_tables list.
+    Replace the given table in the leaf_tables list with a list of tables
- @param link Offset to which list in table structure to use
- @param table Table to remove
+ @param table Table to replace
+  @param tbl_list List of tables to put in place of 'table'
@details
- Remove 'table' from the leaf_tables list using the 'link' offset.
+  Replace 'table' in the leaf_tables list with the list of tables 'tbl_list'.
*/
-void st_select_lex::remove_table_from_list(TABLE_LIST *table)
+void st_select_lex::replace_leaf_table(TABLE_LIST *table, List<TABLE_LIST> &tbl_list)
{
TABLE_LIST *tl;
List_iterator<TABLE_LIST> ti(leaf_tables);
@@ -3474,7 +3476,7 @@ void st_select_lex::remove_table_from_list(TABLE_LIST *table)
{
if (tl == table)
{
- ti.remove();
+ ti.replace(tbl_list);
break;
}
}
@@ -3579,8 +3581,6 @@ bool SELECT_LEX::merge_subquery(THD *thd, TABLE_LIST *derived,
uint table_no, table_map map)
{
derived->wrap_into_nested_join(subq_select->top_join_list);
- /* Reconnect the next_leaf chain. */
- leaf_tables.concat(&subq_select->leaf_tables);
ftfunc_list->concat(subq_select->ftfunc_list);
if (join ||
@@ -3596,18 +3596,14 @@ bool SELECT_LEX::merge_subquery(THD *thd, TABLE_LIST *derived,
in_subq->emb_on_expr_nest= derived;
}
}
- /*
- Remove merged table from chain.
- When merge_subquery is called at a subquery-to-semijoin transformation
- the derived isn't in the leaf_tables list, so in this case the call of
- remove_table_from_list does not cause any actions.
- */
- remove_table_from_list(derived);
/* Walk through child's tables and adjust table map, tablenr,
* parent_lex */
subq_select->remap_tables(derived, map, table_no, this);
subq_select->merged_into= this;
+
+ replace_leaf_table(derived, subq_select->leaf_tables);
+
return FALSE;
}
@@ -3648,10 +3644,33 @@ void SELECT_LEX::update_used_tables()
{
TABLE_LIST *tl;
List_iterator<TABLE_LIST> ti(leaf_tables);
+
while ((tl= ti++))
{
- TABLE_LIST *embedding;
- embedding= tl;
+ if (tl->table && !tl->is_view_or_derived())
+ {
+ TABLE_LIST *embedding= tl->embedding;
+ for (embedding= tl->embedding; embedding; embedding=embedding->embedding)
+ {
+ if (embedding->is_view_or_derived())
+ {
+ DBUG_ASSERT(embedding->is_merged_derived());
+ TABLE *tab= tl->table;
+ tab->covering_keys= tab->s->keys_for_keyread;
+ tab->covering_keys.intersect(tab->keys_in_use_for_query);
+ tab->merge_keys.clear_all();
+ bitmap_clear_all(tab->read_set);
+ bitmap_clear_all(tab->vcol_set);
+ break;
+ }
+ }
+ }
+ }
+
+ ti.rewind();
+ while ((tl= ti++))
+ {
+ TABLE_LIST *embedding= tl;
do
{
bool maybe_null;
@@ -3680,6 +3699,7 @@ void SELECT_LEX::update_used_tables()
embedding= tl->embedding;
}
}
+
if (join->conds)
{
join->conds->update_used_tables();
@@ -3862,6 +3882,47 @@ bool st_select_lex::save_prep_leaf_tables(THD *thd)
}
+/*
+ Return true if this select_lex has been converted into a semi-join nest
+ within 'ancestor'.
+
+ We need a loop to check this because there could be several nested
+ subselects, like
+
+ SELECT ... FROM grand_parent
+ WHERE expr1 IN (SELECT ... FROM parent
+                       WHERE expr2 IN (SELECT ... FROM child))
+
+ which were converted into:
+
+ SELECT ...
+ FROM grand_parent SEMI_JOIN (parent JOIN child)
+ WHERE
+ expr1 AND expr2
+
+  In this case, both the parent and the child select were merged into the grand_parent select.
+*/
+
+bool st_select_lex::is_merged_child_of(st_select_lex *ancestor)
+{
+ bool all_merged= TRUE;
+ for (SELECT_LEX *sl= this; sl && sl!=ancestor;
+ sl=sl->outer_select())
+ {
+ Item *subs= sl->master_unit()->item;
+ if (subs && subs->type() == Item::SUBSELECT_ITEM &&
+ ((Item_subselect*)subs)->substype() == Item_subselect::IN_SUBS &&
+ ((Item_in_subselect*)subs)->test_strategy(SUBS_SEMI_JOIN))
+ {
+ continue;
+ }
+ all_merged= FALSE;
+ break;
+ }
+ return all_merged;
+}
+
+
/**
A routine used by the parser to decide whether we are specifying a full
partitioning or if only partitions to add or to split.
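
The lexer change above extends executable comments: '/*!' may now be followed by an 'M' marker so that a construct can be gated on a MariaDB version, using the same 5-digit encoding (for example 50302 reads as 5.3.2). A rough stand-alone sketch of that version gate, assuming only what the hunk describes; the helper name is made up, not server code.

    #include <ctype.h>
    #include <stdlib.h>

    // Decide whether the body of a versioned comment should be executed.
    // 'digits' points at the 5 version digits that follow the comment marker.
    static bool exec_comment_applies(const char *digits,
                                     unsigned long server_version)
    {
      for (int i= 0; i < 5; i++)
        if (!isdigit((unsigned char) digits[i]))
          return false;                 // not a versioned comment at all
      char buf[6]= { digits[0], digits[1], digits[2], digits[3], digits[4], 0 };
      return strtoul(buf, NULL, 10) <= server_version;   // e.g. 50302 <= 50505
    }
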
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index bbc91c2f987..c776dfc5a8c 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -802,6 +802,13 @@ public:
ulong table_join_options;
uint in_sum_expr;
uint select_number; /* number of select (used for EXPLAIN) */
+
+ /*
+ nest_levels are local to the query or VIEW,
+    and the view merge procedure does not re-calculate them.
+    So we also have to remember the unit against which we count levels.
+ */
+ SELECT_LEX_UNIT *nest_level_base;
int nest_level; /* nesting level of select */
Item_sum *inner_sum_func_list; /* list of sum func in nested selects */
uint with_wild; /* item list contain '*' */
@@ -990,7 +997,7 @@ public:
bool handle_derived(LEX *lex, uint phases);
void append_table_to_list(TABLE_LIST *TABLE_LIST::*link, TABLE_LIST *table);
bool get_free_table_map(table_map *map, uint *tablenr);
- void remove_table_from_list(TABLE_LIST *table);
+ void replace_leaf_table(TABLE_LIST *table, List<TABLE_LIST> &tbl_list);
void remap_tables(TABLE_LIST *derived, table_map map,
uint tablenr, st_select_lex *parent_lex);
bool merge_subquery(THD *thd, TABLE_LIST *derived, st_select_lex *subq_lex,
@@ -1009,6 +1016,7 @@ public:
bool save_leaf_tables(THD *thd);
bool save_prep_leaf_tables(THD *thd);
+ bool is_merged_child_of(st_select_lex *ancestor);
/*
For MODE_ONLY_FULL_GROUP_BY we need to maintain two flags:
@@ -2377,6 +2385,9 @@ struct LEX: public Query_tables_list
USER_RESOURCES mqh;
LEX_RESET_SLAVE reset_slave_info;
ulong type;
+ /* The following is used by KILL */
+ killed_state kill_signal;
+ killed_type kill_type;
/*
This variable is used in post-parse stage to declare that sum-functions,
or functions which have sense only if GROUP BY is present, are allowed.
diff --git a/sql/sql_list.h b/sql/sql_list.h
index 5441fec5f63..894edc4516d 100644
--- a/sql/sql_list.h
+++ b/sql/sql_list.h
@@ -314,6 +314,26 @@ public:
friend class error_list;
friend class error_list_iterator;
+ /*
+    Debugging help: return the n-th (0-based) element of the list, or NULL
+    if the list has fewer than n+1 elements.
+ */
+ inline void *nth_element(int n)
+ {
+ list_node *node= first;
+ void *data= NULL;
+ for (int i=0; i <= n; i++)
+ {
+ if (node == &end_of_list)
+ {
+ data= NULL;
+ break;
+ }
+ data= node->info;
+ node= node->next;
+ }
+ return data;
+ }
#ifdef LIST_EXTRA_DEBUG
/*
Check list invariants and print results into trace. Invariants are:
@@ -492,6 +512,7 @@ public:
}
empty();
}
+ inline T *nth_element(int n) { return (T*)base_list::nth_element(n); }
};
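
The nth_element() helpers added above use a 0-based index and return NULL once the list runs out. A self-contained illustration of those semantics on a plain singly-linked list (stand-in types, not the server's base_list):

    #include <cstdio>

    struct node { int info; node *next; };

    // Same contract as the debugging helper above: 0-based, NULL if too short.
    static int *nth_element(node *first, int n)
    {
      node *p= first;
      for (int i= 0; i < n && p; i++)
        p= p->next;
      return p ? &p->info : nullptr;
    }

    int main()
    {
      node c= {3, nullptr}, b= {2, &c}, a= {1, &b};
      std::printf("%d\n", *nth_element(&a, 2));                    // prints 3
      std::printf("%s\n", nth_element(&a, 5) ? "found" : "NULL");  // prints NULL
      return 0;
    }
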
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 6f84f68eeaa..bc926118723 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -179,7 +179,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
bool is_fifo=0;
#ifndef EMBEDDED_LIBRARY
LOAD_FILE_INFO lf_info;
- THD::killed_state killed_status= THD::NOT_KILLED;
+ killed_state killed_status;
bool is_concurrent;
#endif
char *db = table_list->db; // This is never null
@@ -529,11 +529,11 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
DBUG_EXECUTE_IF("simulate_kill_bug27571",
{
error=1;
- thd->killed= THD::KILL_QUERY;
+ thd->killed= KILL_QUERY;
};);
#ifndef EMBEDDED_LIBRARY
- killed_status= (error == 0) ? THD::NOT_KILLED : thd->killed;
+ killed_status= (error == 0) ? NOT_KILLED : thd->killed;
#endif
/*
@@ -577,8 +577,8 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
/* If the file was not empty, wrote_create_file is true */
if (lf_info.wrote_create_file)
{
- int errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
-
+ int errcode= query_error_code(thd, killed_status == NOT_KILLED);
+
/* since there is already an error, the possible error of
writing binary log will be ignored */
if (thd->transaction.stmt.modified_non_trans_table)
@@ -630,7 +630,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
read_info.end_io_cache();
if (lf_info.wrote_create_file)
{
- int errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
+ int errcode= query_error_code(thd, killed_status == NOT_KILLED);
error= write_execute_load_query_log_event(thd, ex,
table_list->db, table_list->table_name,
is_concurrent,
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 54b1e854ac1..714542ab0af 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -118,7 +118,8 @@
"FUNCTION" : "PROCEDURE")
static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables);
-static void sql_kill(THD *thd, ulong id, bool only_kill_query);
+static void sql_kill(THD *thd, ulong id, killed_state state);
+static void sql_kill_user(THD *thd, LEX_USER *user, killed_state state);
static bool execute_show_status(THD *, TABLE_LIST *);
static bool execute_rename_table(THD *, TABLE_LIST *, TABLE_LIST *);
@@ -1362,7 +1363,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
{
status_var_increment(thd->status_var.com_stat[SQLCOM_KILL]);
ulong id=(ulong) uint4korr(packet);
- sql_kill(thd,id,false);
+ sql_kill(thd,id, KILL_CONNECTION_HARD);
break;
}
case COM_SET_OPTION:
@@ -1443,6 +1444,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
}
MYSQL_COMMAND_DONE(res);
}
+
+ /* Check that some variables are reset properly */
+ DBUG_ASSERT(thd->abort_on_warning == 0);
DBUG_RETURN(error);
}
@@ -3653,8 +3657,6 @@ end_with_restore_list:
}
case SQLCOM_KILL:
{
- Item *it= (Item *)lex->value_list.head();
-
if (lex->table_or_sp_used())
{
my_error(ER_NOT_SUPPORTED_YET, MYF(0), "Usage of subqueries or stored "
@@ -3662,13 +3664,20 @@ end_with_restore_list:
break;
}
- if ((!it->fixed && it->fix_fields(lex->thd, &it)) || it->check_cols(1))
+ if (lex->kill_type == KILL_TYPE_ID)
{
- my_message(ER_SET_CONSTANTS_ONLY, ER(ER_SET_CONSTANTS_ONLY),
- MYF(0));
- goto error;
+ Item *it= (Item *)lex->value_list.head();
+ if ((!it->fixed && it->fix_fields(lex->thd, &it)) || it->check_cols(1))
+ {
+ my_message(ER_SET_CONSTANTS_ONLY, ER(ER_SET_CONSTANTS_ONLY),
+ MYF(0));
+ goto error;
+ }
+ sql_kill(thd, (ulong) it->val_int(), lex->kill_signal);
}
- sql_kill(thd, (ulong)it->val_int(), lex->type & ONLY_KILL_QUERY);
+ else
+ sql_kill_user(thd, get_current_user(thd, lex->users_list.head()),
+ lex->kill_signal);
break;
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -3740,7 +3749,17 @@ end_with_restore_list:
}
/* Disconnect the current client connection. */
if (tx_release)
- thd->killed= THD::KILL_CONNECTION;
+ {
+ thd->killed= KILL_CONNECTION;
+ if (global_system_variables.log_warnings > 3)
+ {
+ Security_context *sctx= &thd->main_security_ctx;
+ sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION),
+ thd->thread_id,(thd->db ? thd->db : "unconnected"),
+ sctx->user ? sctx->user : "unauthenticated",
+ sctx->host_or_ip, "RELEASE");
+ }
+ }
my_ok(thd);
break;
}
@@ -3770,7 +3789,7 @@ end_with_restore_list:
}
/* Disconnect the current client connection. */
if (tx_release)
- thd->killed= THD::KILL_CONNECTION;
+ thd->killed= KILL_CONNECTION;
my_ok(thd);
break;
}
@@ -4035,7 +4054,8 @@ create_sp_error:
case SQLCOM_ALTER_FUNCTION:
{
int sp_result;
- int type= (lex->sql_command == SQLCOM_ALTER_PROCEDURE ?
+ enum stored_procedure_type type;
+ type= (lex->sql_command == SQLCOM_ALTER_PROCEDURE ?
TYPE_ENUM_PROCEDURE : TYPE_ENUM_FUNCTION);
if (check_routine_access(thd, ALTER_PROC_ACL, lex->spname->m_db.str,
@@ -4112,7 +4132,8 @@ create_sp_error:
#endif
int sp_result;
- int type= (lex->sql_command == SQLCOM_DROP_PROCEDURE ?
+ enum stored_procedure_type type;
+ type= (lex->sql_command == SQLCOM_DROP_PROCEDURE ?
TYPE_ENUM_PROCEDURE : TYPE_ENUM_FUNCTION);
char *db= lex->spname->m_db.str;
char *name= lex->spname->m_name.str;
@@ -4199,7 +4220,7 @@ create_sp_error:
{
#ifndef DBUG_OFF
sp_head *sp;
- int type= (lex->sql_command == SQLCOM_SHOW_PROC_CODE ?
+ stored_procedure_type type= (lex->sql_command == SQLCOM_SHOW_PROC_CODE ?
TYPE_ENUM_PROCEDURE : TYPE_ENUM_FUNCTION);
if (sp_cache_routine(thd, type, lex->spname, FALSE, &sp))
@@ -4445,9 +4466,9 @@ finish:
if (! thd->stmt_da->is_set())
thd->send_kill_message();
}
- if (thd->killed == THD::KILL_QUERY || thd->killed == THD::KILL_BAD_DATA)
+ if (thd->killed < KILL_CONNECTION)
{
- thd->killed= THD::NOT_KILLED;
+ thd->killed= NOT_KILLED;
thd->mysys_var->abort= 0;
}
if (thd->is_error() || (thd->variables.option_bits & OPTION_MASTER_SQL_ERROR))
@@ -4518,7 +4539,6 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
param->select_limit=
new Item_int((ulonglong) thd->variables.select_limit);
}
- thd->thd_marker.emb_on_expr_nest= NULL;
if (!(res= open_and_lock_tables(thd, all_tables, TRUE, 0)))
{
if (lex->describe)
@@ -5418,7 +5438,6 @@ void THD::reset_for_next_command(bool calculate_userstat)
thd->query_plan_flags= QPLAN_INIT;
thd->query_plan_fsort_passes= 0;
- thd->thd_marker.emb_on_expr_nest= NULL;
thd->reset_current_stmt_binlog_format_row();
thd->binlog_unsafe_warning_flags= 0;
@@ -5485,6 +5504,7 @@ mysql_new_select(LEX *lex, bool move_down)
DBUG_RETURN(1);
}
select_lex->nest_level= lex->nest_level;
+ select_lex->nest_level_base= &thd->lex->unit;
if (move_down)
{
SELECT_LEX_UNIT *unit;
@@ -6508,12 +6528,13 @@ void add_join_natural(TABLE_LIST *a, TABLE_LIST *b, List<String> *using_fields,
This is written such that we have a short lock on LOCK_thread_count
*/
-uint kill_one_thread(THD *thd, ulong id, bool only_kill_query)
+uint kill_one_thread(THD *thd, ulong id, killed_state kill_signal)
{
THD *tmp;
uint error=ER_NO_SUCH_THREAD;
DBUG_ENTER("kill_one_thread");
- DBUG_PRINT("enter", ("id=%lu only_kill=%d", id, only_kill_query));
+ DBUG_PRINT("enter", ("id: %lu signal: %u", id, (uint) kill_signal));
+
mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
I_List_iterator<THD> it(threads);
while ((tmp=it++))
@@ -6544,12 +6565,16 @@ uint kill_one_thread(THD *thd, ulong id, bool only_kill_query)
If user of both killer and killee are non-NULL, proceed with
slayage if both are string-equal.
+
+ It's ok to also kill DELAYED threads with KILL_CONNECTION instead of
+    KILL_SYSTEM_THREAD; the difference is that KILL_CONNECTION may be
+    faster and do a harder kill than KILL_SYSTEM_THREAD.
*/
if ((thd->security_ctx->master_access & SUPER_ACL) ||
thd->security_ctx->user_matches(tmp->security_ctx))
{
- tmp->awake(only_kill_query ? THD::KILL_QUERY : THD::KILL_CONNECTION);
+ tmp->awake(kill_signal);
error=0;
}
else
@@ -6561,6 +6586,76 @@ uint kill_one_thread(THD *thd, ulong id, bool only_kill_query)
}
+/**
+ kill all threads from one user
+
+ @param thd Thread class
+  @param user         User name/host of the threads we should kill
+  @param kill_signal  Signal to send: kill only the query or the whole connection
+
+ @note
+ This is written such that we have a short lock on LOCK_thread_count
+
+ If we can't kill all threads because of security issues, no threads
+ are killed.
+*/
+
+static uint kill_threads_for_user(THD *thd, LEX_USER *user,
+ killed_state kill_signal, ha_rows *rows)
+{
+ THD *tmp;
+ List<THD> threads_to_kill;
+ DBUG_ENTER("kill_threads_for_user");
+
+ *rows= 0;
+
+ if (thd->is_fatal_error) // If we run out of memory
+ DBUG_RETURN(ER_OUT_OF_RESOURCES);
+
+ DBUG_PRINT("enter", ("user: %s signal: %u", user->user.str,
+ (uint) kill_signal));
+
+ mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
+ I_List_iterator<THD> it(threads);
+ while ((tmp=it++))
+ {
+ if (tmp->command == COM_DAEMON)
+ continue;
+ /*
+ Check that hostname (if given) and user name matches.
+
+ host.str[0] == '%' means that host name was not given. See sql_yacc.yy
+ */
+ if (((user->host.str[0] == '%' && !user->host.str[1]) ||
+ !strcmp(tmp->security_ctx->host, user->host.str)) &&
+ !strcmp(tmp->security_ctx->user, user->user.str))
+ {
+ if (!(thd->security_ctx->master_access & SUPER_ACL) &&
+ !thd->security_ctx->user_matches(tmp->security_ctx))
+ {
+ mysql_mutex_unlock(&LOCK_thread_count);
+ DBUG_RETURN(ER_KILL_DENIED_ERROR);
+ }
+ if (!threads_to_kill.push_back(tmp, tmp->mem_root))
+ mysql_mutex_lock(&tmp->LOCK_thd_data); // Lock from delete
+ }
+ }
+ mysql_mutex_unlock(&LOCK_thread_count);
+ if (!threads_to_kill.is_empty())
+ {
+ List_iterator_fast<THD> it(threads_to_kill);
+ THD *ptr;
+ while ((ptr= it++))
+ {
+ ptr->awake(kill_signal);
+ mysql_mutex_unlock(&ptr->LOCK_thd_data);
+ (*rows)++;
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
/*
kills a thread and sends response
@@ -6572,10 +6667,10 @@ uint kill_one_thread(THD *thd, ulong id, bool only_kill_query)
*/
static
-void sql_kill(THD *thd, ulong id, bool only_kill_query)
+void sql_kill(THD *thd, ulong id, killed_state state)
{
uint error;
- if (!(error= kill_one_thread(thd, id, only_kill_query)))
+ if (!(error= kill_one_thread(thd, id, state)))
{
if (! thd->killed)
my_ok(thd);
@@ -6585,6 +6680,24 @@ void sql_kill(THD *thd, ulong id, bool only_kill_query)
}
+static
+void sql_kill_user(THD *thd, LEX_USER *user, killed_state state)
+{
+ uint error;
+ ha_rows rows;
+ if (!(error= kill_threads_for_user(thd, user, state, &rows)))
+ my_ok(thd, rows);
+ else
+ {
+ /*
+ This is probably ER_OUT_OF_RESOURCES, but in the future we may
+ want to write the name of the user we tried to kill
+ */
+ my_error(error, MYF(0), user->host.str, user->user.str);
+ }
+}
+
+
/** If pointer is not a null pointer, append filename to it. */
bool append_file_to_dir(THD *thd, const char **filename_ptr,
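
kill_threads_for_user() above follows a two-phase pattern: matching THDs are collected while LOCK_thread_count is held, each victim's LOCK_thd_data is taken so it cannot be deleted, and the kill signal is delivered only after the global lock has been released. A simplified sketch of that locking pattern with stand-in types (std::mutex instead of mysql_mutex_t, a vector instead of I_List; THD::awake() is reduced to a flag):

    #include <mutex>
    #include <vector>

    struct session { std::mutex lock; bool killed= false; };

    static std::mutex list_lock;             // plays the role of LOCK_thread_count
    static std::vector<session*> sessions;   // plays the role of 'threads'

    static size_t kill_matching(bool (*matches)(const session*))
    {
      std::vector<session*> victims;
      {
        std::lock_guard<std::mutex> g(list_lock);   // keep the global lock short
        for (session *s : sessions)
          if (matches(s))
          {
            s->lock.lock();                         // pin the victim (LOCK_thd_data)
            victims.push_back(s);
          }
      }
      size_t n= 0;
      for (session *s : victims)                    // signal outside the list lock
      {
        s->killed= true;                            // stands in for THD::awake()
        s->lock.unlock();
        n++;
      }
      return n;
    }
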
diff --git a/sql/sql_parse.h b/sql/sql_parse.h
index 2c31f71af53..4510ebe94e2 100644
--- a/sql/sql_parse.h
+++ b/sql/sql_parse.h
@@ -50,8 +50,6 @@ bool parse_sql(THD *thd,
Parser_state *parser_state,
Object_creation_ctx *creation_ctx);
-uint kill_one_thread(THD *thd, ulong id, bool only_kill_query);
-
void free_items(Item *item);
void cleanup_items(Item *item);
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 06ead26414c..3024d9d2e46 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -3449,6 +3449,19 @@ static int construct_options(MEM_ROOT *mem_root, struct st_plugin_int *tmp,
opt->name, plugin_name);
}
}
+ /*
+    PLUGIN_VAR_STR command-line options without PLUGIN_VAR_MEMALLOC point
+    directly to values in the argv[] array. For plugins started at
+    server startup, the argv[] array is allocated with load_defaults() and
+    freed when the server is shut down. But for plugins loaded with
+    INSTALL PLUGIN, the memory allocated with load_defaults() is freed
+    at the end of mysql_install_plugin(), which means we cannot allow
+    any pointers into that area.
+    Thus, for all plugins loaded after the server was started,
+    we force all command-line options to be PLUGIN_VAR_MEMALLOC.
+ */
+ if (mysqld_server_started && !(opt->flags & PLUGIN_VAR_NOCMDOPT))
+ opt->flags|= PLUGIN_VAR_MEMALLOC;
break;
case PLUGIN_VAR_ENUM:
if (!opt->check)
@@ -3610,6 +3623,8 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
*/
if (!my_strcasecmp(&my_charset_latin1, tmp->name.str, "ndbcluster"))
plugin_load_option= PLUGIN_OFF;
+ if (!my_strcasecmp(&my_charset_latin1, tmp->name.str, "feedback"))
+ plugin_load_option= PLUGIN_OFF;
for (opt= tmp->plugin->system_vars; opt && *opt; opt++)
count+= 2; /* --{plugin}-{optname} and --plugin-{plugin}-{optname} */
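
The comment added above explains why string options of plugins installed after startup must not point into the load_defaults() arena, and why PLUGIN_VAR_MEMALLOC is forced for them. On the plugin side the same behaviour can be requested explicitly when declaring a string system variable; a sketch, assuming the stock plugin API macros (the variable and its name are made up):

    #include <mysql/plugin.h>

    static char *example_path;     /* hypothetical string option of a plugin */

    static MYSQL_SYSVAR_STR(path, example_path,
                            PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC,
                            "Path used by the example plugin; the value is "
                            "copied, so it never points into argv[]",
                            NULL, NULL, "");

    static struct st_mysql_sys_var *example_system_variables[]=
    {
      MYSQL_SYSVAR(path),
      NULL
    };
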
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index ef6dd519d5d..f35d336fd65 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1504,7 +1504,6 @@ static int mysql_test_select(Prepared_statement *stmt,
goto error;
thd->lex->used_tables= 0; // Updated by setup_fields
- thd->thd_marker.emb_on_expr_nest= 0;
/*
JOIN::prepare calls
diff --git a/sql/sql_priv.h b/sql/sql_priv.h
index 33c5881d4f4..30c72d603f4 100644
--- a/sql/sql_priv.h
+++ b/sql/sql_priv.h
@@ -207,7 +207,11 @@ enabled by default, add OPTIMIZER_SWITCH_MATERIALIZATION
OPTIMIZER_SWITCH_PARTIAL_MATCH_TABLE_SCAN|\
OPTIMIZER_SWITCH_JOIN_CACHE_INCREMENTAL | \
OPTIMIZER_SWITCH_JOIN_CACHE_HASHED | \
- OPTIMIZER_SWITCH_JOIN_CACHE_BKA)
+ OPTIMIZER_SWITCH_JOIN_CACHE_BKA | \
+ OPTIMIZER_SWITCH_SUBQUERY_CACHE |\
+ OPTIMIZER_SWITCH_SEMIJOIN | \
+ OPTIMIZER_SWITCH_FIRSTMATCH | \
+ OPTIMIZER_SWITCH_LOOSE_SCAN )
/*
Replication uses 8 bytes to store SQL_MODE in the binary log. The day you
use strictly more than 64 bits by adding one more define above, you should
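
The default optimizer_switch set extended above now also enables subquery_cache, semijoin, firstmatch and loose_scan. For reference, a small self-contained illustration of how such a bitmask default is composed and tested (the bit values below are illustrative, not the real OPTIMIZER_SWITCH_* layout):

    #include <cstdio>

    typedef unsigned long long sw_t;
    static const sw_t SW_SEMIJOIN= 1ULL << 0, SW_FIRSTMATCH= 1ULL << 1,
                      SW_LOOSE_SCAN= 1ULL << 2, SW_SUBQUERY_CACHE= 1ULL << 3;
    static const sw_t SW_DEFAULT= SW_SEMIJOIN | SW_FIRSTMATCH |
                                  SW_LOOSE_SCAN | SW_SUBQUERY_CACHE;

    int main()
    {
      sw_t optimizer_switch= SW_DEFAULT;
      std::printf("semijoin enabled by default: %d\n",
                  (optimizer_switch & SW_SEMIJOIN) != 0);
      return 0;
    }
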
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 91c78944205..16977d89cd6 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -1505,9 +1505,9 @@ err:
idle, then this could last long, and if the slave reconnects, we could have 2
Binlog_dump threads in SHOW PROCESSLIST, until a query is written to the
binlog. To avoid this, when the slave reconnects and sends COM_BINLOG_DUMP,
- the master kills any existing thread with the slave's server id (if this id is
- not zero; it will be true for real slaves, but false for mysqlbinlog when it
- sends COM_BINLOG_DUMP to get a remote binlog dump).
+ the master kills any existing thread with the slave's server id (if this id
+ is not zero; it will be true for real slaves, but false for mysqlbinlog when
+ it sends COM_BINLOG_DUMP to get a remote binlog dump).
SYNOPSIS
kill_zombie_dump_threads()
@@ -1539,7 +1539,7 @@ void kill_zombie_dump_threads(uint32 slave_server_id)
it will be slow because it will iterate through the list
again. We just to do kill the thread ourselves.
*/
- tmp->awake(THD::KILL_QUERY);
+ tmp->awake(KILL_QUERY);
mysql_mutex_unlock(&tmp->LOCK_thd_data);
}
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 5499806db81..4ce278c98ef 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -190,14 +190,14 @@ int join_read_always_key_or_null(JOIN_TAB *tab);
int join_read_next_same_or_null(READ_RECORD *info);
static COND *make_cond_for_table(THD *thd, Item *cond,table_map table,
table_map used_table,
- uint join_tab_idx_arg,
+ int join_tab_idx_arg,
bool exclude_expensive_cond,
bool retain_ref_cond);
static COND *make_cond_for_table_from_pred(THD *thd, Item *root_cond,
Item *cond,
table_map tables,
table_map used_table,
- uint join_tab_idx_arg,
+ int join_tab_idx_arg,
bool exclude_expensive_cond,
bool retain_ref_cond);
@@ -714,6 +714,10 @@ JOIN::prepare(Item ***rref_pointer_array,
aggregate functions with implicit grouping (there is no GROUP BY).
*/
if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY && !group_list &&
+ !(select_lex->master_unit()->item &&
+ select_lex->master_unit()->item->is_in_predicate() &&
+ ((Item_in_subselect*)select_lex->master_unit()->item)->
+ test_set_strategy(SUBS_MAXMIN_INJECTED)) &&
select_lex->non_agg_field_used() &&
select_lex->agg_func_used())
{
@@ -839,7 +843,7 @@ inject_jtbm_conds(JOIN *join, List<TABLE_LIST> *join_list, Item **join_where)
double rows;
double read_time;
- subq_pred->in_strategy &= ~SUBS_IN_TO_EXISTS;
+ DBUG_ASSERT(subq_pred->test_set_strategy(SUBS_MATERIALIZATION));
subq_pred->optimize(&rows, &read_time);
subq_pred->jtbm_read_time= read_time;
@@ -928,9 +932,6 @@ JOIN::optimize()
/* dump_TABLE_LIST_graph(select_lex, select_lex->leaf_tables); */
select_lex->update_used_tables();
- /* Save this info for the next executions */
- if (select_lex->save_leaf_tables(thd))
- DBUG_RETURN(1);
}
eval_select_list_used_tables();
@@ -990,6 +991,8 @@ JOIN::optimize()
/* Convert all outer joins to inner joins if possible */
conds= simplify_joins(this, join_list, conds, TRUE, FALSE);
+ if (select_lex->save_leaf_tables(thd))
+ DBUG_RETURN(1);
build_bitmap_for_nested_joins(join_list, 0);
sel->prep_where= conds ? conds->copy_andor_structure(thd) : 0;
@@ -1118,7 +1121,7 @@ JOIN::optimize()
if (conds && !(thd->lex->describe & DESCRIBE_EXTENDED))
{
COND *table_independent_conds=
- make_cond_for_table(thd, conds, PSEUDO_TABLE_BITS, 0, MAX_TABLES,
+ make_cond_for_table(thd, conds, PSEUDO_TABLE_BITS, 0, -1,
FALSE, FALSE);
DBUG_EXECUTE("where",
print_where(table_independent_conds,
@@ -2424,7 +2427,6 @@ JOIN::exec()
thd_proc_info(thd, "Copying to group table");
DBUG_PRINT("info", ("%s", thd->proc_info));
- tmp_error= -1;
if (curr_join != this)
{
if (sum_funcs2)
@@ -2451,6 +2453,7 @@ JOIN::exec()
JOIN_TAB *first_tab= curr_join->join_tab + curr_join->const_tables;
first_tab->sorted= test(first_tab->loosescan_match_tab);
}
+ tmp_error= -1;
if (setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) ||
(tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table,
0)))
@@ -2581,7 +2584,7 @@ JOIN::exec()
Item* sort_table_cond= make_cond_for_table(thd, curr_join->tmp_having,
used_tables,
- (table_map)0, MAX_TABLES,
+ (table_map)0, -1,
FALSE, FALSE);
if (sort_table_cond)
{
@@ -2611,7 +2614,9 @@ JOIN::exec()
if (curr_table->pre_idx_push_select_cond &&
!curr_table->pre_idx_push_select_cond->fixed)
curr_table->pre_idx_push_select_cond->fix_fields(thd, 0);
-
+
+ curr_table->select->pre_idx_push_select_cond=
+ curr_table->pre_idx_push_select_cond;
curr_table->set_select_cond(curr_table->select->cond, __LINE__);
curr_table->select_cond->top_level_item();
DBUG_EXECUTE("where",print_where(curr_table->select->cond,
@@ -2619,7 +2624,7 @@ JOIN::exec()
QT_ORDINARY););
curr_join->tmp_having= make_cond_for_table(thd, curr_join->tmp_having,
~ (table_map) 0,
- ~used_tables, MAX_TABLES,
+ ~used_tables, -1,
FALSE, FALSE);
DBUG_EXECUTE("where",print_where(curr_join->tmp_having,
"having after sort",
@@ -3004,6 +3009,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
uint i,table_count,const_count,key;
table_map found_const_table_map, all_table_map, found_ref, refs;
key_map const_ref, eq_part;
+ bool has_expensive_keyparts;
TABLE **table_vector;
JOIN_TAB *stat,*stat_end,*s,**stat_ref;
KEYUSE *keyuse,*start_keyuse;
@@ -3191,7 +3197,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
*/
bool skip_unprefixed_keyparts=
!(join->is_in_subquery() &&
- ((Item_in_subselect*)join->unit->item)->in_strategy & SUBS_IN_TO_EXISTS);
+ ((Item_in_subselect*)join->unit->item)->test_strategy(SUBS_IN_TO_EXISTS));
if (keyuse_array->elements &&
sort_and_filter_keyuse(join->thd, keyuse_array,
@@ -3329,12 +3335,17 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
refs=0;
const_ref.clear_all();
eq_part.clear_all();
+ has_expensive_keyparts= false;
do
{
if (keyuse->val->type() != Item::NULL_ITEM && !keyuse->optimize)
{
if (!((~found_const_table_map) & keyuse->used_tables))
+ {
const_ref.set_bit(keyuse->keypart);
+ if (keyuse->val->is_expensive())
+ has_expensive_keyparts= true;
+ }
else
refs|=keyuse->used_tables;
eq_part.set_bit(keyuse->keypart);
@@ -3356,6 +3367,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
if (table->key_info[key].flags & HA_NOSAME)
{
if (const_ref == eq_part &&
+ !has_expensive_keyparts &&
!((outer_join & table->map) &&
(*s->on_expr_ref)->is_expensive()))
{ // Found everything for ref.
@@ -3836,7 +3848,9 @@ add_key_field(JOIN *join,
uint optimize= 0;
if (eq_func &&
((join->is_allowed_hash_join_access() &&
- field->hash_join_is_possible()) ||
+ field->hash_join_is_possible() &&
+ !(field->table->pos_in_table_list->is_materialized_derived() &&
+ field->table->created)) ||
(field->table->pos_in_table_list->is_materialized_derived() &&
!field->table->created)))
{
@@ -5266,7 +5280,7 @@ best_access_path(JOIN *join,
tmp= table->file->keyread_time(key, 1, (ha_rows) tmp);
else
tmp= table->file->read_time(key, 1,
- (ha_rows) min(tmp,s->worst_seeks)-1);
+ (ha_rows) min(tmp,s->worst_seeks));
tmp*= record_count;
}
}
@@ -5430,13 +5444,14 @@ best_access_path(JOIN *join,
tmp= table->file->keyread_time(key, 1, (ha_rows) tmp);
else
tmp= table->file->read_time(key, 1,
- (ha_rows) min(tmp,s->worst_seeks)-1);
+ (ha_rows) min(tmp,s->worst_seeks));
tmp*= record_count;
}
else
tmp= best_time; // Do nothing
}
+ DBUG_ASSERT(tmp > 0 || record_count == 0);
tmp += s->startup_cost;
loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp);
} /* not ft_key */
@@ -6136,7 +6151,7 @@ greedy_search(JOIN *join,
read_time_arg and record_count_arg contain the computed cost and fanout
*/
-void JOIN::get_partial_cost_and_fanout(uint end_tab_idx,
+void JOIN::get_partial_cost_and_fanout(int end_tab_idx,
table_map filter_map,
double *read_time_arg,
double *record_count_arg)
@@ -6146,14 +6161,14 @@ void JOIN::get_partial_cost_and_fanout(uint end_tab_idx,
double sj_inner_fanout= 1.0;
JOIN_TAB *end_tab= NULL;
JOIN_TAB *tab;
- uint i;
- uint last_sj_table= MAX_TABLES;
+ int i;
+ int last_sj_table= MAX_TABLES;
/*
Handle a special case where the join is degenerate, and produces no
records
*/
- if (table_count == 0)
+ if (table_count == const_tables)
{
*read_time_arg= 0.0;
/*
@@ -6163,6 +6178,7 @@ void JOIN::get_partial_cost_and_fanout(uint end_tab_idx,
calculations.
*/
*record_count_arg=1.0;
+ return;
}
for (tab= first_depth_first_tab(this), i= const_tables;
@@ -6175,19 +6191,17 @@ void JOIN::get_partial_cost_and_fanout(uint end_tab_idx,
}
for (tab= first_depth_first_tab(this), i= const_tables;
- (i <= end_tab_idx && tab);
+ ;
tab= next_depth_first_tab(this, tab), i++)
{
- /*
- We've entered the SJM nest that contains the end_tab. The caller is
- actually
- - interested in fanout inside the nest (because that's how many times
- we'll invoke the attached WHERE conditions)
- - not interested in cost
- */
if (end_tab->bush_root_tab && end_tab->bush_root_tab == tab)
{
- /* Ok, end_tab is inside SJM nest and we're entering that nest now */
+ /*
+ We've entered the SJM nest that contains the end_tab. The caller is
+ - interested in fanout inside the nest (because that's how many times
+ we'll invoke the attached WHERE conditions)
+ - not interested in cost
+ */
record_count= 1.0;
read_time= 0.0;
}
@@ -6201,8 +6215,18 @@ void JOIN::get_partial_cost_and_fanout(uint end_tab_idx,
sj_inner_fanout= 1.0;
last_sj_table= i + tab->n_sj_tables;
}
-
- if (tab->records_read && (tab->table->map & filter_map))
+
+ table_map cur_table_map;
+ if (tab->table)
+ cur_table_map= tab->table->map;
+ else
+ {
+ /* This is a SJ-Materialization nest. Check all of its tables */
+ TABLE *first_child= tab->bush_children->start->table;
+ TABLE_LIST *sjm_nest= first_child->pos_in_table_list->embedding;
+ cur_table_map= sjm_nest->nested_join->used_tables;
+ }
+ if (tab->records_read && (cur_table_map & filter_map))
{
record_count *= tab->records_read;
read_time += tab->read_time;
@@ -6216,6 +6240,9 @@ void JOIN::get_partial_cost_and_fanout(uint end_tab_idx,
sj_inner_fanout= 1.0;
last_sj_table= MAX_TABLES;
}
+
+ if (tab == end_tab)
+ break;
}
*read_time_arg= read_time;// + record_count / TIME_FOR_COMPARE;
*record_count_arg= record_count;
@@ -6648,6 +6675,16 @@ void JOIN_TAB::calc_used_field_length(bool max_fl)
rec_length+=(table->s->null_fields+7)/8;
if (table->maybe_null)
rec_length+=sizeof(my_bool);
+
+ /* Take into account that DuplicateElimination may need to store rowid */
+ uint rowid_add_size= 0;
+ if (keep_current_rowid)
+ {
+ rowid_add_size= table->file->ref_length;
+ rec_length += rowid_add_size;
+ fields++;
+ }
+
if (max_fl)
{
// TODO: to improve this estimate for max expected length
@@ -6661,13 +6698,9 @@ void JOIN_TAB::calc_used_field_length(bool max_fl)
}
max_used_fieldlength= rec_length;
}
- else if (table->file->stats.mean_rec_length)
- set_if_smaller(rec_length, table->file->stats.mean_rec_length);
+ else if (table->file->stats.mean_rec_length)
+ set_if_smaller(rec_length, table->file->stats.mean_rec_length + rowid_add_size);
- /*
- TODO: why we don't count here rowid that we might need to store when
- using DuplicateElimination?
- */
used_fields=fields;
used_fieldlength=rec_length;
used_blobs=blobs;
@@ -6704,7 +6737,7 @@ int JOIN_TAB::make_scan_filter()
if (cond &&
(tmp= make_cond_for_table(join->thd, cond,
join->const_table_map | table->map,
- table->map, MAX_TABLES, FALSE, TRUE)))
+ table->map, -1, FALSE, TRUE)))
{
DBUG_EXECUTE("where",print_where(tmp,"cache", QT_ORDINARY););
if (!(cache_select=
@@ -7176,6 +7209,7 @@ get_best_combination(JOIN *join)
goto loop_end; // Handled in make_join_stat..
j->loosescan_match_tab= NULL; //non-nulls will be set later
+ j->inside_loosescan_range= FALSE;
j->ref.key = -1;
j->ref.key_parts=0;
@@ -7309,7 +7343,8 @@ static bool are_tables_local(JOIN_TAB *jtab, table_map used_tables)
except the const tables.
*/
table_map local_tables= jtab->emb_sj_nest->nested_join->used_tables |
- jtab->join->const_table_map;
+ jtab->join->const_table_map |
+ OUTER_REF_TABLE_BIT;
return !test(used_tables & ~local_tables);
}
@@ -7426,7 +7461,8 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
while (((~used_tables) & keyuse->used_tables) ||
(keyuse->keypart !=
(is_hash_join_key_no(key) ?
- keyinfo->key_part[i].field->field_index : i)))
+ keyinfo->key_part[i].field->field_index : i)) ||
+ !are_tables_local(j, keyuse->val->used_tables()))
keyuse++; /* Skip other parts */
uint maybe_null= test(keyinfo->key_part[i].null_bit);
@@ -7990,7 +8026,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
join->exec_const_cond=
make_cond_for_table(thd, cond,
join->const_table_map,
- (table_map) 0, MAX_TABLES, FALSE, FALSE);
+ (table_map) 0, -1, FALSE, FALSE);
/* Add conditions added by add_not_null_conds(). */
for (uint i= 0 ; i < join->const_tables ; i++)
add_cond_and_fix(thd, &join->exec_const_cond,
@@ -8009,7 +8045,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
COND *outer_ref_cond= make_cond_for_table(thd, cond,
OUTER_REF_TABLE_BIT,
OUTER_REF_TABLE_BIT,
- MAX_TABLES, FALSE, FALSE);
+ -1, FALSE, FALSE);
if (outer_ref_cond)
{
add_cond_and_fix(thd, &outer_ref_cond, join->outer_ref_cond);
@@ -8037,7 +8073,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
*/
JOIN_TAB *first_inner_tab= tab->first_inner;
- if (tab->table)
+ if (!tab->bush_children)
current_map= tab->table->map;
else
current_map= tab->bush_children->start->emb_sj_nest->sj_inner_tables;
@@ -8181,7 +8217,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
{
COND *push_cond=
make_cond_for_table(thd, tmp, current_map, current_map,
- MAX_TABLES, FALSE, FALSE);
+ -1, FALSE, FALSE);
if (push_cond)
{
/* Push condition to handler */
@@ -8354,7 +8390,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
JOIN_TAB *cond_tab= join_tab->first_inner;
COND *tmp= make_cond_for_table(thd, *join_tab->on_expr_ref,
join->const_table_map,
- (table_map) 0, MAX_TABLES, FALSE, FALSE);
+ (table_map) 0, -1, FALSE, FALSE);
if (!tmp)
continue;
tmp= new Item_func_trig_cond(tmp, &cond_tab->not_null_compl);
@@ -8400,10 +8436,10 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
current_map= tab->table->map;
used_tables2|= current_map;
/*
- psergey: have put the MAX_TABLES below. It's bad, will need to fix it.
+ psergey: have put the -1 below. It's bad, will need to fix it.
*/
COND *tmp_cond= make_cond_for_table(thd, on_expr, used_tables2,
- current_map, /*(tab - first_tab)*/ MAX_TABLES,
+ current_map, /*(tab - first_tab)*/ -1,
FALSE, FALSE);
if (tab == first_inner_tab && tab->on_precond)
add_cond_and_fix(thd, &tmp_cond, tab->on_precond);
@@ -8484,17 +8520,17 @@ bool generate_derived_keys_for_table(KEYUSE *keyuse, uint count, uint keys)
TABLE *table= keyuse->table;
if (table->alloc_keys(keys))
return TRUE;
- uint keyno= 0;
+ uint key_count= 0;
KEYUSE *first_keyuse= keyuse;
uint prev_part= keyuse->keypart;
uint parts= 0;
uint i= 0;
- for ( ; i < count && keyno < keys; )
+ for ( ; i < count && key_count < keys; )
{
do
{
- keyuse->key= keyno;
+ keyuse->key= table->s->keys;
keyuse->keypart_map= (key_part_map) (1 << parts);
keyuse++;
i++;
@@ -8508,14 +8544,14 @@ bool generate_derived_keys_for_table(KEYUSE *keyuse, uint count, uint keys)
}
else
{
- if (table->add_tmp_key(keyno, parts,
+ if (table->add_tmp_key(table->s->keys, parts,
get_next_field_for_derived_key,
(uchar *) &first_keyuse,
FALSE))
return TRUE;
- table->reginfo.join_tab->keys.set_bit(keyno);
+ table->reginfo.join_tab->keys.set_bit(table->s->keys);
first_keyuse= keyuse;
- keyno++;
+ key_count++;
parts= 0;
prev_part= keyuse->keypart;
}
@@ -8542,12 +8578,23 @@ bool generate_derived_keys(DYNAMIC_ARRAY *keyuse_array)
TABLE_LIST *derived= NULL;
if (keyuse->table != prev_table)
derived= keyuse->table->pos_in_table_list;
- while (derived && derived->is_materialized_derived() &&
- keyuse->key == MAX_KEY)
+ while (derived && derived->is_materialized_derived())
{
if (keyuse->table != prev_table)
{
prev_table= keyuse->table;
+ while (keyuse->table == prev_table && keyuse->key != MAX_KEY)
+ {
+ keyuse++;
+ i++;
+ }
+ if (keyuse->table != prev_table)
+ {
+ keyuse--;
+ i--;
+ derived= NULL;
+ continue;
+ }
first_table_keyuse= keyuse;
last_used_tables= keyuse->used_tables;
count= 0;
@@ -8560,11 +8607,13 @@ bool generate_derived_keys(DYNAMIC_ARRAY *keyuse_array)
}
count++;
keyuse++;
+ i++;
if (keyuse->table != prev_table)
{
if (generate_derived_keys_for_table(first_table_keyuse, count, ++keys))
return TRUE;
keyuse--;
+ i--;
derived= NULL;
}
}
@@ -8595,12 +8644,13 @@ void JOIN::drop_unused_derived_keys()
TABLE *table=tab->table;
if (!table)
continue;
- if (!table->pos_in_table_list->is_materialized_derived() ||
- table->max_keys <= 1)
+ if (!table->pos_in_table_list->is_materialized_derived())
continue;
- table->use_index(tab->ref.key);
- if (table->s->keys)
+ if (table->max_keys > 1)
+ table->use_index(tab->ref.key);
+ if (table->s->keys && tab->ref.key >= 0)
tab->ref.key= 0;
+ tab->keys= (key_map) (table->s->keys ? 1 : 0);
}
}
@@ -8729,7 +8779,7 @@ void set_join_cache_denial(JOIN_TAB *join_tab)
void rr_unlock_row(st_join_table *tab)
{
READ_RECORD *info= &tab->read_record;
- info->file->unlock_row();
+ info->table->file->unlock_row();
}
@@ -9063,8 +9113,17 @@ uint check_join_cache_usage(JOIN_TAB *tab,
if (tab->use_quick == 2)
goto no_join_cache;
+
+ /*
+ Don't use join cache if we're inside a join tab range covered by LooseScan
+ strategy (TODO: LooseScan is very similar to FirstMatch so theoretically it
+ should be possible to use join buffering in the same way we're using it for
+ multi-table firstmatch ranges).
+ */
+ if (tab->inside_loosescan_range)
+ goto no_join_cache;
- if (tab->is_inner_table_of_semi_join_with_first_match() &&
+ if (tab->is_inner_table_of_semijoin() &&
!join->allowed_semijoin_with_cache)
goto no_join_cache;
if (tab->is_inner_table_of_outer_join() &&
@@ -9144,6 +9203,9 @@ uint check_join_cache_usage(JOIN_TAB *tab,
case JT_EQ_REF:
if (cache_level <=2 || (no_hashed_cache && no_bka_cache))
goto no_join_cache;
+ if (tab->ref.is_access_triggered())
+ goto no_join_cache;
+
if (!tab->is_ref_for_hash_join())
{
flags= HA_MRR_NO_NULL_ENDPOINTS | HA_MRR_SINGLE_POINT;
@@ -9208,7 +9270,10 @@ uint check_join_cache_usage(JOIN_TAB *tab,
no_join_cache:
if (tab->type != JT_ALL && tab->is_ref_for_hash_join())
+ {
tab->type= JT_ALL;
+ tab->ref.key_parts= 0;
+ }
revise_cache_usage(tab);
return 0;
}
@@ -9389,7 +9454,6 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
TABLE *table=tab->table;
uint jcl= tab->used_join_cache_level;
tab->read_record.table= table;
- tab->read_record.file=table->file;
tab->read_record.unlock_row= rr_unlock_row;
tab->sorted= sorted;
sorted= 0; // only first must be sorted
@@ -9435,9 +9499,9 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
table->key_read=1;
table->file->extra(HA_EXTRA_KEYREAD);
}
- else if (!jcl || jcl > 4)
+ else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered())
push_index_cond(tab, tab->ref.key);
- break;
+ break;
case JT_EQ_REF:
tab->read_record.unlock_row= join_read_key_unlock_row;
/* fall through */
@@ -9447,7 +9511,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
table->key_read=1;
table->file->extra(HA_EXTRA_KEYREAD);
}
- else if (!jcl || jcl > 4)
+ else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered())
push_index_cond(tab, tab->ref.key);
break;
case JT_REF_OR_NULL:
@@ -9462,7 +9526,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
if (table->covering_keys.is_set(tab->ref.key) &&
!table->no_keyread)
table->enable_keyread();
- else if (!jcl || jcl > 4)
+ else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered())
push_index_cond(tab, tab->ref.key);
break;
case JT_ALL:
@@ -9671,7 +9735,6 @@ void JOIN_TAB::cleanup()
table->pos_in_table_list->jtbm_subselect)
{
end_read_record(&read_record);
- //psergey-merge:
table->pos_in_table_list->jtbm_subselect->cleanup();
table= NULL;
DBUG_VOID_RETURN;
@@ -9744,6 +9807,8 @@ bool JOIN_TAB::preread_init()
derived, DT_CREATE | DT_FILL))
return TRUE;
preread_init_done= TRUE;
+ if (select && select->quick)
+ select->quick->replace_handler(table->file);
return FALSE;
}
@@ -9819,6 +9884,22 @@ bool TABLE_REF::tmp_table_index_lookup_init(THD *thd,
}
+/*
+ Check if ref access uses "Full scan on NULL key" (i.e. it actually alternates
+ between ref access and full table scan)
+*/
+
+bool TABLE_REF::is_access_triggered()
+{
+ for (uint i = 0; i < key_parts; i++)
+ {
+ if (cond_guards[i])
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
/**
Partially cleanup JOIN after it has executed: close index or rnd read
(table cursors), free quick selects.
@@ -13954,6 +14035,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
bool maybe_null=(*cur_group->item)->maybe_null;
key_part_info->null_bit=0;
key_part_info->field= field;
+ key_part_info->fieldnr= field->field_index + 1;
if (cur_group == group)
field->key_start.set_bit(0);
key_part_info->offset= field->offset(table->record[0]);
@@ -14077,6 +14159,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
key_part_info->field->init(table);
key_part_info->key_type=FIELDFLAG_BINARY;
key_part_info->type= HA_KEYTYPE_BINARY;
+ key_part_info->fieldnr= key_part_info->field->field_index + 1;
key_part_info++;
}
/* Create a distinct key over the columns we are going to return */
@@ -14094,6 +14177,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
key_part_info->offset= (*reg_field)->offset(table->record[0]);
key_part_info->length= (uint16) (*reg_field)->pack_length();
+ key_part_info->fieldnr= (*reg_field)->field_index + 1;
/* TODO:
The below method of computing the key format length of the
key part is a copy/paste from opt_range.cc, and table.cc.
@@ -14371,6 +14455,13 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
keyinfo->key_parts > table->file->max_key_parts() ||
share->uniques)
{
+ if (!share->uniques && !(keyinfo->flags & HA_NOSAME))
+ {
+ my_error(ER_INTERNAL_ERROR, MYF(0),
+ "Using too big key for internal temp tables");
+ DBUG_RETURN(1);
+ }
+
/* Can't create a key; Make a unique constraint instead of a key */
share->keys= 0;
share->uniques= 1;
@@ -14389,9 +14480,9 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
}
else
{
- /* Create an unique key */
+ /* Create a key */
bzero((char*) &keydef,sizeof(keydef));
- keydef.flag=HA_NOSAME;
+ keydef.flag= keyinfo->flags & HA_NOSAME;
keydef.keysegs= keyinfo->key_parts;
keydef.seg= seg;
}
@@ -14566,7 +14657,8 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
{
/* Create an unique key */
bzero((char*) &keydef,sizeof(keydef));
- keydef.flag=HA_NOSAME | HA_BINARY_PACK_KEY | HA_PACK_KEY;
+ keydef.flag= ((keyinfo->flags & HA_NOSAME) | HA_BINARY_PACK_KEY |
+ HA_PACK_KEY);
keydef.keysegs= keyinfo->key_parts;
keydef.seg= seg;
}
@@ -15097,10 +15189,12 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
{
enum_nested_loop_state rc;
JOIN_CACHE *cache= join_tab->cache;
-
DBUG_ENTER("sub_select_cache");
- /* This function cannot be called if join_tab has no associated join buffer */
+ /*
+ This function cannot be called if join_tab has no associated join
+ buffer
+ */
DBUG_ASSERT(cache != NULL);
join_tab->cache->reset_join(join);
@@ -15929,7 +16023,7 @@ int join_read_key2(THD *thd, JOIN_TAB *tab, TABLE *table, TABLE_REF *table_ref)
*/
if (tab && tab->ref.has_record && tab->ref.use_count == 0)
{
- tab->read_record.file->unlock_row();
+ tab->read_record.table->file->unlock_row();
table_ref->has_record= FALSE;
}
error=table->file->ha_index_read_map(table->record[0],
@@ -16114,7 +16208,7 @@ join_init_quick_read_record(JOIN_TAB *tab)
int read_first_record_seq(JOIN_TAB *tab)
{
- if (tab->read_record.file->ha_rnd_init_with_error(1))
+ if (tab->read_record.table->file->ha_rnd_init_with_error(1))
return 1;
return (*tab->read_record.read_record)(&tab->read_record);
}
@@ -16181,7 +16275,6 @@ join_read_first(JOIN_TAB *tab)
tab->table->status=0;
tab->read_record.read_record=join_read_next;
tab->read_record.table=table;
- tab->read_record.file=table->file;
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
if (!table->file->inited)
@@ -16202,7 +16295,7 @@ static int
join_read_next(READ_RECORD *info)
{
int error;
- if ((error= info->file->ha_index_next(info->record)))
+ if ((error= info->table->file->ha_index_next(info->record)))
return report_error(info->table, error);
return 0;
@@ -16220,7 +16313,6 @@ join_read_last(JOIN_TAB *tab)
tab->table->status=0;
tab->read_record.read_record=join_read_prev;
tab->read_record.table=table;
- tab->read_record.file=table->file;
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
if (!table->file->inited)
@@ -16238,7 +16330,7 @@ static int
join_read_prev(READ_RECORD *info)
{
int error;
- if ((error= info->file->ha_index_prev(info->record)))
+ if ((error= info->table->file->ha_index_prev(info->record)))
return report_error(info->table, error);
return 0;
}
@@ -16267,7 +16359,7 @@ static int
join_ft_read_next(READ_RECORD *info)
{
int error;
- if ((error= info->file->ha_ft_read(info->table->record[0])))
+ if ((error= info->table->file->ha_ft_read(info->table->record[0])))
return report_error(info->table, error);
return 0;
}
@@ -16849,13 +16941,8 @@ bool test_if_ref(Item *root_cond, Item_field *left_item,Item *right_item)
between ref access and full table scan), then no equality can be
guaranteed to be true.
*/
- for (uint i = 0; i < join_tab->ref.key_parts; i++)
- {
- if (join_tab->ref.cond_guards[i])
- {
- return FALSE;
- }
- }
+ if (join_tab->ref.is_access_triggered())
+ return FALSE;
Item *ref_item=part_of_refkey(field->table,field);
if (ref_item && (ref_item->eq(right_item,1) ||
@@ -16937,7 +17024,7 @@ bool test_if_ref(Item *root_cond, Item_field *left_item,Item *right_item)
static Item *
make_cond_for_table(THD *thd, Item *cond, table_map tables,
table_map used_table,
- uint join_tab_idx_arg,
+ int join_tab_idx_arg,
bool exclude_expensive_cond __attribute__((unused)),
bool retain_ref_cond)
{
@@ -16951,7 +17038,7 @@ make_cond_for_table(THD *thd, Item *cond, table_map tables,
static Item *
make_cond_for_table_from_pred(THD *thd, Item *root_cond, Item *cond,
table_map tables, table_map used_table,
- uint join_tab_idx_arg,
+ int join_tab_idx_arg,
bool exclude_expensive_cond __attribute__
((unused)),
bool retain_ref_cond)
@@ -17917,8 +18004,10 @@ check_reverse_order:
condition are not relevant anymore
*/
if (tab->select && tab->select->pre_idx_push_select_cond)
+ {
tab->set_cond(tab->select->pre_idx_push_select_cond);
-
+ tab->table->file->cancel_pushed_idx_cond();
+ }
/*
TODO: update the number of records in join->best_positions[tablenr]
*/
@@ -17971,14 +18060,11 @@ skipped_filesort:
delete save_quick;
save_quick= NULL;
}
- /*
- orig_cond is a part of pre_idx_push_cond,
- no need to restore it.
- */
- orig_cond= 0;
- orig_cond_saved= false;
if (orig_cond_saved && !changed_key)
tab->set_cond(orig_cond);
+ if (!no_changes && changed_key && table->file->pushed_idx_cond)
+ table->file->cancel_pushed_idx_cond();
+
DBUG_RETURN(1);
use_filesort:
@@ -17990,6 +18076,7 @@ use_filesort:
}
if (orig_cond_saved)
tab->set_cond(orig_cond);
+
DBUG_RETURN(0);
}
@@ -19887,7 +19974,6 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
}
if (thd->is_fatal_error)
DBUG_RETURN(TRUE);
-
if (!cond->fixed)
{
Item *tmp_item= (Item*) cond;
@@ -19896,15 +19982,20 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
}
if (join_tab->select)
{
+ Item *cond_copy;
+ UNINIT_VAR(cond_copy); // used when pre_idx_push_select_cond!=NULL
+ if (join_tab->select->pre_idx_push_select_cond)
+ cond_copy= cond->copy_andor_structure(thd);
if (join_tab->select->cond)
error=(int) cond->add(join_tab->select->cond);
join_tab->select->cond= cond;
if (join_tab->select->pre_idx_push_select_cond)
{
- Item *new_cond= and_conds(join_tab->select->pre_idx_push_select_cond, cond);
+ Item *new_cond= and_conds(cond_copy, join_tab->select->pre_idx_push_select_cond);
if (!new_cond->fixed && new_cond->fix_fields(thd, &new_cond))
error= 1;
- join_tab->select->pre_idx_push_select_cond= new_cond;
+ join_tab->pre_idx_push_select_cond=
+ join_tab->select->pre_idx_push_select_cond= new_cond;
}
join_tab->set_select_cond(cond, __LINE__);
}
@@ -20699,9 +20790,13 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
length= (longlong10_to_str(key_len, keylen_str_buf, 10) -
keylen_str_buf);
tmp3.append(keylen_str_buf, length, cs);
+/*<<<<<<< TREE
}
if ((is_hj || tab->type==JT_RANGE || tab->type == JT_INDEX_MERGE) &&
tab->select && tab->select->quick)
+=======*/
+ }
+ if (tab->type != JT_CONST && tab->select && tab->select->quick)
tab->select->quick->add_keys_and_lengths(&tmp2, &tmp3);
if (key_info || (tab->select && tab->select->quick))
{
@@ -21577,6 +21672,28 @@ void JOIN::save_query_plan(Join_plan_state *save_to)
memcpy((uchar*) save_to->best_positions, (uchar*) best_positions,
sizeof(POSITION) * (table_count + 1));
memset(best_positions, 0, sizeof(POSITION) * (table_count + 1));
+
+ /* Save SJM nests */
+ List_iterator<TABLE_LIST> it(select_lex->sj_nests);
+ TABLE_LIST *tlist;
+ SJ_MATERIALIZATION_INFO **p_info= save_to->sj_mat_info;
+ while ((tlist= it++))
+ {
+ *(p_info++)= tlist->sj_mat_info;
+ }
+}
+
+
+/**
+ Reset a query execution plan so that it can be reoptimized in-place.
+*/
+void JOIN::reset_query_plan()
+{
+ for (uint i= 0; i < table_count; i++)
+ {
+ join_tab[i].keyuse= NULL;
+ join_tab[i].checked_keys.clear_all();
+ }
}
@@ -21604,6 +21721,14 @@ void JOIN::restore_query_plan(Join_plan_state *restore_from)
}
memcpy((uchar*) best_positions, (uchar*) restore_from->best_positions,
sizeof(POSITION) * (table_count + 1));
+ /* Restore SJM nests */
+ List_iterator<TABLE_LIST> it(select_lex->sj_nests);
+ TABLE_LIST *tlist;
+ SJ_MATERIALIZATION_INFO **p_info= restore_from->sj_mat_info;
+ while ((tlist= it++))
+ {
+ tlist->sj_mat_info= *(p_info++);
+ }
}
@@ -21613,7 +21738,8 @@ void JOIN::restore_query_plan(Join_plan_state *restore_from)
@param added_where An extra conjunct to the WHERE clause to reoptimize with
@param join_tables The set of tables to reoptimize
- @param save_to If != NULL, save here the state of the current query plan
+ @param save_to If != NULL, save here the state of the current query plan,
+ otherwise reuse the existing query plan structures.
@notes
Given a query plan that was already optimized taking into account some WHERE
@@ -21657,6 +21783,8 @@ JOIN::reoptimize(Item *added_where, table_map join_tables,
if (save_to)
save_query_plan(save_to);
+ else
+ reset_query_plan();
if (!keyuse.buffer &&
my_init_dynamic_array(&keyuse, sizeof(KEYUSE), 20, 64))
@@ -21690,6 +21818,9 @@ JOIN::reoptimize(Item *added_where, table_map join_tables,
return REOPT_ERROR;
optimize_keyuse(this, &keyuse);
+ if (optimize_semijoin_nests(this, join_tables))
+ return REOPT_ERROR;
+
/* Re-run the join optimizer to compute a new query plan. */
if (choose_plan(this, join_tables))
return REOPT_ERROR;
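Taken together, the sql_select.cc changes let reoptimize() run either against a saved plan or fully in place, and they re-run semi-join nest optimization before the planner. A simplified, standalone control-flow sketch with stub types and trivially succeeding steps (an illustration of the flow, not the server code):

#include <cstdio>

enum reopt_result { REOPT_NEW_PLAN, REOPT_OLD_PLAN, REOPT_ERROR };

struct JoinStub
{
  void save_query_plan()  { puts("saved plan into Join_plan_state"); }
  void reset_query_plan() { puts("cleared per-table keyuse/checked_keys in place"); }
  bool optimize_semijoin_nests() { return false; }  // assume success
  bool choose_plan()             { return false; }  // assume success

  // save_to_given == false models the new "reuse the existing structures" path.
  reopt_result reoptimize(bool save_to_given)
  {
    if (save_to_given)
      save_query_plan();
    else
      reset_query_plan();                 // new in this merge
    if (optimize_semijoin_nests())        // also new: redo SJM costing first
      return REOPT_ERROR;
    if (choose_plan())
      return REOPT_ERROR;
    return REOPT_NEW_PLAN;
  }
};

int main()
{
  JoinStub j;
  return j.reoptimize(false) == REOPT_ERROR;
}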
diff --git a/sql/sql_select.h b/sql/sql_select.h
index d87504f719c..4b5e2903c1d 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -143,6 +143,7 @@ typedef struct st_table_ref
bool tmp_table_index_lookup_init(THD *thd, KEY *tmp_key, Item_iterator &it,
bool value, uint skip= 0);
+ bool is_access_triggered();
} TABLE_REF;
@@ -284,7 +285,6 @@ typedef struct st_join_table {
ulong max_used_fieldlength;
uint used_blobs;
uint used_null_fields;
- uint used_rowid_fields;
uint used_uneven_bit_fields;
enum join_type type;
bool cached_eq_ref_table,eq_ref_table,not_used_in_distinct;
@@ -343,6 +343,9 @@ typedef struct st_join_table {
NULL - Not doing a loose scan on this join tab.
*/
struct st_join_table *loosescan_match_tab;
+
+ /* TRUE <=> we are inside LooseScan range */
+ bool inside_loosescan_range;
/* Buffer to save index tuple to be able to skip duplicates */
uchar *loosescan_buf;
@@ -385,19 +388,14 @@ typedef struct st_join_table {
return (is_using_loose_index_scan() &&
((QUICK_GROUP_MIN_MAX_SELECT *)select->quick)->is_agg_distinct());
}
- bool check_rowid_field()
- {
- if (keep_current_rowid && !used_rowid_fields)
- {
- used_rowid_fields= 1;
- used_fieldlength+= table->file->ref_length;
- }
- return test(used_rowid_fields);
- }
bool is_inner_table_of_semi_join_with_first_match()
{
return first_sj_inner_tab != NULL;
}
+ bool is_inner_table_of_semijoin()
+ {
+ return emb_sj_nest != NULL;
+ }
bool is_inner_table_of_outer_join()
{
return first_inner != NULL;
@@ -674,6 +672,7 @@ protected:
KEYUSE *join_tab_keyuse[MAX_TABLES];
/* Copies of JOIN_TAB::checked_keys for each JOIN_TAB. */
key_map join_tab_checked_keys[MAX_TABLES];
+ SJ_MATERIALIZATION_INFO *sj_mat_info[MAX_TABLES];
public:
Join_plan_state()
{
@@ -699,6 +698,7 @@ protected:
enum_reopt_result reoptimize(Item *added_where, table_map join_tables,
Join_plan_state *save_to);
void save_query_plan(Join_plan_state *save_to);
+ void reset_query_plan();
void restore_query_plan(Join_plan_state *restore_from);
/* Choose a subquery plan for a table-less subquery. */
bool choose_tableless_subquery_plan();
@@ -1152,7 +1152,7 @@ public:
max_allowed_join_cache_level > JOIN_CACHE_HASHED_BIT;
}
bool choose_subquery_plan(table_map join_tables);
- void get_partial_cost_and_fanout(uint end_tab_idx,
+ void get_partial_cost_and_fanout(int end_tab_idx,
table_map filter_map,
double *read_time_arg,
double *record_count_arg);
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index a65a2e40577..b7c3a29cace 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1218,7 +1218,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
handler *file= table->file;
TABLE_SHARE *share= table->s;
HA_CREATE_INFO create_info;
- bool show_table_options= FALSE;
+ bool show_table_options __attribute__ ((unused))= FALSE;
bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL |
MODE_ORACLE |
MODE_MSSQL |
@@ -1908,7 +1908,8 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
mysql_mutex_lock(&tmp->LOCK_thd_data);
if ((mysys_var= tmp->mysys_var))
mysql_mutex_lock(&mysys_var->mutex);
- thd_info->proc_info= (char*) (tmp->killed == THD::KILL_CONNECTION? "Killed" : 0);
+ thd_info->proc_info= (char*) (tmp->killed >= KILL_QUERY ?
+ "Killed" : 0);
thd_info->state_info= thread_state_info(tmp);
if (mysys_var)
mysql_mutex_unlock(&mysys_var->mutex);
@@ -2037,7 +2038,8 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
if ((mysys_var= tmp->mysys_var))
mysql_mutex_lock(&mysys_var->mutex);
/* COMMAND */
- if ((val= (char *) (tmp->killed == THD::KILL_CONNECTION? "Killed" : 0)))
+ if ((val= (char *) ((tmp->killed >= KILL_QUERY ?
+ "Killed" : 0))))
table->field[4]->store(val, strlen(val), cs);
else
table->field[4]->store(command_name[tmp->command].str,
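Both processlist hunks switch from comparing against a THD-scoped KILL_CONNECTION to an ordered comparison with the new global killed_state. A standalone sketch of the idea; the enumerator values below are made up for illustration and are not the server's:

#include <cstdio>

enum killed_state_demo
{
  NOT_KILLED      = 0,
  KILL_QUERY      = 4,
  KILL_CONNECTION = 8
};

static const char *proc_info(killed_state_demo killed)
{
  // Anything at or above KILL_QUERY is reported as "Killed" in the processlist.
  return killed >= KILL_QUERY ? "Killed" : NULL;
}

int main()
{
  const killed_state_demo states[]= { NOT_KILLED, KILL_QUERY, KILL_CONNECTION };
  for (killed_state_demo s : states)
    printf("%s\n", proc_info(s) ? proc_info(s) : "(running)");
  return 0;
}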
@@ -2385,7 +2387,7 @@ static bool show_status_array(THD *thd, const char *wild,
end= strmov(buff, *(my_bool*) value ? "ON" : "OFF");
break;
case SHOW_INT:
- end= int10_to_str((long) *(uint32*) value, buff, 10);
+ end= int10_to_str((long) *(uint*) value, buff, 10);
break;
case SHOW_HAVE:
{
@@ -5058,7 +5060,7 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
String sp_name(sp_name_buff, sizeof(sp_name_buff), cs);
String definer(definer_buff, sizeof(definer_buff), cs);
sp_head *sp;
- uint routine_type;
+ stored_procedure_type routine_type;
bool free_sp_head;
DBUG_ENTER("store_schema_params");
@@ -5069,7 +5071,7 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_DB], &sp_db);
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_NAME], &sp_name);
get_field(thd->mem_root,proc_table->field[MYSQL_PROC_FIELD_DEFINER],&definer);
- routine_type= (uint) proc_table->field[MYSQL_PROC_MYSQL_TYPE]->val_int();
+ routine_type= (stored_procedure_type) proc_table->field[MYSQL_PROC_MYSQL_TYPE]->val_int();
if (!full_access)
full_access= !strcmp(sp_user, definer.ptr());
@@ -7471,11 +7473,11 @@ bool get_schema_tables_result(JOIN *join,
{
result= 1;
join->error= 1;
- tab->read_record.file= table_list->table->file;
+ tab->read_record.table->file= table_list->table->file;
table_list->schema_table_state= executed_place;
break;
}
- tab->read_record.file= table_list->table->file;
+ tab->read_record.table->file= table_list->table->file;
table_list->schema_table_state= executed_place;
}
}
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 3f3d140dad8..9f24ed45842 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -661,6 +661,7 @@ bool st_select_lex_unit::exec()
if (!saved_error)
{
examined_rows+= thd->examined_row_count;
+ thd->examined_row_count= 0;
if (union_result->flush())
{
thd->lex->current_select= lex_select_save;
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 9ab01736f66..99cac9f40e7 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -274,7 +274,7 @@ int mysql_update(THD *thd,
SELECT_LEX *select_lex= &thd->lex->select_lex;
ulonglong id;
List<Item> all_fields;
- THD::killed_state killed_status= THD::NOT_KILLED;
+ killed_state killed_status= NOT_KILLED;
DBUG_ENTER("mysql_update");
if (open_tables(thd, &table_list, &table_count, 0))
@@ -843,9 +843,9 @@ int mysql_update(THD *thd,
// simulated killing after the loop must be ineffective for binlogging
DBUG_EXECUTE_IF("simulate_kill_bug27571",
{
- thd->killed= THD::KILL_QUERY;
+ thd->killed= KILL_QUERY;
};);
- error= (killed_status == THD::NOT_KILLED)? error : 1;
+ error= (killed_status == NOT_KILLED)? error : 1;
if (error &&
will_batch &&
@@ -908,7 +908,7 @@ int mysql_update(THD *thd,
if (error < 0)
thd->clear_error();
else
- errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
+ errcode= query_error_code(thd, killed_status == NOT_KILLED);
if (thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query(), thd->query_length(),
@@ -2052,7 +2052,7 @@ void multi_update::abort_result_set()
got caught and if happens later the killed error is written
into repl event.
*/
- int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
+ int errcode= query_error_code(thd, thd->killed == NOT_KILLED);
/* the error of binary logging is ignored */
(void)thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query(), thd->query_length(),
@@ -2265,7 +2265,7 @@ bool multi_update::send_eof()
{
char buff[STRING_BUFFER_USUAL_SIZE];
ulonglong id;
- THD::killed_state killed_status= THD::NOT_KILLED;
+ killed_state killed_status= NOT_KILLED;
DBUG_ENTER("multi_update::send_eof");
thd_proc_info(thd, "updating reference tables");
@@ -2280,7 +2280,7 @@ bool multi_update::send_eof()
if local_error is not set ON until after do_updates() then
later carried out killing should not affect binlogging.
*/
- killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
+ killed_status= (local_error == 0) ? NOT_KILLED : thd->killed;
thd_proc_info(thd, "end");
/* We must invalidate the query cache before binlog writing and
@@ -2310,7 +2310,7 @@ bool multi_update::send_eof()
if (local_error == 0)
thd->clear_error();
else
- errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
+ errcode= query_error_code(thd, killed_status == NOT_KILLED);
if (thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query(), thd->query_length(),
transactional_tables, FALSE, FALSE, errcode))
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index c09b39c2450..94dffad822e 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -30,8 +30,8 @@
#include "sql_acl.h" // *_ACL, check_grant
#include "sql_select.h"
#include "parse_file.h"
-#include "sp.h"
#include "sp_head.h"
+#include "sp.h"
#include "sp_cache.h"
#include "datadict.h" // dd_frm_type()
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index d82936c87a9..64aa42fa337 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -788,10 +788,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%pure_parser /* We have threads */
/*
- Currently there are 171 shift/reduce conflicts.
+ Currently there are 174 shift/reduce conflicts.
We should not introduce new conflicts any more.
*/
-%expect 171
+%expect 174
/*
Comments for TOKENS.
@@ -1015,6 +1015,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token GROUP_CONCAT_SYM
%token GT_SYM /* OPERATOR */
%token HANDLER_SYM
+%token HARD_SYM
%token HASH_SYM
%token HAVING /* SQL-2003-R */
%token HELP_SYM
@@ -1292,6 +1293,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token SMALLINT /* SQL-2003-R */
%token SNAPSHOT_SYM
%token SOCKET_SYM
+%token SOFT_SYM
%token SONAME_SYM
%token SOUNDS_SYM
%token SOURCE_SYM
@@ -1476,7 +1478,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
opt_ev_status opt_ev_on_completion ev_on_completion opt_ev_comment
ev_alter_on_schedule_completion opt_ev_rename_to opt_ev_sql_stmt
optional_flush_tables_arguments opt_dyncol_type dyncol_type
- opt_time_precision
+ opt_time_precision kill_type kill_option int_num
%type <m_yes_no_unk>
opt_chain opt_release
@@ -1511,7 +1513,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
function_call_keyword
function_call_nonkeyword
function_call_generic
- function_call_conflict
+ function_call_conflict kill_expr
signal_allowed_expr
%type <item_num>
@@ -10415,6 +10417,12 @@ delete_limit_clause:
}
;
+int_num:
+ NUM { int error; $$= (int) my_strtoll10($1.str, (char**) 0, &error); }
+ | '-' NUM { int error; $$= -(int) my_strtoll10($2.str, (char**) 0, &error); }
+ | '-' LONG_NUM { int error; $$= -(int) my_strtoll10($2.str, (char**) 0, &error); }
+ ;
+
ulong_num:
NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
| HEX_NUM { $$= (ulong) strtol($1.str, (char**) 0, 16); }
@@ -11854,19 +11862,41 @@ purge_option:
/* kill threads */
kill:
- KILL_SYM kill_option expr
+ KILL_SYM
{
LEX *lex=Lex;
lex->value_list.empty();
- lex->value_list.push_front($3);
+ lex->users_list.empty();
lex->sql_command= SQLCOM_KILL;
}
+ kill_type kill_option kill_expr
+ {
+ Lex->kill_signal= (killed_state) ($3 | $4);
+ }
;
+kill_type:
+ /* Empty */ { $$= (int) KILL_HARD_BIT; }
+ | HARD_SYM { $$= (int) KILL_HARD_BIT; }
+ | SOFT_SYM { $$= 0; }
+
kill_option:
- /* empty */ { Lex->type= 0; }
- | CONNECTION_SYM { Lex->type= 0; }
- | QUERY_SYM { Lex->type= ONLY_KILL_QUERY; }
+ /* empty */ { $$= (int) KILL_CONNECTION; }
+ | CONNECTION_SYM { $$= (int) KILL_CONNECTION; }
+ | QUERY_SYM { $$= (int) KILL_QUERY; }
+ ;
+
+kill_expr:
+ expr
+ {
+ Lex->value_list.push_front($$);
+ Lex->kill_type= KILL_TYPE_ID;
+ }
+ | USER user
+ {
+ Lex->users_list.push_back($2);
+ Lex->kill_type= KILL_TYPE_USER;
+ }
;
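With the new rules, KILL takes an optional HARD/SOFT modifier and either a thread id expression or USER <user>, so statements along the lines of KILL SOFT QUERY <id> or KILL HARD USER <user> become expressible (subject to the rest of the grammar). The hard/soft bit is OR-ed with the connection/query kind into Lex->kill_signal, and kill_type records whether the target is an id or a user. A standalone sketch of that composition; the enumerator values and helper below are invented for illustration:

#include <cstdio>

enum { KILL_HARD_BIT_DEMO = 1, KILL_QUERY_DEMO = 4, KILL_CONNECTION_DEMO = 8 };
enum kill_target_demo { KILL_TYPE_ID_DEMO, KILL_TYPE_USER_DEMO };

struct kill_cmd_demo
{
  int              signal;   // hard/soft bit OR-ed with connection/query kind
  kill_target_demo target;   // thread id vs. user name
};

// "KILL SOFT QUERY 42"   -> make_kill(false, KILL_QUERY_DEMO, KILL_TYPE_ID_DEMO)
// "KILL HARD USER 'bob'" -> make_kill(true, KILL_CONNECTION_DEMO, KILL_TYPE_USER_DEMO)
static kill_cmd_demo make_kill(bool hard, int option, kill_target_demo target)
{
  kill_cmd_demo cmd= { (hard ? KILL_HARD_BIT_DEMO : 0) | option, target };
  return cmd;
}

int main()
{
  kill_cmd_demo c= make_kill(false, KILL_QUERY_DEMO, KILL_TYPE_ID_DEMO);
  printf("signal=%d target=%d\n", c.signal, (int) c.target);
  return 0;
}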
/* change database */
@@ -12997,6 +13027,7 @@ keyword_sp:
| GRANTS {}
| GLOBAL_SYM {}
| HASH_SYM {}
+ | HARD_SYM {}
| HOSTS_SYM {}
| HOUR_SYM {}
| IDENTIFIED_SYM {}
@@ -13134,6 +13165,7 @@ keyword_sp:
| SHUTDOWN {}
| SLOW {}
| SNAPSHOT_SYM {}
+ | SOFT_SYM {}
| SOUNDS_SYM {}
| SOURCE_SYM {}
| SQL_CACHE_SYM {}
@@ -14242,7 +14274,7 @@ grant_option:
lex->mqh.conn_per_hour= $2;
lex->mqh.specified_limits|= USER_RESOURCES::CONNECTIONS_PER_HOUR;
}
- | MAX_USER_CONNECTIONS_SYM ulong_num
+ | MAX_USER_CONNECTIONS_SYM int_num
{
LEX *lex=Lex;
lex->mqh.user_conn= $2;
diff --git a/sql/structs.h b/sql/structs.h
index 347cb97e152..8aeb41f7a1e 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -183,8 +183,11 @@ typedef struct user_resources {
uint updates;
/* Maximum number of connections established per hour. */
uint conn_per_hour;
- /* Maximum number of concurrent connections. */
- uint user_conn;
+ /*
+    Maximum number of concurrent connections. If -1, no new
+    connections are allowed
+ */
+ int user_conn;
/*
Values of this enum and specified_limits member are used by the
parser to store which user limits were specified in GRANT statement.
@@ -217,7 +220,7 @@ typedef struct user_conn {
/* Total length of the key. */
uint len;
/* Current amount of concurrent connections for this account. */
- uint connections;
+ int connections;
/*
Current number of connections per hour, number of updating statements
per hour and total number of statements per hour for this account.
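The switch from uint to int in user_resources::user_conn and user_conn::connections makes room for -1 as "no new connections for this account", while 0 keeps meaning "no limit". A minimal sketch of how such a limit could be interpreted; the check below is an assumption for illustration (the real enforcement lives in the connection code) and the types are stubs:

#include <cstdio>

struct user_resources_demo
{
  int user_conn;    // max concurrent connections; 0 = no limit, -1 = none allowed
};

struct user_conn_demo
{
  int connections;  // current concurrent connections for the account
};

static bool may_connect(const user_resources_demo &limits, const user_conn_demo &uc)
{
  if (limits.user_conn < 0)                  // -1: reject every new connection
    return false;
  if (limits.user_conn == 0)                 // 0: unlimited
    return true;
  return uc.connections < limits.user_conn;  // otherwise enforce the cap
}

int main()
{
  const user_resources_demo blocked= { -1 }, unlimited= { 0 }, capped= { 10 };
  const user_conn_demo uc= { 10 };
  printf("%d %d %d\n", may_connect(blocked, uc), may_connect(unlimited, uc),
         may_connect(capped, uc));
  return 0;
}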
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 37dfb95bc6f..6314a296b33 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -929,7 +929,8 @@ static Sys_var_mybool Sys_log_queries_not_using_indexes(
static Sys_var_ulong Sys_log_warnings(
"log_warnings",
- "Log some not critical warnings to the general log file",
+ "Log some not critical warnings to the general log file."
+ "Value can be between 0 and 11. Higher values mean more verbosity",
SESSION_VAR(log_warnings),
CMD_LINE(OPT_ARG, 'W'),
VALID_RANGE(0, ULONG_MAX), DEFAULT(1), BLOCK_SIZE(1));
@@ -1234,14 +1235,28 @@ static Sys_var_ulong Sys_max_sp_recursion_depth(
SESSION_VAR(max_sp_recursion_depth), CMD_LINE(OPT_ARG),
VALID_RANGE(0, 255), DEFAULT(0), BLOCK_SIZE(1));
+
+static bool if_checking_enabled(sys_var *self, THD *thd, set_var *var)
+{
+ if (session_readonly(self, thd, var))
+ return true;
+
+ if (!max_user_connections_checking)
+ {
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--max-user-connections=0");
+ return true;
+ }
+
+ return false;
+}
// non-standard session_value_ptr() here
static Sys_var_max_user_conn Sys_max_user_connections(
"max_user_connections",
"The maximum number of active connections for a single user "
"(0 = no limit)",
SESSION_VAR(max_user_connections), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, UINT_MAX), DEFAULT(0), BLOCK_SIZE(1), NO_MUTEX_GUARD,
- NOT_IN_BINLOG, ON_CHECK(session_readonly));
+ VALID_RANGE(0, INT_MAX), DEFAULT(0), BLOCK_SIZE(1), NO_MUTEX_GUARD,
+ NOT_IN_BINLOG, ON_CHECK(if_checking_enabled));
static Sys_var_ulong Sys_max_tmp_tables(
"max_tmp_tables",
@@ -3341,18 +3356,18 @@ static Sys_var_mybool Sys_userstat(
CMD_LINE(OPT_ARG), DEFAULT(FALSE));
static Sys_var_mybool Sys_binlog_annotate_row_events(
- "binlog_annotate_rows_events",
+ "binlog_annotate_row_events",
"Tells the master to annotate RBR events with the statement that "
"caused these events",
- SESSION_VAR(binlog_annotate_rows_events), CMD_LINE(OPT_ARG),
+ SESSION_VAR(binlog_annotate_row_events), CMD_LINE(OPT_ARG),
DEFAULT(FALSE));
#ifdef HAVE_REPLICATION
-static Sys_var_mybool Sys_replicate_annotate_rows_events(
- "replicate_annotate_rows_events",
+static Sys_var_mybool Sys_replicate_annotate_row_events(
+ "replicate_annotate_row_events",
"Tells the slave to write annotate rows events recieved from the master "
"to its own binary log. Ignored if log_slave_updates is not set",
- READ_ONLY GLOBAL_VAR(opt_replicate_annotate_rows_events),
+ READ_ONLY GLOBAL_VAR(opt_replicate_annotate_row_events),
CMD_LINE(OPT_ARG), DEFAULT(0));
#endif
@@ -3417,3 +3432,12 @@ static Sys_var_session_special Sys_in_transaction(
VALID_RANGE(0, 1), BLOCK_SIZE(1), NO_MUTEX_GUARD,
NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(0), ON_READ(in_transaction));
+#ifndef DBUG_OFF
+static Sys_var_ulong Sys_debug_binlog_fsync_sleep(
+ "debug_binlog_fsync_sleep",
+ "Extra sleep (in microseconds) to add to binlog fsync(), for debugging",
+ GLOBAL_VAR(opt_binlog_dbug_fsync_sleep),
+ CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, ULONG_MAX), DEFAULT(0), BLOCK_SIZE(1));
+#endif
+
diff --git a/sql/sys_vars.h b/sql/sys_vars.h
index f152c944a3e..653c0d94dbe 100644
--- a/sql/sys_vars.h
+++ b/sql/sys_vars.h
@@ -70,6 +70,21 @@
#define GET_HA_ROWS GET_ULONG
#endif
+/*
+  A special assert for sysvars: it reports the name of the variable
+  and fails even in non-debug builds.
+
+ It is supposed to be used *only* in Sys_var* constructors,
+ and has name_arg hard-coded to prevent incorrect usage.
+*/
+#define SYSVAR_ASSERT(X) \
+ while(!(X)) \
+ { \
+ fprintf(stderr, "Sysvar '%s' failed '%s'\n", name_arg, #X); \
+ DBUG_ABORT(); \
+ exit(255); \
+ }
+
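The macro above reports the offending variable by name and aborts even when assertions are compiled out, which is why the constructors below trade DBUG_ASSERT for SYSVAR_ASSERT. A standalone imitation of that behaviour (names carry a _DEMO suffix; the real macro calls DBUG_ABORT() and exit(255) rather than abort()):

#include <cstdio>
#include <cstdlib>

#define SYSVAR_ASSERT_DEMO(X)                                     \
  while (!(X))                                                    \
  {                                                               \
    fprintf(stderr, "Sysvar '%s' failed '%s'\n", name_arg, #X);   \
    abort();                                                      \
  }

struct DemoVar
{
  const char *name_arg;   // the macro relies on this name being in scope
  DemoVar(const char *name_arg, long min_val, long max_val)
    : name_arg(name_arg)
  {
    SYSVAR_ASSERT_DEMO(min_val < max_val);   // fires in release builds too
  }
};

int main()
{
  DemoVar ok("max_connections_demo", 1, 100000);   // passes
  // DemoVar bad("broken_var", 10, 5);             // would print the name and abort
  (void) ok;
  return 0;
}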
enum charset_enum {IN_SYSTEM_CHARSET, IN_FS_CHARSET};
static const char *bool_values[3]= {"OFF", "ON", 0};
@@ -123,12 +138,12 @@ public:
if (max_var_ptr())
*max_var_ptr()= max_val;
global_var(T)= def_val;
- DBUG_ASSERT(size == sizeof(T));
- DBUG_ASSERT(min_val < max_val);
- DBUG_ASSERT(min_val <= def_val);
- DBUG_ASSERT(max_val >= def_val);
- DBUG_ASSERT(block_size > 0);
- DBUG_ASSERT(def_val % block_size == 0);
+ SYSVAR_ASSERT(size == sizeof(T));
+ SYSVAR_ASSERT(min_val < max_val);
+ SYSVAR_ASSERT(min_val <= def_val);
+ SYSVAR_ASSERT(max_val >= def_val);
+ SYSVAR_ASSERT(block_size > 0);
+ SYSVAR_ASSERT(def_val % block_size == 0);
}
bool do_check(THD *thd, set_var *var)
{
@@ -271,8 +286,8 @@ public:
{
option.var_type= GET_ENUM;
global_var(ulong)= def_val;
- DBUG_ASSERT(def_val < typelib.count);
- DBUG_ASSERT(size == sizeof(ulong));
+ SYSVAR_ASSERT(def_val < typelib.count);
+ SYSVAR_ASSERT(size == sizeof(ulong));
}
bool session_update(THD *thd, set_var *var)
{
@@ -318,9 +333,9 @@ public:
{
option.var_type= GET_BOOL;
global_var(my_bool)= def_val;
- DBUG_ASSERT(def_val < 2);
- DBUG_ASSERT(getopt.arg_type == OPT_ARG || getopt.id == -1);
- DBUG_ASSERT(size == sizeof(my_bool));
+ SYSVAR_ASSERT(def_val < 2);
+ SYSVAR_ASSERT(getopt.arg_type == OPT_ARG || getopt.id == -1);
+ SYSVAR_ASSERT(size == sizeof(my_bool));
}
bool session_update(THD *thd, set_var *var)
{
@@ -379,8 +394,8 @@ public:
*/
option.var_type= (flags & ALLOCATED) ? GET_STR_ALLOC : GET_STR;
global_var(const char*)= def_val;
- DBUG_ASSERT(scope() == GLOBAL);
- DBUG_ASSERT(size == sizeof(char *));
+ SYSVAR_ASSERT(scope() == GLOBAL);
+ SYSVAR_ASSERT(size == sizeof(char *));
}
void cleanup()
{
@@ -537,7 +552,7 @@ public:
on_check_func, on_update_func, deprecated_version, substitute)
{
global_var(LEX_STRING).length= strlen(def_val);
- DBUG_ASSERT(size == sizeof(LEX_STRING));
+ SYSVAR_ASSERT(size == sizeof(LEX_STRING));
*const_cast<SHOW_TYPE*>(&show_val_type)= SHOW_LEX_STRING;
}
bool global_update(THD *thd, set_var *var)
@@ -665,7 +680,7 @@ public:
option.var_type|= GET_ASK_ADDR;
option.value= (uchar**)1; // crash me, please
keycache_var(dflt_key_cache, off)= def_val;
- DBUG_ASSERT(scope() == GLOBAL);
+ SYSVAR_ASSERT(scope() == GLOBAL);
}
bool global_update(THD *thd, set_var *var)
{
@@ -825,10 +840,10 @@ public:
option.min_value= (longlong) double2ulonglong(min_val);
option.max_value= (longlong) double2ulonglong(max_val);
global_var(double)= (double)option.def_value;
- DBUG_ASSERT(min_val < max_val);
- DBUG_ASSERT(min_val <= def_val);
- DBUG_ASSERT(max_val >= def_val);
- DBUG_ASSERT(size == sizeof(double));
+ SYSVAR_ASSERT(min_val < max_val);
+ SYSVAR_ASSERT(min_val <= def_val);
+ SYSVAR_ASSERT(max_val >= def_val);
+ SYSVAR_ASSERT(size == sizeof(double));
}
bool do_check(THD *thd, set_var *var)
{
@@ -928,11 +943,11 @@ public:
{
option.var_type= GET_FLAGSET;
global_var(ulonglong)= def_val;
- DBUG_ASSERT(typelib.count > 1);
- DBUG_ASSERT(typelib.count <= 65);
- DBUG_ASSERT(def_val < MAX_SET(typelib.count));
- DBUG_ASSERT(strcmp(values[typelib.count-1], "default") == 0);
- DBUG_ASSERT(size == sizeof(ulonglong));
+ SYSVAR_ASSERT(typelib.count > 1);
+ SYSVAR_ASSERT(typelib.count <= 65);
+ SYSVAR_ASSERT(def_val < MAX_SET(typelib.count));
+ SYSVAR_ASSERT(strcmp(values[typelib.count-1], "default") == 0);
+ SYSVAR_ASSERT(size == sizeof(ulonglong));
}
bool do_check(THD *thd, set_var *var)
{
@@ -1039,10 +1054,10 @@ public:
{
option.var_type= GET_SET;
global_var(ulonglong)= def_val;
- DBUG_ASSERT(typelib.count > 0);
- DBUG_ASSERT(typelib.count <= 64);
- DBUG_ASSERT(def_val <= MAX_SET(typelib.count));
- DBUG_ASSERT(size == sizeof(ulonglong));
+ SYSVAR_ASSERT(typelib.count > 0);
+ SYSVAR_ASSERT(typelib.count <= 64);
+ SYSVAR_ASSERT(def_val <= MAX_SET(typelib.count));
+ SYSVAR_ASSERT(size == sizeof(ulonglong));
}
bool do_check(THD *thd, set_var *var)
{
@@ -1145,8 +1160,8 @@ public:
plugin_type(plugin_type_arg)
{
option.var_type= GET_STR;
- DBUG_ASSERT(size == sizeof(plugin_ref));
- DBUG_ASSERT(getopt.id == -1); // force NO_CMD_LINE
+ SYSVAR_ASSERT(size == sizeof(plugin_ref));
+ SYSVAR_ASSERT(getopt.id == -1); // force NO_CMD_LINE
}
bool do_check(THD *thd, set_var *var)
{
@@ -1256,7 +1271,7 @@ public:
lock, binlog_status_arg, on_check_func, on_update_func,
deprecated_version, substitute)
{
- DBUG_ASSERT(scope() == ONLY_SESSION);
+ SYSVAR_ASSERT(scope() == ONLY_SESSION);
option.var_type= GET_NO_ARG;
}
bool do_check(THD *thd, set_var *var)
@@ -1352,9 +1367,9 @@ public:
reverse_semantics= my_count_bits(bitmask_arg) > 1;
bitmask= reverse_semantics ? ~bitmask_arg : bitmask_arg;
set(global_var_ptr(), def_val);
- DBUG_ASSERT(def_val < 2);
- DBUG_ASSERT(getopt.id == -1); // force NO_CMD_LINE
- DBUG_ASSERT(size == sizeof(ulonglong));
+ SYSVAR_ASSERT(def_val < 2);
+ SYSVAR_ASSERT(getopt.id == -1); // force NO_CMD_LINE
+ SYSVAR_ASSERT(size == sizeof(ulonglong));
}
bool session_update(THD *thd, set_var *var)
{
@@ -1422,8 +1437,8 @@ public:
deprecated_version, substitute),
read_func(read_func_arg), update_func(update_func_arg)
{
- DBUG_ASSERT(scope() == ONLY_SESSION);
- DBUG_ASSERT(getopt.id == -1); // NO_CMD_LINE, because the offset is fake
+ SYSVAR_ASSERT(scope() == ONLY_SESSION);
+ SYSVAR_ASSERT(getopt.id == -1); // NO_CMD_LINE, because the offset is fake
}
bool session_update(THD *thd, set_var *var)
{ return update_func(thd, var); }
@@ -1472,8 +1487,8 @@ public:
deprecated_version, substitute),
read_func(read_func_arg), update_func(update_func_arg)
{
- DBUG_ASSERT(scope() == ONLY_SESSION);
- DBUG_ASSERT(getopt.id == -1); // NO_CMD_LINE, because the offset is fake
+ SYSVAR_ASSERT(scope() == ONLY_SESSION);
+ SYSVAR_ASSERT(getopt.id == -1); // NO_CMD_LINE, because the offset is fake
}
bool session_update(THD *thd, set_var *var)
{ return update_func(thd, var); }
@@ -1525,13 +1540,13 @@ public:
lock, binlog_status_arg, on_check_func, on_update_func,
deprecated_version, substitute)
{
- DBUG_ASSERT(scope() == GLOBAL);
- DBUG_ASSERT(getopt.id == -1);
- DBUG_ASSERT(lock == 0);
- DBUG_ASSERT(binlog_status_arg == VARIABLE_NOT_IN_BINLOG);
- DBUG_ASSERT(is_readonly());
- DBUG_ASSERT(on_update == 0);
- DBUG_ASSERT(size == sizeof(enum SHOW_COMP_OPTION));
+ SYSVAR_ASSERT(scope() == GLOBAL);
+ SYSVAR_ASSERT(getopt.id == -1);
+ SYSVAR_ASSERT(lock == 0);
+ SYSVAR_ASSERT(binlog_status_arg == VARIABLE_NOT_IN_BINLOG);
+ SYSVAR_ASSERT(is_readonly());
+ SYSVAR_ASSERT(on_update == 0);
+ SYSVAR_ASSERT(size == sizeof(enum SHOW_COMP_OPTION));
}
bool do_check(THD *thd, set_var *var) {
DBUG_ASSERT(FALSE);
@@ -1603,8 +1618,8 @@ public:
thus all struct command-line options should be added manually
to my_long_options in mysqld.cc
*/
- DBUG_ASSERT(getopt.id == -1);
- DBUG_ASSERT(size == sizeof(void *));
+ SYSVAR_ASSERT(getopt.id == -1);
+ SYSVAR_ASSERT(size == sizeof(void *));
}
bool do_check(THD *thd, set_var *var)
{ return false; }
@@ -1665,8 +1680,8 @@ public:
lock, binlog_status_arg, on_check_func, on_update_func,
deprecated_version, substitute)
{
- DBUG_ASSERT(getopt.id == -1);
- DBUG_ASSERT(size == sizeof(Time_zone *));
+ SYSVAR_ASSERT(getopt.id == -1);
+ SYSVAR_ASSERT(size == sizeof(Time_zone *));
}
bool do_check(THD *thd, set_var *var)
{
diff --git a/sql/table.cc b/sql/table.cc
index a157dbeb15a..07fae7329c8 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1068,14 +1068,13 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
#endif
next_chunk+= 5 + partition_info_str_len;
}
- if (share->mysql_version >= 50110)
+ if (share->mysql_version >= 50110 && next_chunk < buff_end)
{
/* New auto_partitioned indicator introduced in 5.1.11 */
#ifdef WITH_PARTITION_STORAGE_ENGINE
share->auto_partitioned= *next_chunk;
#endif
next_chunk++;
- DBUG_ASSERT(next_chunk <= buff_end);
}
keyinfo= share->key_info;
for (i= 0; i < keys; i++, keyinfo++)
@@ -5571,10 +5570,11 @@ void TABLE::mark_virtual_columns_for_write(bool insert_fl)
@brief
Allocate space for keys
- @param key_count number of keys to allocate
+  @param key_count      number of additional keys to allocate
@details
- The function allocates memory to fit 'key_count' keys for this table.
+  The function allocates memory to fit 'key_count' additional keys
+  for this table.
@return FALSE space was successfully allocated
  @return TRUE  an error occurred
@@ -5582,9 +5582,11 @@ void TABLE::mark_virtual_columns_for_write(bool insert_fl)
bool TABLE::alloc_keys(uint key_count)
{
- key_info= s->key_info= (KEY*) alloc_root(&mem_root, sizeof(KEY)*key_count);
- s->keys= 0;
- max_keys= key_count;
+ key_info= (KEY*) alloc_root(&mem_root, sizeof(KEY)*(s->keys+key_count));
+ if (s->keys)
+ memmove(key_info, s->key_info, sizeof(KEY)*s->keys);
+ s->key_info= key_info;
+ max_keys= s->keys+key_count;
return !(key_info);
}
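alloc_keys() now grows the key array by key_count and copies over the keys already attached to the share rather than starting from an empty array. A standalone sketch of that grow-and-copy step, with malloc standing in for alloc_root() and a stub KEY type; as with arena allocation in the server, the old block is deliberately not freed here:

#include <cstdlib>
#include <cstring>

struct KeyStub { int dummy; };

struct TableStub
{
  KeyStub *key_info;
  unsigned keys;       // keys already present (s->keys in the server)
  unsigned max_keys;

  bool alloc_keys(unsigned key_count)
  {
    KeyStub *new_info=
      static_cast<KeyStub*>(malloc(sizeof(KeyStub) * (keys + key_count)));
    if (!new_info)
      return true;                                          // allocation failure
    if (keys)
      memmove(new_info, key_info, sizeof(KeyStub) * keys);  // keep existing keys
    key_info= new_info;
    max_keys= keys + key_count;
    return false;
  }
};

int main()
{
  TableStub t= { NULL, 0, 0 };
  return t.alloc_keys(4) ? 1 : 0;
}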
@@ -5729,7 +5731,7 @@ void TABLE::use_index(int key_to_save)
/* Drop all keys; */
i= 0;
- s->keys= (key_to_save < 0) ? 0 : 1;
+ s->keys= i;
}
/*
@@ -6094,7 +6096,7 @@ int update_virtual_fields(THD *thd, TABLE *table, bool for_write)
{
DBUG_ENTER("update_virtual_fields");
Field **vfield_ptr, *vfield;
- int error= 0;
+ int error __attribute__ ((unused))= 0;
if (!table || !table->vfield)
DBUG_RETURN(0);