author     Igor Babaev <igor@askmonty.org>    2012-02-22 13:04:58 -0800
committer  Igor Babaev <igor@askmonty.org>    2012-02-22 13:04:58 -0800
commit     90c26209ba52cb7df75cf3464a7dde8bf47531b9 (patch)
tree       bc8b3f5226fdb97a2fc78cd1809d322d3f59cc82 /sql
parent     56e1a6936bcdebb8fe672ea040235bfa9867e10e (diff)
parent     567d871ff0c9cbdcaa4f9daa6810760edc901e14 (diff)
download   mariadb-git-90c26209ba52cb7df75cf3464a7dde8bf47531b9.tar.gz
Merge.
Diffstat (limited to 'sql')
-rw-r--r--  sql/CMakeLists.txt              |   1
-rw-r--r--  sql/Makefile.am                 |   1
-rw-r--r--  sql/handler.cc                  |  21
-rw-r--r--  sql/handler.h                   |   4
-rw-r--r--  sql/item.cc                     |  31
-rw-r--r--  sql/item_cmpfunc.cc             |  10
-rw-r--r--  sql/item_subselect.cc           |  11
-rw-r--r--  sql/item_sum.cc                 |   3
-rw-r--r--  sql/item_timefunc.cc            |   8
-rw-r--r--  sql/item_timefunc.h             |   5
-rw-r--r--  sql/multi_range_read.cc         |  29
-rw-r--r--  sql/mysql_priv.h                |  10
-rw-r--r--  sql/mysqld.cc                   | 247
-rw-r--r--  sql/opt_index_cond_pushdown.cc  |  14
-rw-r--r--  sql/opt_range.cc                |  16
-rw-r--r--  sql/opt_subselect.cc            |  25
-rw-r--r--  sql/opt_sum.cc                  |   7
-rw-r--r--  sql/scheduler.cc                |   2
-rw-r--r--  sql/share/Makefile.am           |   1
-rw-r--r--  sql/signal_handler.cc           | 286
-rw-r--r--  sql/slave.cc                    |   8
-rw-r--r--  sql/sql_acl.cc                  |  34
-rw-r--r--  sql/sql_base.cc                 |  34
-rw-r--r--  sql/sql_cache.cc                |  33
-rw-r--r--  sql/sql_cache.h                 |   4
-rw-r--r--  sql/sql_class.h                 |   6
-rw-r--r--  sql/sql_delete.cc               |   7
-rw-r--r--  sql/sql_derived.cc              |   2
-rw-r--r--  sql/sql_insert.cc               |  19
-rw-r--r--  sql/sql_join_cache.cc           |  44
-rw-r--r--  sql/sql_join_cache.h            |   2
-rw-r--r--  sql/sql_lex.cc                  |   1
-rw-r--r--  sql/sql_list.h                  |   7
-rw-r--r--  sql/sql_load.cc                 |   2
-rw-r--r--  sql/sql_manager.cc              |  12
-rw-r--r--  sql/sql_parse.cc                |  23
-rw-r--r--  sql/sql_prepare.cc              |   4
-rw-r--r--  sql/sql_select.cc               |  72
-rw-r--r--  sql/sql_select.h                |  15
-rw-r--r--  sql/sql_table.cc                |   5
-rw-r--r--  sql/sql_union.cc                |   1
-rw-r--r--  sql/sql_update.cc               |   4
-rw-r--r--  sql/table.cc                    |  62
-rw-r--r--  sql/table.h                     |  16
-rw-r--r--  sql/winservice.c                |   4
45 files changed, 798 insertions(+), 355 deletions(-)
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 4354bfc60fb..f4776f8f7df 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -69,6 +69,7 @@ SET (SQL_SOURCE
sql_map.cc sql_parse.cc sql_partition.cc sql_plugin.cc
sql_prepare.cc sql_rename.cc
debug_sync.cc debug_sync.h
+ signal_handler.cc
sql_repl.cc sql_select.cc sql_show.cc sql_state.c sql_string.cc
sql_table.cc sql_test.cc sql_trigger.cc sql_udf.cc sql_union.cc
sql_update.cc sql_view.cc strfunc.cc table.cc thr_malloc.cc
diff --git a/sql/Makefile.am b/sql/Makefile.am
index 291f662328b..875bea0e2a8 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -116,6 +116,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
records.cc filesort.cc handler.cc \
ha_partition.cc \
debug_sync.cc \
+ signal_handler.cc \
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
diff --git a/sql/handler.cc b/sql/handler.cc
index cbfe01480fe..265dd5ff45e 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -4556,6 +4556,27 @@ int handler::compare_key2(key_range *range)
}
+/**
+ ICP callback - to be called by an engine to check the pushed condition
+*/
+extern "C" enum icp_result handler_index_cond_check(void* h_arg)
+{
+ handler *h= (handler*)h_arg;
+ THD *thd= h->table->in_use;
+ enum icp_result res;
+
+ if (thd_killed(thd))
+ return ICP_ABORTED_BY_USER;
+
+ if (h->end_range && h->compare_key2(h->end_range) > 0)
+ return ICP_OUT_OF_RANGE;
+ h->increment_statistics(&SSV::ha_icp_attempts);
+ if ((res= h->pushed_idx_cond->val_int()? ICP_MATCH : ICP_NO_MATCH) ==
+ ICP_MATCH)
+ h->increment_statistics(&SSV::ha_icp_match);
+ return res;
+}
+
int handler::index_read_idx_map(uchar * buf, uint index, const uchar * key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
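[Editorial note] The new handler_index_cond_check() above is the ICP entry point an engine calls per index entry: it maps the pushed condition's value to an icp_result code and bumps the Handler_icp_* counters. Below is a minimal standalone C++ sketch of the same callback pattern; all names here are hypothetical illustrations, not the actual MariaDB engine API.

    #include <cstddef>

    // Hypothetical stand-ins; the real icp_result lives in the server headers.
    enum icp_result { ICP_NO_MATCH, ICP_MATCH, ICP_OUT_OF_RANGE, ICP_ABORTED_BY_USER };
    typedef icp_result (*icp_check_fn)(void *arg);

    struct Cursor                 // engine-side cursor; the callback inspects the
    {                             // entry the cursor is currently positioned on
      const int *keys;            // index column values, one per entry
      size_t     n;
      size_t     pos;
    };

    // A pushed condition "key < 42", written callback-style like the hunk above.
    static icp_result key_below_42(void *arg)
    {
      Cursor *c= static_cast<Cursor*>(arg);
      return c->keys[c->pos] < 42 ? ICP_MATCH : ICP_NO_MATCH;
    }

    // Sketch of the engine's scan loop: evaluate the pushed condition on each
    // index entry and only fetch full rows for entries that matched.
    static size_t count_matches(Cursor *c, icp_check_fn check)
    {
      size_t matches= 0;
      for (c->pos= 0; c->pos < c->n; c->pos++)
      {
        icp_result res= check(c);
        if (res == ICP_OUT_OF_RANGE || res == ICP_ABORTED_BY_USER)
          break;                  // stop the scan entirely
        if (res == ICP_MATCH)
          matches++;              // a real engine would read the base row here
      }
      return matches;
    }

A caller would pass key_below_42 (or any other pushed condition) as the check argument; skipping ICP_NO_MATCH entries without fetching the base row is what the Handler_icp_attempts / Handler_icp_match counters measure.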
diff --git a/sql/handler.h b/sql/handler.h
index 87ac1ab9ef3..28d8a96a895 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1553,6 +1553,8 @@ public:
{}
};
+extern "C" enum icp_result handler_index_cond_check(void* h_arg);
+
uint calculate_key_len(TABLE *, uint, const uchar *, key_part_map);
/*
bitmap with first N+1 bits set
@@ -2632,6 +2634,8 @@ public:
{ return ht; }
inline int ha_write_tmp_row(uchar *buf);
inline int ha_update_tmp_row(const uchar * old_data, uchar * new_data);
+
+ friend enum icp_result handler_index_cond_check(void* h_arg);
};
#include "multi_range_read.h"
diff --git a/sql/item.cc b/sql/item.cc
index aaf9b0c2a12..f9bbc4aeead 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -9052,13 +9052,13 @@ void Item_type_holder::get_full_info(Item *item)
DBUG_ASSERT((enum_set_typelib &&
get_real_type(item) == MYSQL_TYPE_NULL) ||
(!enum_set_typelib &&
- item->type() == Item::FIELD_ITEM &&
- (get_real_type(item) == MYSQL_TYPE_ENUM ||
- get_real_type(item) == MYSQL_TYPE_SET) &&
- ((Field_enum*)((Item_field *) item)->field)->typelib));
+ item->real_item()->type() == Item::FIELD_ITEM &&
+ (get_real_type(item->real_item()) == MYSQL_TYPE_ENUM ||
+ get_real_type(item->real_item()) == MYSQL_TYPE_SET) &&
+ ((Field_enum*)((Item_field *) item->real_item())->field)->typelib));
if (!enum_set_typelib)
{
- enum_set_typelib= ((Field_enum*)((Item_field *) item)->field)->typelib;
+ enum_set_typelib= ((Field_enum*)((Item_field *) item->real_item())->field)->typelib;
}
}
}
@@ -9143,18 +9143,22 @@ void Item_ref::update_used_tables()
}
-table_map Item_direct_view_ref::used_tables() const
+table_map Item_direct_view_ref::used_tables() const
{
- return get_depended_from() ?
+ return get_depended_from() ?
OUTER_REF_TABLE_BIT :
- (view->merged ? (*ref)->used_tables() : view->table->map);
+ ((view->merged || !view->table) ?
+ (*ref)->used_tables() :
+ view->table->map);
}
-table_map Item_direct_view_ref::not_null_tables() const
+table_map Item_direct_view_ref::not_null_tables() const
{
- return get_depended_from() ?
+ return get_depended_from() ?
0 :
- (view->merged ? (*ref)->not_null_tables() : view->table->map);
+ ((view->merged || !view->table) ?
+ (*ref)->not_null_tables() :
+ view->table->map);
}
/*
@@ -9168,6 +9172,8 @@ table_map Item_ref_null_helper::used_tables() const
}
+#ifndef DBUG_OFF
+
/* Debugger help function */
static char dbug_item_print_buf[256];
@@ -9184,6 +9190,9 @@ const char *dbug_print_item(Item *item)
else
return "Couldn't fit into buffer";
}
+
+#endif /*DBUG_OFF*/
+
/*****************************************************************************
** Instantiate templates
*****************************************************************************/
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 8b8a85ca59b..ddb80a3ed81 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -5853,13 +5853,15 @@ Item* Item_equal::get_first(JOIN_TAB *context, Item *field_item)
if (emb_nest && emb_nest->sj_mat_info && emb_nest->sj_mat_info->is_used)
{
/*
- It's a field from an materialized semi-join. We can substitute it only
- for a field from the same semi-join. Find the first of such items.
+ It's a field from a materialized semi-join. We can substitute it for

+ - a constant item
+ - a field from the same semi-join
+ Find the first of such items:
*/
-
while ((item= it++))
{
- if (it.get_curr_field()->table->pos_in_table_list->embedding == emb_nest)
+ if (item->const_item() ||
+ it.get_curr_field()->table->pos_in_table_list->embedding == emb_nest)
{
/*
If we found given field then return NULL to avoid unnecessary
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index d66dbd12436..52a2c2a7d8b 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -1586,7 +1586,6 @@ Item_in_subselect::single_value_transformer(JOIN *join)
(Item**)optimizer->get_cache(),
(char *)"<no matter>",
(char *)in_left_expr_name);
-
}
DBUG_RETURN(false);
@@ -2229,7 +2228,12 @@ bool Item_in_subselect::create_in_to_exists_cond(JOIN *join_arg)
/*
The IN=>EXISTS transformation makes non-correlated subqueries correlated.
*/
- join_arg->select_lex->uncacheable|= UNCACHEABLE_DEPENDENT_INJECTED;
+ if (!left_expr->const_item() || left_expr->is_expensive())
+ {
+ join_arg->select_lex->uncacheable|= UNCACHEABLE_DEPENDENT_INJECTED;
+ join_arg->select_lex->master_unit()->uncacheable|=
+ UNCACHEABLE_DEPENDENT_INJECTED;
+ }
/*
The uncacheable property controls a number of actions, e.g. whether to
save/restore (via init_save_join_tab/restore_tmp) the original JOIN for
@@ -2495,6 +2499,7 @@ bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref)
left_expr && !left_expr->fixed &&
left_expr->fix_fields(thd_arg, &left_expr))
return TRUE;
+ else
if (Item_subselect::fix_fields(thd_arg, ref))
return TRUE;
fixed= TRUE;
@@ -3142,6 +3147,8 @@ bool subselect_uniquesubquery_engine::copy_ref_key()
for (store_key **copy= tab->ref.key_copy ; *copy ; copy++)
{
+ if ((*copy)->store_key_is_const())
+ continue;
tab->ref.key_err= (*copy)->copy();
/*
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 183f8ccff10..60ec27863bd 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -582,13 +582,12 @@ Item_sum_num::fix_fields(THD *thd, Item **ref)
return TRUE;
decimals=0;
- maybe_null=0;
+ maybe_null= sum_func() != COUNT_FUNC;
for (uint i=0 ; i < arg_count ; i++)
{
if (args[i]->fix_fields(thd, args + i) || args[i]->check_cols(1))
return TRUE;
set_if_bigger(decimals, args[i]->decimals);
- maybe_null |= args[i]->maybe_null;
}
result_field=0;
max_length=float_length(decimals);
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index e01a4399cbe..1002cf9fea8 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -1134,15 +1134,7 @@ bool Item_func_unix_timestamp::get_timestamp_value(my_time_t *seconds,
MYSQL_TIME ltime;
if (get_arg0_date(&ltime, 0))
- {
- /*
- We have to set null_value again because get_arg0_date will also set it
- to true if we have wrong datetime parameter (and we should return 0 in
- this case).
- */
- null_value= args[0]->null_value;
return 1;
- }
uint error_code;
*seconds= TIME_to_timestamp(current_thd, &ltime, &error_code);
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index e1bf53a5baf..8d19e59ddfb 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -385,11 +385,6 @@ public:
Item_func_unix_timestamp(Item *a) :Item_func_seconds_hybrid(a) {}
const char *func_name() const { return "unix_timestamp"; }
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- void fix_num_length_and_dec()
- {
- maybe_null= false;
- Item_func_seconds_hybrid::fix_num_length_and_dec();
- }
/*
UNIX_TIMESTAMP() depends on the current timezone
(and thus may not be used as a partitioning function)
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc
index 055a9268417..e86c9ca251c 100644
--- a/sql/multi_range_read.cc
+++ b/sql/multi_range_read.cc
@@ -466,6 +466,10 @@ void Mrr_ordered_index_reader::resume_read()
/**
Fill the buffer with (lookup_tuple, range_id) pairs and sort
+
+ @return
+ 0 OK, the buffer is non-empty and sorted
+ HA_ERR_END_OF_FILE Source exhausted, the buffer is empty.
*/
int Mrr_ordered_index_reader::refill_buffer(bool initial)
@@ -502,6 +506,13 @@ int Mrr_ordered_index_reader::refill_buffer(bool initial)
if (source_exhausted && key_buffer->is_empty())
DBUG_RETURN(HA_ERR_END_OF_FILE);
+ if (!initial)
+ {
+ /* This is a non-initial buffer fill and we've got a non-empty buffer */
+ THD *thd= current_thd;
+ status_var_increment(thd->status_var.ha_mrr_key_refills_count);
+ }
+
key_buffer->sort((key_buffer->type() == Lifo_buffer::FORWARD)?
(qsort2_cmp)Mrr_ordered_index_reader::compare_keys_reverse :
(qsort2_cmp)Mrr_ordered_index_reader::compare_keys,
@@ -576,6 +587,7 @@ int Mrr_ordered_rndpos_reader::init(handler *h_arg,
int Mrr_ordered_rndpos_reader::refill_buffer(bool initial)
{
int res;
+ bool first_call= initial;
DBUG_ENTER("Mrr_ordered_rndpos_reader::refill_buffer");
if (index_reader_exhausted)
@@ -593,6 +605,14 @@ int Mrr_ordered_rndpos_reader::refill_buffer(bool initial)
initial= FALSE;
index_reader_needs_refill= FALSE;
}
+
+ if (!first_call && !index_reader_exhausted)
+ {
+ /* Ok, this was a successful buffer refill operation */
+ THD *thd= current_thd;
+ status_var_increment(thd->status_var.ha_mrr_rowid_refills_count);
+ }
+
DBUG_RETURN(res);
}
@@ -825,9 +845,6 @@ int DsMrr_impl::dsmrr_init(handler *h_arg, RANGE_SEQ_IF *seq_funcs,
strategy= disk_strategy= &reader_factory.ordered_rndpos_reader;
}
- if (is_mrr_assoc)
- status_var_increment(thd->status_var.ha_multi_range_read_init_count);
-
full_buf= buf->buffer;
full_buf_end= buf->buffer_end;
@@ -917,6 +934,12 @@ int DsMrr_impl::dsmrr_init(handler *h_arg, RANGE_SEQ_IF *seq_funcs,
goto error;
}
}
+
+ /*
+ At this point, we're sure that we're running a native MRR scan (i.e. we
+ didnt fall back to default implementation for some reason).
+ */
+ status_var_increment(thd->status_var.ha_mrr_init_count);
res= strategy->refill_buffer(TRUE);
if (res)
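[Editorial note] The refill logic above follows a fill/sort/drain cycle, and the new Handler_mrr_key_refills / Handler_mrr_rowid_refills counters are incremented only for refills after the initial fill. A small self-contained sketch of that counting pattern (simplified, with hypothetical types; not the actual Mrr_ordered_index_reader code):

    #include <vector>
    #include <algorithm>
    #include <cstddef>

    static const int HA_ERR_END_OF_FILE_SKETCH= 137;   // stand-in error code

    // Fill-sort-drain buffer with a refill counter that mirrors the way
    // ha_mrr_key_refills_count is incremented only on non-initial refills.
    struct BufferedReader
    {
      const std::vector<int> *source;
      size_t next= 0;                 // next source position to copy from
      size_t batch= 4;                // buffer capacity
      std::vector<int> buffer;        // current sorted batch
      unsigned long refills= 0;       // analogue of the status counter

      int refill(bool initial)
      {
        size_t end= std::min(next + batch, source->size());
        buffer.assign(source->begin() + next, source->begin() + end);
        next= end;
        if (buffer.empty())
          return HA_ERR_END_OF_FILE_SKETCH;  // source exhausted, buffer empty
        if (!initial)
          refills++;                         // don't count the very first fill
        std::sort(buffer.begin(), buffer.end());
        return 0;                            // buffer is non-empty and sorted
      }
    };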
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 574d2ba6568..71498bb3d41 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -1069,6 +1069,7 @@ struct Query_cache_query_flags
(((L)->sql_command == SQLCOM_SELECT) && (L)->safe_to_cache_query)
#else
#define QUERY_CACHE_FLAGS_SIZE 0
+#define QUERY_CACHE_DB_LENGTH_SIZE 0
#define query_cache_store_query(A, B) do { } while(0)
#define query_cache_destroy() do { } while(0)
#define query_cache_result_size_limit(A) do { } while(0)
@@ -2032,6 +2033,7 @@ void sql_perror(const char *message);
bool fn_format_relative_to_data_home(char * to, const char *name,
const char *dir, const char *extension);
+void set_server_version(void);
/**
Test a file path to determine if the path is compatible with the secure file
path restriction.
@@ -2182,13 +2184,13 @@ extern ulong binlog_checksum_options;
extern ulong opt_tc_log_size, tc_log_max_pages_used, tc_log_page_size;
extern ulong tc_log_page_waits;
extern my_bool relay_log_purge, opt_innodb_safe_binlog, opt_innodb;
+extern my_bool opt_expect_abort, opt_stack_trace;
extern uint test_flags,select_errors,ha_open_options;
extern uint protocol_version, mysqld_port, mysqld_extra_port, dropping_tables;
extern uint delay_key_write_options;
-extern ulong max_long_data_size;
+extern ulong max_long_data_size, max_used_connections;
extern uint internal_tmp_table_max_key_length;
extern uint internal_tmp_table_max_key_segments;
-
#endif /* MYSQL_SERVER */
#if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS
extern MYSQL_PLUGIN_IMPORT uint lower_case_table_names;
@@ -2258,7 +2260,7 @@ extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open, LOCK_lock_db,
LOCK_mapped_file,LOCK_user_locks, LOCK_status,
LOCK_error_log, LOCK_delayed_insert, LOCK_short_uuid_generator,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
- LOCK_slave_list, LOCK_active_mi, LOCK_manager, LOCK_global_read_lock,
+ LOCK_slave_list, LOCK_active_mi, LOCK_global_read_lock,
LOCK_global_system_variables, LOCK_user_conn,
LOCK_prepared_stmt_count,
LOCK_bytes_sent, LOCK_bytes_received, LOCK_connection_count;
@@ -2276,7 +2278,7 @@ extern pthread_mutex_t LOCK_stats;
extern int mysqld_server_started;
extern rw_lock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
extern rw_lock_t LOCK_system_variables_hash;
-extern pthread_cond_t COND_refresh, COND_thread_count, COND_manager;
+extern pthread_cond_t COND_refresh, COND_thread_count;
extern pthread_cond_t COND_global_read_lock;
extern pthread_attr_t connection_attrib;
extern I_List<THD> threads;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 1ff73b833d4..1a692871dc6 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -117,9 +117,6 @@ extern "C" { // Because of SCO 3.2V4.2
#ifdef __WIN__
#include <crtdbg.h>
-#define SIGNAL_FMT "exception 0x%x"
-#else
-#define SIGNAL_FMT "signal %d"
#endif
#ifdef __NETWARE__
@@ -263,7 +260,7 @@ inline void setup_fpu()
extern "C" int gethostname(char *name, int namelen);
#endif
-extern "C" sig_handler handle_segfault(int sig);
+extern "C" sig_handler handle_fatal_signal(int sig);
#if defined(__linux__)
#define ENABLE_TEMP_POOL 1
@@ -460,6 +457,10 @@ TYPELIB log_output_typelib= {array_elements(log_output_names)-1,"",
/* static variables */
+#ifdef HAVE_NPTL
+volatile sig_atomic_t ld_assume_kernel_is_set= 0;
+#endif
+
/* the default log output is log tables */
static bool lower_case_table_names_used= 0;
static bool max_long_data_size_used= false;
@@ -467,7 +468,7 @@ static bool volatile select_thread_in_use, signal_thread_in_use;
static bool volatile ready_to_exit;
static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0;
static my_bool opt_short_log_format= 0;
-static my_bool opt_ignore_wrong_options= 0, opt_expect_abort= 0;
+static my_bool opt_ignore_wrong_options= 0;
static my_bool opt_sync= 0, opt_thread_alarm;
/*
Set this to 1 if you want to that 'strict mode' should affect all date
@@ -475,11 +476,10 @@ static my_bool opt_sync= 0, opt_thread_alarm;
dates into a table.
*/
my_bool strict_date_checking= 0;
-
static uint kill_cached_threads, wake_thread;
ulong thread_created;
uint thread_handling;
-static ulong max_used_connections;
+ulong max_used_connections;
static ulong my_bind_addr; /**< the address we bind to */
static volatile ulong cached_thread_count= 0;
static const char *sql_mode_str= "OFF";
@@ -640,7 +640,7 @@ TYPELIB binlog_format_typelib=
ulong opt_binlog_format_id= (ulong) BINLOG_FORMAT_UNSPEC;
const char *opt_binlog_format= binlog_format_names[opt_binlog_format_id];
#ifdef HAVE_INITGROUPS
-static bool calling_initgroups= FALSE; /**< Used in SIGSEGV handler. */
+volatile sig_atomic_t calling_initgroups= 0; /**< Used in SIGSEGV handler. */
#endif
uint mysqld_port, test_flags, select_errors, dropping_tables, ha_open_options;
uint mysqld_extra_port;
@@ -831,8 +831,11 @@ char *opt_logname, *opt_slow_logname;
/* Static variables */
-static bool kill_in_progress, segfaulted;
-static my_bool opt_stack_trace;
+static volatile sig_atomic_t kill_in_progress;
+#ifdef HAVE_STACKTRACE
+my_bool opt_stack_trace;
+#endif /* HAVE_STACKTRACE */
+my_bool opt_expect_abort= 0;
static my_bool opt_bootstrap, opt_myisam_log;
static int cleanup_done;
static ulong opt_specialflag, opt_myisam_block_size;
@@ -943,7 +946,6 @@ pthread_handler_t signal_hand(void *arg);
static int mysql_init_variables(void);
static int get_options(int *argc,char **argv);
extern "C" my_bool mysqld_get_one_option(int, const struct my_option *, char *);
-static void set_server_version(void);
static int init_thread_environment();
static char *get_relative_path(const char *path);
static int fix_paths(void);
@@ -1590,7 +1592,6 @@ static void clean_up_mutexes()
(void) pthread_mutex_destroy(&LOCK_delayed_insert);
(void) pthread_mutex_destroy(&LOCK_delayed_status);
(void) pthread_mutex_destroy(&LOCK_delayed_create);
- (void) pthread_mutex_destroy(&LOCK_manager);
(void) pthread_mutex_destroy(&LOCK_crypt);
(void) pthread_mutex_destroy(&LOCK_bytes_sent);
(void) pthread_mutex_destroy(&LOCK_bytes_received);
@@ -1631,7 +1632,6 @@ static void clean_up_mutexes()
(void) pthread_cond_destroy(&COND_global_read_lock);
(void) pthread_cond_destroy(&COND_thread_cache);
(void) pthread_cond_destroy(&COND_flush_thread_cache);
- (void) pthread_cond_destroy(&COND_manager);
DBUG_VOID_RETURN;
}
@@ -1782,9 +1782,9 @@ static void set_user(const char *user, struct passwd *user_info_arg)
calling_initgroups as a flag to the SIGSEGV handler that is then used to
output a specific message to help the user resolve this problem.
*/
- calling_initgroups= TRUE;
+ calling_initgroups= 1;
initgroups((char*) user, user_info_arg->pw_gid);
- calling_initgroups= FALSE;
+ calling_initgroups= 0;
#endif
if (setgid(user_info_arg->pw_gid) == -1)
{
@@ -2351,7 +2351,7 @@ LONG WINAPI my_unhandler_exception_filter(EXCEPTION_POINTERS *ex_pointers)
__try
{
my_set_exception_pointers(ex_pointers);
- handle_segfault(ex_pointers->ExceptionRecord->ExceptionCode);
+ handle_fatal_signal(ex_pointers->ExceptionRecord->ExceptionCode);
}
__except(EXCEPTION_EXECUTE_HANDLER)
{
@@ -2662,195 +2662,6 @@ extern "C" char *my_demangle(const char *mangled_name, int *status)
#endif
-extern "C" sig_handler handle_segfault(int sig)
-{
- time_t curr_time;
- struct tm tm;
-#ifdef HAVE_STACKTRACE
- THD *thd=current_thd;
-#endif
-
- /*
- Strictly speaking, one needs a mutex here
- but since we have got SIGSEGV already, things are a mess
- so not having the mutex is not as bad as possibly using a buggy
- mutex - so we keep things simple
- */
- if (segfaulted)
- {
- fprintf(stderr, "Fatal " SIGNAL_FMT " while backtracing\n", sig);
- exit(1);
- }
-
- segfaulted = 1;
-
- curr_time= my_time(0);
- localtime_r(&curr_time, &tm);
-
- fprintf(stderr, "%02d%02d%02d %2d:%02d:%02d ",
- tm.tm_year % 100, tm.tm_mon+1, tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec);
- if (opt_expect_abort && sig == SIGABRT)
- {
- fprintf(stderr,"[Note] mysqld did an expected abort\n");
- goto end;
- }
-
- fprintf(stderr,"[ERROR] mysqld got " SIGNAL_FMT " ;\n\
-This could be because you hit a bug. It is also possible that this binary\n\
-or one of the libraries it was linked against is corrupt, improperly built,\n\
-or misconfigured. This error can also be caused by malfunctioning hardware.\n",
- sig);
- fprintf(stderr, "\
-We will try our best to scrape up some info that will hopefully help diagnose\n\
-the problem, but since we have already crashed, something is definitely wrong\n\
-and this may fail.\n\n");
- set_server_version();
- fprintf(stderr, "Server version: %s\n", server_version);
- fprintf(stderr, "key_buffer_size=%lu\n",
- (ulong) dflt_key_cache->key_cache_mem_size);
- fprintf(stderr, "read_buffer_size=%ld\n", (long) global_system_variables.read_buff_size);
- fprintf(stderr, "max_used_connections=%lu\n", max_used_connections);
- fprintf(stderr, "max_threads=%u\n", thread_scheduler.max_threads +
- (uint) extra_max_connections);
- fprintf(stderr, "threads_connected=%u\n", thread_count);
- fprintf(stderr, "It is possible that mysqld could use up to \n\
-key_buffer_size + (read_buffer_size + sort_buffer_size)*max_threads = %lu K\n\
-bytes of memory\n", ((ulong) dflt_key_cache->key_cache_mem_size +
- (global_system_variables.read_buff_size +
- global_system_variables.sortbuff_size) *
- (thread_scheduler.max_threads + extra_max_connections) +
- (max_connections + extra_max_connections)* sizeof(THD))
- / 1024);
- fprintf(stderr, "Hope that's ok; if not, decrease some variables in the equation.\n\n");
-
-#if defined(HAVE_LINUXTHREADS)
- if (sizeof(char*) == 4 && thread_count > UNSAFE_DEFAULT_LINUX_THREADS)
- {
- fprintf(stderr, "\
-You seem to be running 32-bit Linux and have %d concurrent connections.\n\
-If you have not changed STACK_SIZE in LinuxThreads and built the binary \n\
-yourself, LinuxThreads is quite likely to steal a part of the global heap for\n\
-the thread stack. Please read http://dev.mysql.com/doc/mysql/en/linux.html\n\n",
- thread_count);
- }
-#endif /* HAVE_LINUXTHREADS */
-
-#ifdef HAVE_STACKTRACE
-
- if (opt_stack_trace)
- {
- fprintf(stderr, "Thread pointer: 0x%lx\n", (long) thd);
- fprintf(stderr, "Attempting backtrace. You can use the following "
- "information to find out\nwhere mysqld died. If "
- "you see no messages after this, something went\n"
- "terribly wrong...\n");
- my_print_stacktrace(thd ? (uchar*) thd->thread_stack : NULL,
- my_thread_stack_size);
- }
- if (thd)
- {
- const char *kreason= "UNKNOWN";
- switch (thd->killed) {
- case NOT_KILLED:
- case KILL_HARD_BIT:
- kreason= "NOT_KILLED";
- break;
- case KILL_BAD_DATA:
- case KILL_BAD_DATA_HARD:
- kreason= "KILL_BAD_DATA";
- break;
- case KILL_CONNECTION:
- case KILL_CONNECTION_HARD:
- kreason= "KILL_CONNECTION";
- break;
- case KILL_QUERY:
- case KILL_QUERY_HARD:
- kreason= "KILL_QUERY";
- break;
- case KILL_SYSTEM_THREAD:
- case KILL_SYSTEM_THREAD_HARD:
- kreason= "KILL_SYSTEM_THREAD";
- break;
- case KILL_SERVER:
- case KILL_SERVER_HARD:
- kreason= "KILL_SERVER";
- break;
- }
- fprintf(stderr, "\nTrying to get some variables.\n"
- "Some pointers may be invalid and cause the dump to abort.\n");
- fprintf(stderr, "Query (%p): ", thd->query());
- my_safe_print_str(thd->query(), min(65536,thd->query_length()));
- fprintf(stderr, "Connection ID (thread ID): %lu\n", (ulong) thd->thread_id);
- fprintf(stderr, "Status: %s\n", kreason);
- fprintf(stderr, "Optimizer switch: ");
- ulonglong optsw= thd->variables.optimizer_switch;
- for (uint i= 0; optimizer_switch_names[i+1]; i++, optsw >>= 1)
- {
- if (i)
- fputc(',', stderr);
- fprintf(stderr, "%s=%s",
- optimizer_switch_names[i], optsw & 1 ? "on" : "off");
- }
- fprintf(stderr, "\n\n");
- }
- fprintf(stderr, "\
-The manual page at http://dev.mysql.com/doc/mysql/en/crashing.html contains\n\
-information that should help you find out what is causing the crash.\n");
- fflush(stderr);
-#endif /* HAVE_STACKTRACE */
-
-#ifdef HAVE_INITGROUPS
- if (calling_initgroups)
- fprintf(stderr, "\n\
-This crash occured while the server was calling initgroups(). This is\n\
-often due to the use of a mysqld that is statically linked against glibc\n\
-and configured to use LDAP in /etc/nsswitch.conf. You will need to either\n\
-upgrade to a version of glibc that does not have this problem (2.3.4 or\n\
-later when used with nscd), disable LDAP in your nsswitch.conf, or use a\n\
-mysqld that is not statically linked.\n");
-#endif
-
-#ifdef HAVE_NPTL
- if (thd_lib_detected == THD_LIB_LT && !getenv("LD_ASSUME_KERNEL"))
- fprintf(stderr,"\n\
-You are running a statically-linked LinuxThreads binary on an NPTL system.\n\
-This can result in crashes on some distributions due to LT/NPTL conflicts.\n\
-You should either build a dynamically-linked binary, or force LinuxThreads\n\
-to be used with the LD_ASSUME_KERNEL environment variable. Please consult\n\
-the documentation for your distribution on how to do that.\n");
-#endif
-
- if (locked_in_memory)
- {
- fprintf(stderr, "\n\
-The \"--memlock\" argument, which was enabled, uses system calls that are\n\
-unreliable and unstable on some operating systems and operating-system\n\
-versions (notably, some versions of Linux). This crash could be due to use\n\
-of those buggy OS calls. You should consider whether you really need the\n\
-\"--memlock\" parameter and/or consult the OS distributer about \"mlockall\"\n\
-bugs.\n");
- }
-
-#ifdef HAVE_WRITE_CORE
- if (test_flags & TEST_CORE_ON_SIGNAL)
- {
- fprintf(stderr, "Writing a core file\n");
- fflush(stderr);
- my_write_core(sig);
- }
-#endif
-
-end:
-#ifndef __WIN__
- /* Terminate */
- exit(1);
-#else
- /* On Windows, do not terminate, but pass control to exception filter */
- ;
-#endif
-}
-
#if !defined(__WIN__) && !defined(__NETWARE__)
#ifndef SA_RESETHAND
#define SA_RESETHAND 0
@@ -2879,9 +2690,9 @@ static void init_signals(void)
my_init_stacktrace();
#endif
#if defined(__amiga__)
- sa.sa_handler=(void(*)())handle_segfault;
+ sa.sa_handler=(void(*)())handle_fatal_signal;
#else
- sa.sa_handler=handle_segfault;
+ sa.sa_handler=handle_fatal_signal;
#endif
sigaction(SIGSEGV, &sa, NULL);
sigaction(SIGABRT, &sa, NULL);
@@ -3889,7 +3700,6 @@ static int init_thread_environment()
(void) pthread_mutex_init(&LOCK_delayed_insert,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_delayed_status,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_delayed_create,MY_MUTEX_INIT_SLOW);
- (void) pthread_mutex_init(&LOCK_manager,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_crypt,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_bytes_sent,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_bytes_received,MY_MUTEX_INIT_FAST);
@@ -3929,7 +3739,6 @@ static int init_thread_environment()
(void) pthread_cond_init(&COND_global_read_lock,NULL);
(void) pthread_cond_init(&COND_thread_cache,NULL);
(void) pthread_cond_init(&COND_flush_thread_cache,NULL);
- (void) pthread_cond_init(&COND_manager,NULL);
#ifdef HAVE_REPLICATION
(void) pthread_mutex_init(&LOCK_rpl_status, MY_MUTEX_INIT_FAST);
(void) pthread_cond_init(&COND_rpl_status, NULL);
@@ -4678,6 +4487,10 @@ int win_main(int argc, char **argv)
int main(int argc, char **argv)
#endif
{
+#ifdef HAVE_NPTL
+ ld_assume_kernel_is_set= (getenv("LD_ASSUME_KERNEL") != 0);
+#endif
+
MY_INIT(argv[0]); // init my_sys library & pthreads
/* nothing should come before this line ^^^ */
@@ -8290,6 +8103,14 @@ SHOW_VAR status_vars[]= {
{"Handler_commit", (char*) offsetof(STATUS_VAR, ha_commit_count), SHOW_LONG_STATUS},
{"Handler_delete", (char*) offsetof(STATUS_VAR, ha_delete_count), SHOW_LONG_STATUS},
{"Handler_discover", (char*) offsetof(STATUS_VAR, ha_discover_count), SHOW_LONG_STATUS},
+
+ {"Handler_icp_attempts ", (char*) offsetof(STATUS_VAR, ha_icp_attempts), SHOW_LONG_STATUS},
+ {"Handler_icp_match", (char*) offsetof(STATUS_VAR, ha_icp_match), SHOW_LONG_STATUS},
+
+ {"Handler_mrr_init", (char*) offsetof(STATUS_VAR, ha_mrr_init_count), SHOW_LONG_STATUS},
+ {"Handler_mrr_key_refills", (char*) offsetof(STATUS_VAR, ha_mrr_key_refills_count), SHOW_LONG_STATUS},
+ {"Handler_mrr_rowid_refills", (char*) offsetof(STATUS_VAR, ha_mrr_rowid_refills_count), SHOW_LONG_STATUS},
+
{"Handler_prepare", (char*) offsetof(STATUS_VAR, ha_prepare_count), SHOW_LONG_STATUS},
{"Handler_read_first", (char*) offsetof(STATUS_VAR, ha_read_first_count), SHOW_LONG_STATUS},
{"Handler_read_key", (char*) offsetof(STATUS_VAR, ha_read_key_count), SHOW_LONG_STATUS},
@@ -8301,10 +8122,10 @@ SHOW_VAR status_vars[]= {
{"Handler_rollback", (char*) offsetof(STATUS_VAR, ha_rollback_count), SHOW_LONG_STATUS},
{"Handler_savepoint", (char*) offsetof(STATUS_VAR, ha_savepoint_count), SHOW_LONG_STATUS},
{"Handler_savepoint_rollback",(char*) offsetof(STATUS_VAR, ha_savepoint_rollback_count), SHOW_LONG_STATUS},
- {"Handler_update", (char*) offsetof(STATUS_VAR, ha_update_count), SHOW_LONG_STATUS},
- {"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), SHOW_LONG_STATUS},
{"Handler_tmp_update", (char*) offsetof(STATUS_VAR, ha_tmp_update_count), SHOW_LONG_STATUS},
{"Handler_tmp_write", (char*) offsetof(STATUS_VAR, ha_tmp_write_count), SHOW_LONG_STATUS},
+ {"Handler_update", (char*) offsetof(STATUS_VAR, ha_update_count), SHOW_LONG_STATUS},
+ {"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), SHOW_LONG_STATUS},
{"Key", (char*) &show_default_keycache, SHOW_FUNC},
{"Last_query_cost", (char*) offsetof(STATUS_VAR, last_query_cost), SHOW_DOUBLE_STATUS},
{"Max_used_connections", (char*) &max_used_connections, SHOW_LONG},
@@ -8510,7 +8331,7 @@ static int mysql_init_variables(void)
opt_secure_file_priv= 0;
opt_bootstrap= opt_myisam_log= 0;
mqh_used= 0;
- segfaulted= kill_in_progress= 0;
+ kill_in_progress= 0;
cleanup_done= 0;
defaults_argc= 0;
defaults_argv= 0;
@@ -9684,7 +9505,7 @@ static int get_options(int *argc,char **argv)
(MYSQL_SERVER_SUFFIX is set by the compilation environment)
*/
-static void set_server_version(void)
+void set_server_version(void)
{
char *end= strxmov(server_version, MYSQL_SERVER_VERSION,
MYSQL_SERVER_SUFFIX_STR, NullS);
diff --git a/sql/opt_index_cond_pushdown.cc b/sql/opt_index_cond_pushdown.cc
index 5240267b4ac..c2b49d863a1 100644
--- a/sql/opt_index_cond_pushdown.cc
+++ b/sql/opt_index_cond_pushdown.cc
@@ -329,13 +329,23 @@ void push_index_cond(JOIN_TAB *tab, uint keyno)
{
DBUG_ENTER("push_index_cond");
Item *idx_cond;
-
+
+ /*
+ Backported the following from MySQL 5.6:
+ 6. The index is not a clustered index. The performance improvement
+ of pushing an index condition on a clustered key is much lower
+ than on a non-clustered key. This restriction should be
+ re-evaluated when WL#6061 is implemented.
+ */
if ((tab->table->file->index_flags(keyno, 0, 1) &
HA_DO_INDEX_COND_PUSHDOWN) &&
optimizer_flag(tab->join->thd, OPTIMIZER_SWITCH_INDEX_COND_PUSHDOWN) &&
tab->join->thd->lex->sql_command != SQLCOM_UPDATE_MULTI &&
tab->join->thd->lex->sql_command != SQLCOM_DELETE_MULTI &&
- tab->type != JT_CONST && tab->type != JT_SYSTEM)
+ tab->type != JT_CONST && tab->type != JT_SYSTEM &&
+ !(keyno == tab->table->s->primary_key && // (6)
+ tab->table->file->primary_key_is_clustered())) // (6)
+
{
DBUG_EXECUTE("where",
print_where(tab->select_cond, "full cond", QT_ORDINARY););
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 97f6dffe8e3..200c3efaa98 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -552,6 +552,14 @@ public:
increment_use_count(1);
use_count++;
}
+ void incr_refs_all()
+ {
+ for (SEL_ARG *pos=first(); pos ; pos=pos->next)
+ {
+ pos->increment_use_count(1);
+ }
+ use_count++;
+ }
void free_tree()
{
for (SEL_ARG *pos=first(); pos ; pos=pos->next)
@@ -1090,9 +1098,11 @@ int SEL_IMERGE::and_sel_tree(RANGE_OPT_PARAM *param, SEL_TREE *tree,
for (SEL_TREE** or_tree= trees; or_tree != trees_next; or_tree++)
{
SEL_TREE *res_or_tree= 0;
- if (!(res_or_tree= new SEL_TREE()))
+ SEL_TREE *and_tree= 0;
+ if (!(res_or_tree= new SEL_TREE()) ||
+ !(and_tree= new SEL_TREE(tree, TRUE, param)))
return (-1);
- if (!and_range_trees(param, *or_tree, tree, res_or_tree))
+ if (!and_range_trees(param, *or_tree, and_tree, res_or_tree))
{
if (new_imerge->or_sel_tree(param, res_or_tree))
return (-1);
@@ -1305,7 +1315,7 @@ SEL_TREE::SEL_TREE(SEL_TREE *arg, bool without_merges,
for (uint idx= 0; idx < param->keys; idx++)
{
if ((keys[idx]= arg->keys[idx]))
- keys[idx]->incr_refs();
+ keys[idx]->incr_refs_all();
}
if (without_merges)
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 8e043b17bcf..d01ef381806 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -4079,7 +4079,8 @@ int setup_semijoin_dups_elimination(JOIN *join, ulonglong options,
{
uint i;
DBUG_ENTER("setup_semijoin_dups_elimination");
-
+
+ join->complex_firstmatch_tables= table_map(0);
POSITION *pos= join->best_positions + join->const_tables;
for (i= join->const_tables ; i < join->top_join_tab_count; )
@@ -4098,6 +4099,11 @@ int setup_semijoin_dups_elimination(JOIN *join, ulonglong options,
{
/* We jump from the last table to the first one */
tab->loosescan_match_tab= tab + pos->n_sj_tables - 1;
+
+ /* LooseScan requires records to be produced in order */
+ if (tab->select && tab->select->quick)
+ tab->select->quick->need_sorted_output();
+
for (uint j= i; j < i + pos->n_sj_tables; j++)
join->join_tab[j].inside_loosescan_range= TRUE;
@@ -4107,6 +4113,7 @@ int setup_semijoin_dups_elimination(JOIN *join, ulonglong options,
for (uint kp=0; kp < pos->loosescan_picker.loosescan_parts; kp++)
keylen += tab->table->key_info[keyno].key_part[kp].store_length;
+ tab->loosescan_key= keyno;
tab->loosescan_key_len= keylen;
if (pos->n_sj_tables > 1)
tab[pos->n_sj_tables - 1].do_firstmatch= tab;
@@ -4164,8 +4171,13 @@ int setup_semijoin_dups_elimination(JOIN *join, ulonglong options,
{
JOIN_TAB *j;
JOIN_TAB *jump_to= tab-1;
+
+ bool complex_range= FALSE;
+ table_map tables_in_range= table_map(0);
+
for (j= tab; j != tab + pos->n_sj_tables; j++)
{
+ tables_in_range |= j->table->map;
if (!j->emb_sj_nest)
{
/*
@@ -4175,11 +4187,12 @@ int setup_semijoin_dups_elimination(JOIN *join, ulonglong options,
SELECT * FROM ot1, nt1 WHERE ot1.col IN (SELECT expr FROM it1, it2)
with a join order of
-
- ot1 it1 nt1 nt2
+ +----- FirstMatch range ----+
+ | |
+ ot1 it1 nt1 nt2 it2 it3 ...
| ^
- | +-------- 'j' point here
+ | +-------- 'j' points here
+------------- SJ_OPT_FIRST_MATCH was set for this table as
it's the first one that produces duplicates
@@ -4194,6 +4207,7 @@ int setup_semijoin_dups_elimination(JOIN *join, ulonglong options,
j[-1].do_firstmatch= jump_to;
jump_to= j; /* Jump back to us */
+ complex_range= TRUE;
}
else
{
@@ -4204,6 +4218,9 @@ int setup_semijoin_dups_elimination(JOIN *join, ulonglong options,
j[-1].do_firstmatch= jump_to;
i+= pos->n_sj_tables;
pos+= pos->n_sj_tables;
+
+ if (complex_range)
+ join->complex_firstmatch_tables|= tables_in_range;
break;
}
case SJ_OPT_NONE:
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index 60830d8be69..9fece2dadb6 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -623,7 +623,12 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo,
if (!cond)
DBUG_RETURN(TRUE);
Field *field= field_part->field;
- if (!(cond->used_tables() & field->table->map))
+ if (cond->used_tables() & OUTER_REF_TABLE_BIT)
+ {
+ DBUG_RETURN(FALSE);
+ }
+ if (!(cond->used_tables() & field->table->map) &&
+ test(cond->used_tables() & ~PSEUDO_TABLE_BITS))
{
/* Condition doesn't restrict the used table */
DBUG_RETURN(!cond->const_item());
diff --git a/sql/scheduler.cc b/sql/scheduler.cc
index 5b83bb4753e..2f8aa2bef11 100644
--- a/sql/scheduler.cc
+++ b/sql/scheduler.cc
@@ -512,7 +512,7 @@ static void libevent_connection_close(THD *thd)
thd->killed= KILL_CONNECTION; // Avoid error messages
- if (thd->net.vio->sd >= 0) // not already closed
+ if (thd->net.vio->type != VIO_CLOSED) // not already closed
{
end_connection(thd);
close_connection(thd, 0, 1);
diff --git a/sql/share/Makefile.am b/sql/share/Makefile.am
index 892a720900e..8b7792481e6 100644
--- a/sql/share/Makefile.am
+++ b/sql/share/Makefile.am
@@ -49,6 +49,7 @@ install-data-local:
$(INSTALL_DATA) $(srcdir)/charsets/*.xml $(DESTDIR)$(pkgdatadir)/charsets
# FIXME maybe shouldn't remove, could be needed by other installation?
+# Note that this removes the directory that support-files are using!
uninstall-local:
@RM@ -f -r $(DESTDIR)$(pkgdatadir)
diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc
new file mode 100644
index 00000000000..819b87e3fdc
--- /dev/null
+++ b/sql/signal_handler.cc
@@ -0,0 +1,286 @@
+/* Copyright (c) 2011, Oracle and/or its affiliates.
+ Copyright (c) 2011, Monty Program Ab.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */
+
+#include "my_global.h"
+#include <signal.h>
+
+#include "mysql_priv.h"
+#include "my_stacktrace.h"
+
+#ifdef __WIN__
+#include <crtdbg.h>
+#define SIGNAL_FMT "exception 0x%x"
+#else
+#define SIGNAL_FMT "signal %d"
+#endif
+
+/*
+ We are handling signals/exceptions in this file.
+ Any global variables we read should be 'volatile sig_atomic_t'
+ to guarantee that we read some consistent value.
+ */
+static volatile sig_atomic_t segfaulted= 0;
+extern ulong max_used_connections;
+extern volatile sig_atomic_t calling_initgroups;
+#ifdef HAVE_NPTL
+extern volatile sig_atomic_t ld_assume_kernel_is_set;
+#endif
+
+/**
+ * Handler for fatal signals on POSIX, exception handler on Windows.
+ *
+ * Fatal events (seg.fault, bus error etc.) will trigger
+ * this signal handler. The handler will try to dump relevant
+ * debugging information to stderr and dump a core image.
+ *
+ * POSIX : Signal handlers should, if possible, only use a set of 'safe' system
+ * calls and library functions. A list of safe calls in POSIX systems
+ * are available at:
+ * http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html
+ *
+ * @param sig Signal number /Exception code
+*/
+extern "C" sig_handler handle_fatal_signal(int sig)
+{
+ time_t curr_time;
+ struct tm tm;
+#ifdef HAVE_STACKTRACE
+ THD *thd;
+#endif
+
+ if (segfaulted)
+ {
+ my_safe_printf_stderr("Fatal " SIGNAL_FMT " while backtracing\n", sig);
+ _exit(1); /* Quit without running destructors */
+ }
+
+ segfaulted = 1;
+
+ curr_time= my_time(0);
+ localtime_r(&curr_time, &tm);
+
+ my_safe_printf_stderr("%02d%02d%02d %2d:%02d:%02d ",
+ tm.tm_year % 100, tm.tm_mon+1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+ if (opt_expect_abort
+#ifdef _WIN32
+ && sig == EXCEPTION_BREAKPOINT /* __debugbreak() in my_sigabrt_handler() */
+#else
+ && sig == SIGABRT
+#endif
+ )
+ {
+ fprintf(stderr,"[Note] mysqld did an expected abort\n");
+ goto end;
+ }
+
+ my_safe_printf_stderr("[ERROR] mysqld got " SIGNAL_FMT " ;\n",sig);
+
+ my_safe_printf_stderr("%s",
+ "This could be because you hit a bug. It is also possible that this binary\n"
+ "or one of the libraries it was linked against is corrupt, improperly built,\n"
+ "or misconfigured. This error can also be caused by malfunctioning hardware.\n\n");
+
+ my_safe_printf_stderr("%s",
+ "To report this bug, see http://kb.askmonty.org/en/reporting-bugs\n\n");
+
+ my_safe_printf_stderr("%s",
+ "We will try our best to scrape up some info that will hopefully help\n"
+ "diagnose the problem, but since we have already crashed, \n"
+ "something is definitely wrong and this may fail.\n\n");
+
+ set_server_version();
+ my_safe_printf_stderr("Server version: %s\n", server_version);
+ my_safe_printf_stderr("key_buffer_size=%lu\n",
+ (ulong) dflt_key_cache->key_cache_mem_size);
+
+ my_safe_printf_stderr("read_buffer_size=%ld\n",
+ (long) global_system_variables.read_buff_size);
+
+ my_safe_printf_stderr("max_used_connections=%lu\n",
+ (ulong) max_used_connections);
+
+ my_safe_printf_stderr("max_threads=%u\n",
+ (uint) thread_scheduler.max_threads);
+
+ my_safe_printf_stderr("thread_count=%u\n", (uint) thread_count);
+
+ my_safe_printf_stderr("connection_count=%u\n", (uint) connection_count);
+
+ my_safe_printf_stderr("It is possible that mysqld could use up to \n"
+ "key_buffer_size + "
+ "(read_buffer_size + sort_buffer_size)*max_threads = "
+ "%lu K bytes of memory\n",
+ ((ulong) dflt_key_cache->key_cache_mem_size +
+ (global_system_variables.read_buff_size +
+ global_system_variables.sortbuff_size) *
+ thread_scheduler.max_threads +
+ max_connections * sizeof(THD)) / 1024);
+
+ my_safe_printf_stderr("%s",
+ "Hope that's ok; if not, decrease some variables in the equation.\n\n");
+
+#if defined(HAVE_LINUXTHREADS)
+ if (sizeof(char*) == 4 && thread_count > UNSAFE_DEFAULT_LINUX_THREADS)
+ {
+ my_safe_printf_stderr(
+ "You seem to be running 32-bit Linux and have "
+ "%d concurrent connections.\n"
+ "If you have not changed STACK_SIZE in LinuxThreads "
+ "and built the binary \n"
+ "yourself, LinuxThreads is quite likely to steal "
+ "a part of the global heap for\n"
+ "the thread stack. Please read "
+ "http://dev.mysql.com/doc/mysql/en/linux-installation.html\n\n"
+ thread_count);
+ }
+#endif /* HAVE_LINUXTHREADS */
+
+#ifdef HAVE_STACKTRACE
+ thd= current_thd;
+
+ if (opt_stack_trace)
+ {
+ my_safe_printf_stderr("Thread pointer: 0x%p\n", thd);
+ my_safe_printf_stderr("%s",
+ "Attempting backtrace. You can use the following "
+ "information to find out\n"
+ "where mysqld died. If you see no messages after this, something went\n"
+ "terribly wrong...\n");
+ my_print_stacktrace(thd ? (uchar*) thd->thread_stack : NULL,
+ my_thread_stack_size);
+ }
+ if (thd)
+ {
+ const char *kreason= "UNKNOWN";
+ switch (thd->killed) {
+ case NOT_KILLED:
+ case KILL_HARD_BIT:
+ kreason= "NOT_KILLED";
+ break;
+ case KILL_BAD_DATA:
+ case KILL_BAD_DATA_HARD:
+ kreason= "KILL_BAD_DATA";
+ break;
+ case KILL_CONNECTION:
+ case KILL_CONNECTION_HARD:
+ kreason= "KILL_CONNECTION";
+ break;
+ case KILL_QUERY:
+ case KILL_QUERY_HARD:
+ kreason= "KILL_QUERY";
+ break;
+ case KILL_SYSTEM_THREAD:
+ case KILL_SYSTEM_THREAD_HARD:
+ kreason= "KILL_SYSTEM_THREAD";
+ break;
+ case KILL_SERVER:
+ case KILL_SERVER_HARD:
+ kreason= "KILL_SERVER";
+ break;
+ }
+ my_safe_printf_stderr("%s", "\n"
+ "Trying to get some variables.\n"
+ "Some pointers may be invalid and cause the dump to abort.\n");
+
+ my_safe_printf_stderr("Query (%p): ", thd->query());
+ my_safe_print_str(thd->query(), min(65535, thd->query_length()));
+ my_safe_printf_stderr("Connection ID (thread ID): %lu\n",
+ (ulong) thd->thread_id);
+ my_safe_printf_stderr("Status: %s\n", kreason);
+
+ ulonglong optsw= thd->variables.optimizer_switch;
+ const char **optimizer_switch_names= optimizer_switch_typelib.type_names;
+ my_safe_printf_stderr("Optimizer switch: ");
+ for (uint i= 0; optimizer_switch_names[i+1]; i++, optsw >>= 1)
+ {
+ if (i)
+ my_safe_printf_stderr(",");
+ my_safe_printf_stderr("%s=%s",
+ optimizer_switch_names[i],
+ optsw & 1 ? "on" : "off");
+ }
+ my_safe_printf_stderr("\n\n");
+ }
+ my_safe_printf_stderr("%s",
+ "The manual page at "
+ "http://dev.mysql.com/doc/mysql/en/crashing.html contains\n"
+ "information that should help you find out what is causing the crash.\n");
+
+#endif /* HAVE_STACKTRACE */
+
+#ifdef HAVE_INITGROUPS
+ if (calling_initgroups)
+ {
+ my_safe_printf_stderr("%s", "\n"
+ "This crash occured while the server was calling initgroups(). This is\n"
+ "often due to the use of a mysqld that is statically linked against \n"
+ "glibc and configured to use LDAP in /etc/nsswitch.conf.\n"
+ "You will need to either upgrade to a version of glibc that does not\n"
+ "have this problem (2.3.4 or later when used with nscd),\n"
+ "disable LDAP in your nsswitch.conf, or use a "
+ "mysqld that is not statically linked.\n");
+ }
+#endif
+
+#ifdef HAVE_NPTL
+ if (thd_lib_detected == THD_LIB_LT && !ld_assume_kernel_is_set)
+ {
+ my_safe_printf_stderr("%s",
+ "You are running a statically-linked LinuxThreads binary on an NPTL\n"
+ "system. This can result in crashes on some distributions due to "
+ "LT/NPTL conflicts.\n"
+ "You should either build a dynamically-linked binary, "
+ "or force LinuxThreads\n"
+ "to be used with the LD_ASSUME_KERNEL environment variable.\n"
+ "Please consult the documentation for your distribution "
+ "on how to do that.\n");
+ }
+#endif
+
+ if (locked_in_memory)
+ {
+ my_safe_printf_stderr("%s", "\n"
+ "The \"--memlock\" argument, which was enabled, "
+ "uses system calls that are\n"
+ "unreliable and unstable on some operating systems and "
+ "operating-system versions (notably, some versions of Linux).\n"
+ "This crash could be due to use of those buggy OS calls.\n"
+ "You should consider whether you really need the "
+ "\"--memlock\" parameter and/or consult the OS distributer about "
+ "\"mlockall\" bugs.\n");
+ }
+
+#ifdef HAVE_WRITE_CORE
+ if (test_flags & TEST_CORE_ON_SIGNAL)
+ {
+ my_safe_printf_stderr("%s", "Writing a core file\n");
+ fflush(stderr);
+ my_write_core(sig);
+ }
+#endif
+
+end:
+#ifndef __WIN__
+ /*
+ Quit, without running destructors (etc.)
+ On Windows, do not terminate, but pass control to exception filter.
+ */
+ _exit(1); // Using _exit(), since exit() is not async signal safe
+#else
+ return;
+#endif
+}
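[Editorial note] The new signal_handler.cc keeps the handler close to async-signal-safe territory: a volatile sig_atomic_t recursion guard, my_safe_printf_stderr/my_safe_print_str instead of stdio, and _exit() instead of exit(). A minimal standalone sketch of that same pattern (generic POSIX code, not the server's implementation):

    #include <csignal>
    #include <cstring>
    #include <unistd.h>

    // Recursion guard: if we fault again while reporting, quit immediately.
    static volatile sig_atomic_t already_faulted= 0;

    // write() is async-signal-safe; printf/fprintf generally are not.
    static void safe_write(const char *msg)
    {
      (void) write(STDERR_FILENO, msg, strlen(msg));
    }

    extern "C" void fatal_signal_handler(int sig)
    {
      if (already_faulted)
        _exit(1);                     // crashed while reporting: bail out
      already_faulted= 1;
      (void) sig;
      safe_write("got a fatal signal; attempting to report and exit\n");
      _exit(1);                       // _exit() skips destructors/atexit handlers
    }

    int main()
    {
      struct sigaction sa;
      memset(&sa, 0, sizeof(sa));
      sa.sa_handler= fatal_signal_handler;
      sigemptyset(&sa.sa_mask);
      sigaction(SIGSEGV, &sa, 0);
      sigaction(SIGABRT, &sa, 0);
      // ... normal server work would go here ...
      return 0;
    }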
diff --git a/sql/slave.cc b/sql/slave.cc
index 363c1eb9a17..7935754e25c 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -2683,9 +2683,8 @@ pthread_handler_t handle_slave_io(void *arg)
if (init_slave_thread(thd, SLAVE_THD_IO))
{
pthread_cond_broadcast(&mi->start_cond);
- pthread_mutex_unlock(&mi->run_lock);
sql_print_error("Failed during slave I/O thread initialization");
- goto err;
+ goto err_during_init;
}
pthread_mutex_lock(&LOCK_thread_count);
threads.append(thd);
@@ -2953,6 +2952,7 @@ err:
thd_proc_info(thd, "Waiting for slave mutex on exit");
pthread_mutex_lock(&mi->run_lock);
+err_during_init:
/* Forget the relay log's format */
delete mi->rli.relay_log.description_event_for_queue;
mi->rli.relay_log.description_event_for_queue= 0;
@@ -3076,10 +3076,9 @@ pthread_handler_t handle_slave_sql(void *arg)
will be stuck if we fail here
*/
pthread_cond_broadcast(&rli->start_cond);
- pthread_mutex_unlock(&rli->run_lock);
rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
"Failed during slave thread initialization");
- goto err;
+ goto err_during_init;
}
thd->init_for_queries();
thd->temporary_tables = rli->save_temporary_tables; // restore temp tables
@@ -3332,6 +3331,7 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
thd->reset_db(NULL, 0);
thd_proc_info(thd, "Waiting for slave mutex on exit");
pthread_mutex_lock(&rli->run_lock);
+err_during_init:
/* We need data_lock, at least to wake up any waiting master_pos_wait() */
pthread_mutex_lock(&rli->data_lock);
DBUG_ASSERT(rli->slave_running == 1); // tracking buffer overrun
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 1c31e331965..dc664b2779b 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -7494,22 +7494,9 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
ulong client_capabilities= uint2korr(net->read_pos);
if (client_capabilities & CLIENT_PROTOCOL_41)
{
- if (pkt_len < 32)
+ if (pkt_len < 4)
return packet_error;
client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16;
- thd->max_client_packet_length= uint4korr(net->read_pos+4);
- DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8]));
- if (thd_init_client_charset(thd, (uint) net->read_pos[8]))
- return packet_error;
- thd->update_charset();
- end= (char*) net->read_pos+32;
- }
- else
- {
- if (pkt_len < 5)
- return packet_error;
- thd->max_client_packet_length= uint3korr(net->read_pos+2);
- end= (char*) net->read_pos+5;
}
/* Disable those bits which are not supported by the client. */
@@ -7544,6 +7531,25 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
}
}
+ if (client_capabilities & CLIENT_PROTOCOL_41)
+ {
+ if (pkt_len < 32)
+ return packet_error;
+ thd->max_client_packet_length= uint4korr(net->read_pos+4);
+ DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8]));
+ if (thd_init_client_charset(thd, (uint) net->read_pos[8]))
+ return packet_error;
+ thd->update_charset();
+ end= (char*) net->read_pos+32;
+ }
+ else
+ {
+ if (pkt_len < 5)
+ return packet_error;
+ thd->max_client_packet_length= uint3korr(net->read_pos+2);
+ end= (char*) net->read_pos+5;
+ }
+
if (end >= (char*) net->read_pos+ pkt_len +2)
return packet_error;
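[Editorial note] The reordered hunk above makes sure the 32-byte CLIENT_PROTOCOL_41 header fields (max packet length, character set) are read only after pkt_len has been re-checked, rather than trusting the initial 4-byte check. A small standalone sketch of that defensive pattern (hypothetical helper names and struct, not the server's parser; CLIENT_PROTOCOL_41 is the usual 0x0200 flag):

    #include <cstdint>
    #include <cstddef>

    // Little-endian helpers analogous to uint2korr/uint3korr/uint4korr.
    static uint16_t read_u16(const unsigned char *p)
    { return (uint16_t) (p[0] | (p[1] << 8)); }
    static uint32_t read_u24(const unsigned char *p)
    { return (uint32_t) p[0] | ((uint32_t) p[1] << 8) | ((uint32_t) p[2] << 16); }
    static uint32_t read_u32(const unsigned char *p)
    { return read_u24(p) | ((uint32_t) p[3] << 24); }

    struct HandshakeFields
    {
      uint32_t capabilities;
      uint32_t max_packet;
      uint8_t  charset;
    };

    // Every fixed-offset read is guarded by a length check on the packet,
    // mirroring the pkt_len < 32 / pkt_len < 5 checks in the hunk above.
    static bool parse_handshake(const unsigned char *pkt, size_t pkt_len,
                                HandshakeFields *out)
    {
      const uint32_t PROTOCOL_41= 0x0200;
      if (pkt_len < 2)
        return false;                      // not even room for the capability flags
      out->capabilities= read_u16(pkt);
      if (out->capabilities & PROTOCOL_41)
      {
        if (pkt_len < 32)                  // full 4.1-style fixed header required
          return false;
        out->capabilities|= (uint32_t) read_u16(pkt + 2) << 16;
        out->max_packet= read_u32(pkt + 4);
        out->charset= pkt[8];
      }
      else
      {
        if (pkt_len < 5)                   // old-style 5-byte fixed header
          return false;
        out->max_packet= read_u24(pkt + 2);
        out->charset= 0;                   // charset not sent pre-4.1
      }
      return true;
    }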
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 558e7b0aa9e..11fd5db2020 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1707,11 +1707,12 @@ TABLE_LIST* unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list,
t_name= table->table_name;
t_alias= table->alias;
+retry:
DBUG_PRINT("info", ("real table: %s.%s", d_name, t_name));
- for (;;)
+ for (TABLE_LIST *tl= table_list;;)
{
- if (((! (res= find_table_in_global_list(table_list, d_name, t_name))) &&
- (! (res= mysql_lock_have_duplicate(thd, table, table_list)))) ||
+ if (((! (res= find_table_in_global_list(tl, d_name, t_name))) &&
+ (! (res= mysql_lock_have_duplicate(thd, table, tl)))) ||
((!res->table || res->table != table->table) &&
(!check_alias || !(lower_case_table_names ?
my_strcasecmp(files_charset_info, t_alias, res->alias) :
@@ -1724,10 +1725,23 @@ TABLE_LIST* unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list,
processed in derived table or top select of multi-update/multi-delete
(exclude_from_table_unique_test) or prelocking placeholder.
*/
- table_list= res->next_global;
+ tl= res->next_global;
DBUG_PRINT("info",
("found same copy of table or table which we should skip"));
}
+ if (res && res->belong_to_derived)
+ {
+ /* Try to fix */
+ TABLE_LIST *derived= res->belong_to_derived;
+ if (derived->is_merged_derived())
+ {
+ DBUG_PRINT("info",
+ ("convert merged to materialization to resolve the conflict"));
+ derived->change_refs_to_fields();
+ derived->set_materialized_derived();
+ }
+ goto retry;
+ }
DBUG_RETURN(res);
}
@@ -7197,11 +7211,16 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
if (!(eq_cond= new Item_func_eq(item_ident_1, item_ident_2)))
goto err; /* Out of memory. */
+ if (field_1 && field_1->vcol_info)
+ field_1->table->mark_virtual_col(field_1);
+ if (field_2 && field_2->vcol_info)
+ field_2->table->mark_virtual_col(field_2);
+
/*
Add the new equi-join condition to the ON clause. Notice that
fix_fields() is applied to all ON conditions in setup_conds()
so we don't do it here.
- */
+ */
add_join_on((table_ref_1->outer_join & JOIN_TYPE_RIGHT ?
table_ref_1 : table_ref_2),
eq_cond);
@@ -7922,7 +7941,8 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
while ((table_list= ti++))
{
TABLE *table= table_list->table;
- table->pos_in_table_list= table_list;
+ if (table)
+ table->pos_in_table_list= table_list;
if (first_select_table &&
table_list->top_table() == first_select_table)
{
@@ -7935,7 +7955,7 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
{
table_list->jtbm_table_no= tablenr;
}
- else
+ else if (table)
{
table->pos_in_table_list= table_list;
setup_table_map(table, table_list, tablenr);
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 16b6d78576a..fbe42c85711 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -1452,10 +1452,10 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
In case the wait time can't be determined there is an upper limit which
causes try_lock() to abort with a time out.
- The 'TRUE' parameter indicate that the lock is allowed to timeout
+ The 'TIMEOUT' parameter indicates that the lock is allowed to time out
*/
- if (try_lock(thd, Query_cache::WAIT))
+ if (try_lock(thd, Query_cache::TIMEOUT))
DBUG_VOID_RETURN;
if (query_cache_size == 0)
{
@@ -1659,6 +1659,17 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length)
if (is_disabled() || thd->locked_tables ||
thd->variables.query_cache_type == 0)
goto err;
+
+ /*
+ The following can only happen for prepared statements for which it was
+ found, during parsing or later, that the query is not cacheable.
+ */
+ if (!thd->lex->safe_to_cache_query)
+ {
+ DBUG_PRINT("qcache", ("SELECT is non-cacheable"));
+ goto err;
+ }
+
DBUG_ASSERT(query_cache_size != 0); // otherwise cache would be disabled
thd->query_cache_is_applicable= 1;
@@ -1781,9 +1792,9 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length)
disabled or if a full cache flush is in progress, the attempt to
get the lock is aborted.
- The WAIT parameter indicate that the lock is allowed to timeout.
+ The TIMEOUT parameter indicates that the lock is allowed to time out.
*/
- if (try_lock(thd, Query_cache::WAIT))
+ if (try_lock(thd, Query_cache::TIMEOUT))
goto err;
if (query_cache_size == 0)
@@ -1950,6 +1961,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
faster.
*/
thd->query_cache_is_applicable= 0; // Query can't be cached
+ thd->lex->safe_to_cache_query= 0; // For prepared statements
BLOCK_UNLOCK_RD(query_block);
DBUG_RETURN(-1);
}
@@ -1966,6 +1978,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
table_list.db, table_list.alias));
unlock();
thd->query_cache_is_applicable= 0; // Query can't be cached
+ thd->lex->safe_to_cache_query= 0; // For prepared statements
BLOCK_UNLOCK_RD(query_block);
DBUG_RETURN(-1); // Privilege error
}
@@ -1975,6 +1988,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
table_list.db, table_list.alias));
BLOCK_UNLOCK_RD(query_block);
thd->query_cache_is_applicable= 0; // Query can't be cached
+ thd->lex->safe_to_cache_query= 0; // For prepared statements
goto err_unlock; // Parse query
}
#endif /*!NO_EMBEDDED_ACCESS_CHECKS*/
@@ -1998,7 +2012,13 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
table->key_length());
}
else
+ {
+ /*
+ As this can change from call to call, don't also set
+ thd->lex->safe_to_cache_query
+ */
thd->query_cache_is_applicable= 0; // Query can't be cached
+ }
goto err_unlock; // Parse query
}
else
@@ -3886,6 +3906,7 @@ Query_cache::process_and_count_tables(THD *thd, TABLE_LIST *tables_used,
DBUG_PRINT("qcache", ("Don't cache statement as it refers to "
"tables with column privileges."));
thd->query_cache_is_applicable= 0; // Query can't be cached
+ thd->lex->safe_to_cache_query= 0; // For prepared statements
DBUG_RETURN(0);
}
#endif
@@ -4022,6 +4043,10 @@ my_bool Query_cache::ask_handler_allowance(THD *thd,
{
DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
tables_used->db, tables_used->alias));
+ /*
+ As this can change from call to call, don't also set
+ thd->lex->safe_to_cache_query
+ */
thd->query_cache_is_applicable= 0; // Query can't be cached
DBUG_RETURN(1);
}
diff --git a/sql/sql_cache.h b/sql/sql_cache.h
index eeaac8f6b62..b2e88a3c619 100644
--- a/sql/sql_cache.h
+++ b/sql/sql_cache.h
@@ -429,8 +429,8 @@ protected:
uint def_query_hash_size = QUERY_CACHE_DEF_QUERY_HASH_SIZE,
uint def_table_hash_size = QUERY_CACHE_DEF_TABLE_HASH_SIZE);
- bool is_disabled(void) { return m_cache_status != OK; }
- bool is_disable_in_progress(void)
+ inline bool is_disabled(void) { return m_cache_status != OK; }
+ inline bool is_disable_in_progress(void)
{ return m_cache_status == DISABLE_REQUEST; }
/* initialize cache (mutex) */
diff --git a/sql/sql_class.h b/sql/sql_class.h
index f7d44eeec52..c5f500a6b18 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -577,7 +577,9 @@ typedef struct system_status_var
calls made by range access. The intent is to count only calls made by
BatchedKeyAccess.
*/
- ulong ha_multi_range_read_init_count;
+ ulong ha_mrr_init_count;
+ ulong ha_mrr_key_refills_count;
+ ulong ha_mrr_rowid_refills_count;
ulong ha_rollback_count;
ulong ha_update_count;
@@ -586,6 +588,8 @@ typedef struct system_status_var
ulong ha_tmp_update_count;
ulong ha_tmp_write_count;
ulong ha_prepare_count;
+ ulong ha_icp_attempts;
+ ulong ha_icp_match;
ulong ha_discover_count;
ulong ha_savepoint_count;
ulong ha_savepoint_rollback_count;
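
The counters added here are raw totals; what they are usually read for is a derived ratio, for example how selective the pushed index condition was, or how many buffer refills an average multi-range read needed. A small illustration with made-up snapshot values follows; the field names mirror the additions above, but the struct itself exists only for the example.

#include <cstdio>

/* Illustrative snapshot of the counters added above (values are made up). */
struct Counter_snapshot
{
  unsigned long ha_icp_attempts;
  unsigned long ha_icp_match;
  unsigned long ha_mrr_init_count;
  unsigned long ha_mrr_key_refills_count;
  unsigned long ha_mrr_rowid_refills_count;
};

int main()
{
  Counter_snapshot s= { 100000, 1500, 40, 120, 200 };

  /* Fraction of index entries that survived the pushed condition:
     a low value means Index Condition Pushdown filtered out most rows. */
  double icp_selectivity= s.ha_icp_attempts
    ? (double) s.ha_icp_match / s.ha_icp_attempts : 0.0;

  /* Average number of buffer refills per MRR scan. */
  double key_refills_per_scan= s.ha_mrr_init_count
    ? (double) s.ha_mrr_key_refills_count / s.ha_mrr_init_count : 0.0;
  double rowid_refills_per_scan= s.ha_mrr_init_count
    ? (double) s.ha_mrr_rowid_refills_count / s.ha_mrr_init_count : 0.0;

  std::printf("ICP selectivity: %.3f\n", icp_selectivity);
  std::printf("key refills/scan: %.1f, rowid refills/scan: %.1f\n",
              key_refills_per_scan, rowid_refills_per_scan);
  return 0;
}
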
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 3ce375190a7..84e88196f20 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -68,7 +68,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
if (mysql_handle_list_of_derived(thd->lex, table_list, DT_PREPARE))
DBUG_RETURN(TRUE);
- if (!table_list->updatable)
+ if (!table_list->single_table_updatable())
{
my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "DELETE");
DBUG_RETURN(TRUE);
@@ -526,7 +526,8 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds)
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
setup_ftfuncs(select_lex))
DBUG_RETURN(TRUE);
- if (!table_list->updatable || check_key_in_view(thd, table_list))
+ if (!table_list->single_table_updatable() ||
+ check_key_in_view(thd, table_list))
{
my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "DELETE");
DBUG_RETURN(TRUE);
@@ -622,7 +623,7 @@ int mysql_multi_delete_prepare(THD *thd)
DBUG_RETURN(TRUE);
}
- if (!target_tbl->correspondent_table->updatable ||
+ if (!target_tbl->correspondent_table->single_table_updatable() ||
check_key_in_view(thd, target_tbl->correspondent_table))
{
my_error(ER_NON_UPDATABLE_TABLE, MYF(0),
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 0955f9c0982..02a26254336 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -483,7 +483,7 @@ bool mysql_derived_merge_for_insert(THD *thd, LEX *lex, TABLE_LIST *derived)
return mysql_derived_prepare(thd, lex, derived);
if (!derived->is_multitable())
{
- if (!derived->updatable)
+ if (!derived->single_table_updatable())
return derived->create_field_translation(thd);
if (derived->merge_underlying_list)
{
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index d01a594708e..a4943d5e13a 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -103,7 +103,8 @@ static bool check_view_insertability(THD *thd, TABLE_LIST *view);
*/
bool check_view_single_update(List<Item> &fields, List<Item> *values,
- TABLE_LIST *view, table_map *map)
+ TABLE_LIST *view, table_map *map,
+ bool insert)
{
/* it is join view => we need to find the table for update */
List_iterator_fast<Item> it(fields);
@@ -140,6 +141,14 @@ bool check_view_single_update(List<Item> &fields, List<Item> *values,
*/
tbl->table->insert_values= view->table->insert_values;
view->table= tbl->table;
+ if (!tbl->single_table_updatable())
+ {
+ if (insert)
+ my_error(ER_NON_INSERTABLE_TABLE, MYF(0), view->alias, "INSERT");
+ else
+ my_error(ER_NON_UPDATABLE_TABLE, MYF(0), view->alias, "UPDATE");
+ return TRUE;
+ }
*map= tables;
return FALSE;
@@ -184,7 +193,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
{
TABLE *table= table_list->table;
- if (!table_list->updatable)
+ if (!table_list->single_table_updatable())
{
my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT");
return -1;
@@ -260,7 +269,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
if (check_view_single_update(fields,
fields_and_values_from_different_maps ?
(List<Item>*) 0 : &values,
- table_list, map))
+ table_list, map, true))
return -1;
table= table_list->table;
}
@@ -347,7 +356,7 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
if (insert_table_list->is_view() &&
insert_table_list->is_merged_derived() &&
check_view_single_update(update_fields, &update_values,
- insert_table_list, map))
+ insert_table_list, map, false))
return -1;
if (table->timestamp_field)
@@ -1159,7 +1168,7 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list,
bool insert_into_view= (table_list->view != 0);
DBUG_ENTER("mysql_prepare_insert_check_table");
- if (!table_list->updatable)
+ if (!table_list->single_table_updatable())
{
my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT");
DBUG_RETURN(TRUE);
diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc
index af4b157ba90..a960c63e782 100644
--- a/sql/sql_join_cache.cc
+++ b/sql/sql_join_cache.cc
@@ -898,6 +898,8 @@ int JOIN_CACHE::alloc_buffer()
curr_buff_space_sz+= cache->get_join_buffer_size();
}
}
+ curr_min_buff_space_sz+= min_buff_size;
+ curr_buff_space_sz+= buff_size;
if (curr_min_buff_space_sz > join_buff_space_limit ||
(curr_buff_space_sz > join_buff_space_limit &&
@@ -2109,16 +2111,6 @@ enum_nested_loop_state JOIN_CACHE::join_records(bool skip_last)
if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
goto finish;
}
- if (outer_join_first_inner)
- {
- /*
- All null complemented rows have been already generated for all
- outer records from join buffer. Restore the state of the
- first_unmatched values to 0 to avoid another null complementing.
- */
- for (tab= join_tab->first_inner; tab <= join_tab->last_inner; tab++)
- tab->first_unmatched= 0;
- }
if (skip_last)
{
@@ -2131,6 +2123,16 @@ enum_nested_loop_state JOIN_CACHE::join_records(bool skip_last)
}
finish:
+ if (outer_join_first_inner)
+ {
+ /*
+ All null complemented rows have been already generated for all
+ outer records from join buffer. Restore the state of the
+ first_unmatched values to 0 to avoid another null complementing.
+ */
+ for (tab= join_tab->first_inner; tab <= join_tab->last_inner; tab++)
+ tab->first_unmatched= 0;
+ }
restore_last_record();
reset(TRUE);
DBUG_PRINT("exit", ("rc: %d", rc));
@@ -2574,6 +2576,15 @@ void JOIN_CACHE::print_explain_comment(String *str)
str->append(STRING_WITH_LEN(")"));
}
+/**
+  Get the thread handle.
+*/
+
+THD *JOIN_CACHE::thd()
+{
+ return join->thd;
+}
+
static void add_mrr_explain_info(String *str, uint mrr_mode, handler *file)
{
@@ -4013,7 +4024,11 @@ bool bka_skip_index_tuple(range_seq_t rseq, range_id_t range_info)
{
DBUG_ENTER("bka_skip_index_tuple");
JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
- bool res= cache->skip_index_tuple(range_info);
+ THD *thd= cache->thd();
+ bool res;
+ status_var_increment(thd->status_var.ha_icp_attempts);
+ if (!(res= cache->skip_index_tuple(range_info)))
+ status_var_increment(thd->status_var.ha_icp_match);
DBUG_RETURN(res);
}
@@ -4488,7 +4503,12 @@ bool bkah_skip_index_tuple(range_seq_t rseq, range_id_t range_info)
{
DBUG_ENTER("bka_unique_skip_index_tuple");
JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
- DBUG_RETURN(cache->skip_index_tuple(range_info));
+ THD *thd= cache->thd();
+ bool res;
+ status_var_increment(thd->status_var.ha_icp_attempts);
+ if (!(res= cache->skip_index_tuple(range_info)))
+ status_var_increment(thd->status_var.ha_icp_match);
+ DBUG_RETURN(res);
}
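
Both callbacks above now count every evaluation of the pushed index condition as an attempt and every tuple the condition keeps as a match (the real code bumps thd->status_var through status_var_increment). Below is a generic sketch of that wrapping, with hypothetical counter types standing in for the server's status variables.

#include <cstdint>

/* Hypothetical counter pair standing in for the ha_icp_* status variables. */
struct Icp_counters
{
  uint64_t attempts;
  uint64_t matches;
};

/*
  Wrap any "skip this index tuple?" predicate so that each call is counted
  as an attempt and each tuple the predicate keeps is counted as a match --
  the same placement of increments as in bka_skip_index_tuple() above.
*/
template <class Skip_predicate>
bool counted_skip_index_tuple(Icp_counters &c, Skip_predicate skip)
{
  c.attempts++;
  bool res= skip();      /* true means the pushed condition rejects the row */
  if (!res)
    c.matches++;
  return res;
}
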
diff --git a/sql/sql_join_cache.h b/sql/sql_join_cache.h
index f5d64d5530a..ba8e4ba8e4a 100644
--- a/sql/sql_join_cache.h
+++ b/sql/sql_join_cache.h
@@ -643,6 +643,8 @@ public:
/* Add a comment on the join algorithm employed by the join cache */
virtual void print_explain_comment(String *str);
+ THD *thd();
+
virtual ~JOIN_CACHE() {}
void reset_join(JOIN *j) { join= j; }
void free()
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index f29f51325a9..4a69cd3b1fa 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -2885,6 +2885,7 @@ void st_lex::cleanup_after_one_table_open()
if (all_selects_list != &select_lex)
{
derived_tables= 0;
+ select_lex.exclude_from_table_unique_test= false;
/* cleunup underlying units (units of VIEW) */
for (SELECT_LEX_UNIT *un= select_lex.first_inner_unit();
un;
diff --git a/sql/sql_list.h b/sql/sql_list.h
index 873a8656ebe..adedd9a3a4d 100644
--- a/sql/sql_list.h
+++ b/sql/sql_list.h
@@ -257,7 +257,12 @@ public:
last= &first;
return tmp->info;
}
- inline void disjoin(base_list *list)
+ /*
+ Remove from this list elements that are contained in the passed list.
+ We assume that the passed list is a tail of this list (that is, the whole
+    We assume that the passed list is a tail of this list (that is, all of
+    its list_node elements are shared with this list).
+ inline void disjoin(const base_list *list)
{
list_node **prev= &first;
list_node *node= first;
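
The new comment states disjoin()'s precondition: the argument list must be a tail of this list, sharing the same list_node objects. Under that assumption, removing its elements amounts to finding where the shared tail starts and cutting the chain there. A self-contained illustration with a minimal singly linked list (not the server's base_list) follows.

#include <cassert>

/* Minimal singly linked list, only what is needed to illustrate disjoin(). */
struct node { int value; node *next; };

struct list
{
  node *first;

  /*
    Remove from this list the elements of 'tail', assuming -- as the comment
    above assumes for base_list::disjoin() -- that 'tail' shares its nodes
    with the end of this list.  We just cut the chain where the tail starts.
  */
  void disjoin(const list &tail)
  {
    node **prev= &first;
    while (*prev && *prev != tail.first)
      prev= &(*prev)->next;
    *prev= 0;                       /* detach the shared tail */
  }
};

int main()
{
  node c= { 3, 0 }, b= { 2, &c }, a= { 1, &b };
  list whole= { &a };
  list tail=  { &b };               /* shares nodes b and c with 'whole' */
  whole.disjoin(tail);
  assert(whole.first == &a && a.next == 0);
  return 0;
}
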
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 42a8f001c6f..7fcb52a7b9c 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -187,7 +187,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
INSERT_ACL | UPDATE_ACL, FALSE))
DBUG_RETURN(-1);
if (!table_list->table || // do not suport join view
- !table_list->updatable || // and derived tables
+ !table_list->single_table_updatable() || // and derived tables
check_key_in_view(thd, table_list))
{
my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "LOAD");
diff --git a/sql/sql_manager.cc b/sql/sql_manager.cc
index cf0a73d0ce7..57fe5072dcd 100644
--- a/sql/sql_manager.cc
+++ b/sql/sql_manager.cc
@@ -44,6 +44,7 @@ static struct handler_cb * volatile cb_list;
bool mysql_manager_submit(void (*action)())
{
bool result= FALSE;
+ DBUG_ASSERT(manager_thread_in_use);
struct handler_cb * volatile *cb;
pthread_mutex_lock(&LOCK_manager);
cb= &cb_list;
@@ -75,8 +76,9 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused)))
pthread_detach_this_thread();
manager_thread = pthread_self();
+ (void) pthread_cond_init(&COND_manager,NULL);
+ (void) pthread_mutex_init(&LOCK_manager,NULL);
manager_thread_in_use = 1;
-
for (;;)
{
pthread_mutex_lock(&LOCK_manager);
@@ -123,6 +125,8 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused)))
}
}
manager_thread_in_use = 0;
+ (void) pthread_mutex_destroy(&LOCK_manager);
+ (void) pthread_cond_destroy(&COND_manager);
DBUG_LEAVE; // Can't use DBUG_RETURN after my_thread_end
my_thread_end();
return (NULL);
@@ -149,14 +153,14 @@ void stop_handle_manager()
{
DBUG_ENTER("stop_handle_manager");
abort_manager = true;
- pthread_mutex_lock(&LOCK_manager);
if (manager_thread_in_use)
{
+ pthread_mutex_lock(&LOCK_manager);
DBUG_PRINT("quit", ("initiate shutdown of handle manager thread: 0x%lx",
(ulong)manager_thread));
- pthread_cond_signal(&COND_manager);
+ pthread_cond_signal(&COND_manager);
+ pthread_mutex_unlock(&LOCK_manager);
}
- pthread_mutex_unlock(&LOCK_manager);
DBUG_VOID_RETURN;
}
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 1676d4a09f4..e84750369f9 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -191,7 +191,8 @@ bool end_active_trans(THD *thd)
if (ha_commit(thd))
error=1;
#ifdef WITH_ARIA_STORAGE_ENGINE
- ha_maria::implicit_commit(thd, TRUE);
+ if (ha_storage_engine_is_enabled(maria_hton))
+ ha_maria::implicit_commit(thd, TRUE);
#endif
}
thd->options&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
@@ -1231,6 +1232,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
char *beginning_of_next_stmt= (char*) end_of_stmt;
#ifdef WITH_ARIA_STORAGE_ENGINE
+ if (ha_storage_engine_is_enabled(maria_hton))
ha_maria::implicit_commit(thd, FALSE);
#endif
@@ -1608,7 +1610,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->transaction.stmt.reset();
#ifdef WITH_ARIA_STORAGE_ENGINE
- ha_maria::implicit_commit(thd, FALSE);
+ if (ha_storage_engine_is_enabled(maria_hton))
+ ha_maria::implicit_commit(thd, FALSE);
#endif
if (!(sql_command_flags[thd->lex->sql_command] & CF_CHANGES_DATA))
@@ -7363,13 +7366,23 @@ static uint kill_threads_for_user(THD *thd, LEX_USER *user,
if (!threads_to_kill.is_empty())
{
List_iterator_fast<THD> it(threads_to_kill);
- THD *ptr;
- while ((ptr= it++))
+ THD *next_ptr;
+ THD *ptr= it++;
+ do
{
ptr->awake(kill_signal);
+ /*
+ Careful here: The list nodes are allocated on the memroots of the
+ THDs to be awakened.
+ But those THDs may be terminated and deleted as soon as we release
+ LOCK_thd_data, which will make the list nodes invalid.
+ Since the operation "it++" dereferences the "next" pointer of the
+ previous list node, we need to do this while holding LOCK_thd_data.
+ */
+ next_ptr= it++;
pthread_mutex_unlock(&ptr->LOCK_thd_data);
(*rows)++;
- }
+ } while ((ptr= next_ptr));
}
DBUG_RETURN(0);
}
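
The rewritten loop has to fetch the iterator's next element while still holding LOCK_thd_data, because as soon as the lock is released the THD that owns the current list node may be destroyed. Below is a stripped-down sketch of that ordering, with hypothetical types standing in for THD and the kill list.

#include <pthread.h>

/* Hypothetical stand-ins: each session owns a lock (like LOCK_thd_data) and
   the list node referring to it lives in memory that goes away with the
   session. */
struct Session { pthread_mutex_t lock; };
struct Node    { Session *session; Node *next; };

/*
  Walk the list and release each session's lock, reading the 'next' pointer
  *before* unlocking: once the lock is released, the session -- and with it
  the node we are standing on -- may be freed by another thread.
*/
static void release_all(Node *head)
{
  for (Node *node= head; node; )
  {
    Session *s= node->session;      /* lock is assumed to be held already */
    Node *next= node->next;         /* must be read before unlocking      */
    pthread_mutex_unlock(&s->lock);
    node= next;                     /* 'node' may be dangling by now      */
  }
}
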
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 611c8ddf944..32ce8dc6fc4 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1274,7 +1274,7 @@ static int mysql_test_update(Prepared_statement *stmt,
if (table_list->handle_derived(thd->lex, DT_PREPARE))
goto error;
- if (!table_list->updatable)
+ if (!table_list->single_table_updatable())
{
my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
goto error;
@@ -1348,7 +1348,7 @@ static bool mysql_test_delete(Prepared_statement *stmt,
if (mysql_handle_derived(thd->lex, DT_PREPARE))
goto error;
- if (!table_list->updatable)
+ if (!table_list->single_table_updatable())
{
my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "DELETE");
goto error;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 907905fd47f..6cf64c4f7c3 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1266,9 +1266,20 @@ JOIN::optimize()
Item *ref_item= *ref_item_ptr;
if (!ref_item->used_tables() && !(select_options & SELECT_DESCRIBE))
continue;
- COND_EQUAL *equals= tab->first_inner ? tab->first_inner->cond_equal :
- cond_equal;
- ref_item= substitute_for_best_equal_field(tab, ref_item, equals, map2table);
+ COND_EQUAL *equals= cond_equal;
+ JOIN_TAB *first_inner= tab->first_inner;
+ while (equals)
+ {
+ ref_item= substitute_for_best_equal_field(tab, ref_item,
+ equals, map2table);
+ if (first_inner)
+ {
+ equals= first_inner->cond_equal;
+ first_inner= first_inner->first_upper;
+ }
+ else
+ equals= 0;
+ }
ref_item->update_used_tables();
if (*ref_item_ptr != ref_item)
{
@@ -8919,7 +8930,7 @@ void revise_cache_usage(JOIN_TAB *join_tab)
first_inner;
first_inner= first_inner->first_upper)
{
- for (tab= end_tab-1; tab >= first_inner; tab--)
+ for (tab= end_tab; tab >= first_inner; tab--)
set_join_cache_denial(tab);
end_tab= first_inner;
}
@@ -8927,7 +8938,7 @@ void revise_cache_usage(JOIN_TAB *join_tab)
else if (join_tab->first_sj_inner_tab)
{
first_inner= join_tab->first_sj_inner_tab;
- for (tab= join_tab-1; tab >= first_inner; tab--)
+ for (tab= join_tab; tab >= first_inner; tab--)
{
set_join_cache_denial(tab);
}
@@ -9169,6 +9180,9 @@ uint check_join_cache_usage(JOIN_TAB *tab,
if (tab->use_quick == 2)
goto no_join_cache;
+
+ if (tab->table->map & join->complex_firstmatch_tables)
+ goto no_join_cache;
/*
Don't use join cache if we're inside a join tab range covered by LooseScan
@@ -9226,7 +9240,7 @@ uint check_join_cache_usage(JOIN_TAB *tab,
Check whether table tab and the previous one belong to the same nest of
inner tables and if so do not use join buffer when joining table tab.
*/
- if (tab->first_inner)
+ if (tab->first_inner && tab != tab->first_inner)
{
for (JOIN_TAB *first_inner= tab[-1].first_inner;
first_inner;
@@ -9236,7 +9250,7 @@ uint check_join_cache_usage(JOIN_TAB *tab,
goto no_join_cache;
}
}
- else if (tab->first_sj_inner_tab &&
+ else if (tab->first_sj_inner_tab && tab != tab->first_sj_inner_tab &&
tab->first_sj_inner_tab == tab[-1].first_sj_inner_tab)
goto no_join_cache;
}
@@ -9379,7 +9393,7 @@ void check_join_cache_usage_for_tables(JOIN *join, ulonglong options,
{
tab->used_join_cache_level= join->max_allowed_join_cache_level;
}
-
+
uint idx= join->const_tables;
for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES);
tab;
@@ -9464,6 +9478,8 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
bool statistics= test(!(join->select_options & SELECT_DESCRIBE));
bool sorted= 1;
+ join->complex_firstmatch_tables= table_map(0);
+
if (!join->select_lex->sj_nests.is_empty() &&
setup_semijoin_dups_elimination(join, options, no_jbuf_after))
DBUG_RETURN(TRUE); /* purecov: inspected */
@@ -10343,6 +10359,15 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
}
+ /*
+    Clean up so that this function's calls for ORDER BY and for GROUP BY do
+    not interfere with each other
+ */
+ for (JOIN_TAB *tab= join->join_tab + join->const_tables;
+ tab < join->join_tab + join->table_count;
+ tab++)
+ tab->cached_eq_ref_table= FALSE;
+
prev_ptr= &first_order;
*simple_order= *join->join_tab[join->const_tables].on_expr_ref ? 0 : 1;
@@ -10433,10 +10458,22 @@ return_zero_rows(JOIN *join, select_result *result, List<TABLE_LIST> &tables,
if (send_row)
{
+ /*
+      Set all tables to have a NULL row. This is needed as we will be
+      evaluating the HAVING condition.
+ */
List_iterator<TABLE_LIST> ti(tables);
TABLE_LIST *table;
while ((table= ti++))
- mark_as_null_row(table->table); // All fields are NULL
+ {
+ /*
+ Don't touch semi-join materialization tables, as the above join_free()
+ call has freed them (and HAVING clause can't have references to them
+ anyway).
+ */
+ if (!table->is_jtbm())
+ mark_as_null_row(table->table); // All fields are NULL
+ }
if (having &&
!having->walk(&Item::clear_sum_processor, FALSE, NULL) &&
having->val_int() == 0)
@@ -11492,7 +11529,7 @@ Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
}
/*
- Check if "item_field=head" equality is already guaranteed to be true
+ Check if "field_item=head" equality is already guaranteed to be true
on upper AND-levels.
*/
if (upper)
@@ -11504,7 +11541,8 @@ Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
Item_equal_fields_iterator li(*item_equal);
while ((item= li++) != field_item)
{
- if (item->find_item_equal(upper_levels) == upper)
+ if (embedding_sjm(item) == field_sjm &&
+ item->find_item_equal(upper_levels) == upper)
break;
}
}
@@ -11648,7 +11686,7 @@ static COND* substitute_for_best_equal_field(JOIN_TAB *context_tab,
if (and_level)
{
cond_equal= &((Item_cond_and *) cond)->cond_equal;
- cond_list->disjoin((List<Item> *) &cond_equal->current_level);
+      cond_list->disjoin((List<Item> *) &cond_equal->current_level); /* remove Item_equal objects from the AND. */
List_iterator_fast<Item_equal> it(cond_equal->current_level);
while ((item_equal= it++))
@@ -12685,9 +12723,8 @@ optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list,
multiple equality contains a constant.
*/
DBUG_EXECUTE("where", print_where(conds, "original", QT_ORDINARY););
- conds= build_equal_items(join->thd, conds, NULL, join_list,
- &join->cond_equal);
- DBUG_EXECUTE("where",print_where(conds,"after equal_items", QT_ORDINARY););
+ conds= build_equal_items(join->thd, conds, NULL, join_list, cond_equal);
+ DBUG_EXECUTE("where",print_where(conds,"after equal_items", QT_ORDINARY););
/* change field = field to field = const for each found field = const */
propagate_cond_constants(thd, (I_List<COND_CMP> *) 0, conds, conds);
@@ -15392,7 +15429,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
if (join_tab->loosescan_match_tab &&
join_tab->loosescan_match_tab->found_match)
{
- KEY *key= join_tab->table->key_info + join_tab->index;
+ KEY *key= join_tab->table->key_info + join_tab->loosescan_key;
key_copy(join_tab->loosescan_buf, join_tab->table->record[0], key,
join_tab->loosescan_key_len);
skip_over= TRUE;
@@ -15402,7 +15439,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
if (skip_over && !error)
{
- if(!key_cmp(join_tab->table->key_info[join_tab->index].key_part,
+ if(!key_cmp(join_tab->table->key_info[join_tab->loosescan_key].key_part,
join_tab->loosescan_buf, join_tab->loosescan_key_len))
{
/*
@@ -18405,7 +18442,6 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
table->sort.io_cache= NULL;
select->cleanup(); // filesort did select
- tab->select= 0;
table->quick_keys.clear_all(); // as far as we cleanup select->quick
table->intersect_keys.clear_all();
table->sort.io_cache= tablesort_result_cache;
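
The LooseScan fix above reads the key definition through loosescan_key instead of tab->index (which is not set for ref access); the surrounding code saves the key prefix of the row that produced a match and then skips rows while their key still starts with that prefix. A self-contained sketch of that skip-by-saved-prefix idea follows; a plain byte comparison stands in for key_cmp and the buffer size is arbitrary.

#include <cstring>
#include <cstdint>

/* Sketch of the LooseScan duplicate-skipping idea: remember the key prefix
   of the row that produced a match and skip every following row whose key
   starts with the same prefix.  'prefix_len' plays the role of
   loosescan_key_len, 'saved' that of loosescan_buf. */
struct loosescan_state
{
  uint8_t saved[64];
  size_t  prefix_len;
  bool    skipping;
};

static void remember_match(loosescan_state *st, const uint8_t *key,
                           size_t prefix_len)
{
  st->prefix_len= prefix_len;
  memcpy(st->saved, key, prefix_len);
  st->skipping= true;
}

static bool should_skip(const loosescan_state *st, const uint8_t *key)
{
  return st->skipping && memcmp(st->saved, key, st->prefix_len) == 0;
}
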
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 8b448130eaf..a5fa59a070a 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -379,6 +379,12 @@ typedef struct st_join_table {
/* Buffer to save index tuple to be able to skip duplicates */
uchar *loosescan_buf;
+ /*
+ Index used by LooseScan (we store it here separately because ref access
+ stores it in tab->ref.key, while range scan stores it in tab->index, etc)
+ */
+ uint loosescan_key;
+
/* Length of key tuple (depends on #keyparts used) to store in the above */
uint loosescan_key_len;
@@ -989,6 +995,13 @@ public:
  /* We also maintain a stack of join optimization states in
   * join->positions[] */
/******* Join optimization state members end *******/
+
+ /*
+ Tables within complex firstmatch ranges (i.e. those where inner tables are
+ interleaved with outer tables). Join buffering cannot be used for these.
+ */
+ table_map complex_firstmatch_tables;
+
/*
The cost of best complete join plan found so far during optimization,
after optimization phase - cost of picked join order (not taking into
@@ -1443,6 +1456,7 @@ public:
virtual ~store_key() {} /** Not actually needed */
virtual enum Type type() const=0;
virtual const char *name() const=0;
+ virtual bool store_key_is_const() { return false; }
/**
@brief sets ignore truncation warnings mode and calls the real copy method
@@ -1596,6 +1610,7 @@ public:
enum Type type() const { return CONST_ITEM_STORE_KEY; }
const char *name() const { return "const"; }
+ bool store_key_is_const() { return true; }
protected:
enum store_key_result copy_inner()
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index be4c0e3e0d0..222f6e07bc2 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -5254,6 +5254,11 @@ bool mysql_assign_to_keycache(THD* thd, TABLE_LIST* tables,
DBUG_RETURN(TRUE);
}
pthread_mutex_unlock(&LOCK_global_system_variables);
+ if (!key_cache->key_cache_inited)
+ {
+ my_error(ER_UNKNOWN_KEY_CACHE, MYF(0), key_cache_name->str);
+ DBUG_RETURN(TRUE);
+ }
check_opt.key_cache= key_cache;
DBUG_RETURN(mysql_admin_table(thd, tables, &check_opt,
"assign_to_keycache", TL_READ_NO_INSERT, 0, 0,
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 90d9405ebe5..599dc993483 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -961,6 +961,7 @@ bool st_select_lex::cleanup()
}
non_agg_fields.empty();
inner_refs_list.empty();
+ exclude_from_table_unique_test= FALSE;
DBUG_RETURN(error);
}
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 80eb823c346..a921a87884e 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -257,7 +257,7 @@ int mysql_update(THD *thd,
thd_proc_info(thd, "init");
table= table_list->table;
- if (!table_list->updatable)
+ if (!table_list->single_table_updatable())
{
my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
DBUG_RETURN(1);
@@ -1090,7 +1090,7 @@ reopen_tables:
/* if table will be updated then check that it is unique */
if (table->map & tables_for_update)
{
- if (!tl->updatable || check_key_in_view(thd, tl))
+ if (!tl->single_table_updatable() || check_key_in_view(thd, tl))
{
my_error(ER_NON_UPDATABLE_TABLE, MYF(0), tl->alias, "UPDATE");
DBUG_RETURN(TRUE);
diff --git a/sql/table.cc b/sql/table.cc
index 318e32da2f3..54f99cff28b 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -3863,6 +3863,28 @@ bool TABLE_LIST::prep_where(THD *thd, Item **conds,
DBUG_RETURN(FALSE);
}
+/**
+  Check that the table/view is updatable and, if it is a view over a single
+  underlying table/view, that the underlying table/view is updatable too
+
+ @return Result of the check.
+*/
+
+bool TABLE_LIST::single_table_updatable()
+{
+ if (!updatable)
+ return false;
+ if (view_tables && view_tables->elements == 1)
+ {
+ /*
+ We need to check deeply only single table views. Multi-table views
+      Only single-table views need to be checked in depth here. Multi-table
+      views are turned into multi-table updates and are then checked per
+      leaf table
+ return view_tables->head()->single_table_updatable();
+ }
+ return true;
+}
+
/*
Merge ON expressions for a view
@@ -4507,6 +4529,36 @@ bool TABLE_LIST::prepare_security(THD *thd)
DBUG_RETURN(FALSE);
}
+#ifndef DBUG_OFF
+void TABLE_LIST::set_check_merged()
+{
+ DBUG_ASSERT(derived);
+ /*
+    It is not simple to check everything, but at least this should be checked:
+    either this select is not excluded, or the exclusion came from above.
+ */
+ DBUG_ASSERT(!derived->first_select()->exclude_from_table_unique_test ||
+ derived->outer_select()->
+ exclude_from_table_unique_test);
+}
+#endif
+
+void TABLE_LIST::set_check_materialized()
+{
+ DBUG_ASSERT(derived);
+ if (!derived->first_select()->exclude_from_table_unique_test)
+ derived->set_unique_exclude();
+ else
+ {
+ /*
+ The subtree should be already excluded
+ */
+ DBUG_ASSERT(!derived->first_select()->first_inner_unit() ||
+ derived->first_select()->first_inner_unit()->first_select()->
+ exclude_from_table_unique_test);
+ }
+}
+
Natural_join_column::Natural_join_column(Field_translator *field_param,
TABLE_LIST *tab)
@@ -4632,6 +4684,7 @@ Item *Field_iterator_table::create_item(THD *thd)
{
select->non_agg_fields.push_back(item);
item->marker= select->cur_pos_in_select_list;
+ select->set_non_agg_field_used(true);
}
return item;
}
@@ -5335,6 +5388,9 @@ void st_table::mark_virtual_columns_for_write(bool insert_fl)
Field **vfield_ptr, *tmp_vfield;
bool bitmap_updated= FALSE;
+  if (!vfield)
+    return;
+
for (vfield_ptr= vfield; *vfield_ptr; vfield_ptr++)
{
tmp_vfield= *vfield_ptr;
@@ -6084,8 +6143,9 @@ bool TABLE_LIST::init_derived(THD *thd, bool init_view)
*/
if (is_materialized_derived())
{
- unit->master_unit()->set_unique_exclude();
+ set_check_materialized();
}
+
/*
Create field translation for mergeable derived tables/views.
For derived tables field translation can be created only after
diff --git a/sql/table.h b/sql/table.h
index 3d0dd35b21f..4a1719073b8 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1758,16 +1758,18 @@ struct TABLE_LIST
inline void set_merged_derived()
{
derived_type= ((derived_type & DTYPE_MASK) |
- DTYPE_TABLE | DTYPE_MERGE);
+ DTYPE_TABLE | DTYPE_MERGE);
+ set_check_merged();
}
inline bool is_materialized_derived()
{
return (derived_type & DTYPE_MATERIALIZE);
}
- inline void set_materialized_derived()
+ void set_materialized_derived()
{
derived_type= ((derived_type & DTYPE_MASK) |
- DTYPE_TABLE | DTYPE_MATERIALIZE);
+ DTYPE_TABLE | DTYPE_MATERIALIZE);
+ set_check_materialized();
}
inline bool is_multitable()
{
@@ -1808,9 +1810,17 @@ struct TABLE_LIST
int fetch_number_of_rows();
bool change_refs_to_fields();
+ bool single_table_updatable();
+
private:
bool prep_check_option(THD *thd, uint8 check_opt_type);
bool prep_where(THD *thd, Item **conds, bool no_where_clause);
+ void set_check_materialized();
+#ifndef DBUG_OFF
+ void set_check_merged();
+#else
+ inline void set_check_merged() {}
+#endif
/*
Cleanup for re-execution in a prepared statement or a stored
procedure.
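
set_check_merged() uses the common debug-only member pattern: the assertion-heavy body is compiled only when DBUG_OFF is not defined, and release builds get an empty inline stub so call sites stay unconditional. Below is a generic sketch of the same shape, using the standard NDEBUG macro rather than the server's DBUG_OFF.

#include <cassert>

/*
  Same shape as TABLE_LIST::set_check_merged(): in debug builds the member
  is a real function full of assertions; in release builds it collapses to
  an inline no-op, so call sites stay unconditional.  NDEBUG is used here
  instead of the server's DBUG_OFF macro, and the invariant is illustrative.
*/
struct derived_ref
{
  bool merged;
  bool excluded;

#ifndef NDEBUG
  void check_merged() const
  {
    /* invariant verified only in debug builds */
    assert(!merged || !excluded);
  }
#else
  inline void check_merged() const {}
#endif

  void set_merged()
  {
    merged= true;
    check_merged();          /* free in release builds */
  }
};
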
diff --git a/sql/winservice.c b/sql/winservice.c
index 562f047fa79..3ec91c26835 100644
--- a/sql/winservice.c
+++ b/sql/winservice.c
@@ -116,7 +116,7 @@ int get_mysql_service_properties(const wchar_t *bin_path,
wcscat(mysqld_path, L".exe");
if(wcsicmp(file_part, L"mysqld.exe") != 0 &&
- wcsicmp(file_part, L"mysqld.exe") != 0 &&
+ wcsicmp(file_part, L"mysqld-debug.exe") != 0 &&
wcsicmp(file_part, L"mysqld-nt.exe") != 0)
{
/* The service executable is not mysqld. */
@@ -244,4 +244,4 @@ int get_mysql_service_properties(const wchar_t *bin_path,
end:
LocalFree((HLOCAL)args);
return retval;
-}
\ No newline at end of file
+}