Diffstat (limited to 'sql')
-rw-r--r--  sql/event_db_repository.cc  |   2
-rw-r--r--  sql/ha_ndbcluster.cc        |   4
-rw-r--r--  sql/ha_ndbcluster_binlog.cc |   9
-rw-r--r--  sql/ha_partition.cc         |  54
-rw-r--r--  sql/ha_partition.h          |  11
-rw-r--r--  sql/handler.cc              |  12
-rw-r--r--  sql/handler.h               |   9
-rw-r--r--  sql/item.cc                 |  21
-rw-r--r--  sql/item_func.cc            |   6
-rw-r--r--  sql/item_timefunc.cc        |  21
-rw-r--r--  sql/item_xmlfunc.cc         |   6
-rw-r--r--  sql/log_event.cc            |   4
-rw-r--r--  sql/log_event_old.cc        |   6
-rw-r--r--  sql/mysqld.cc               |  47
-rw-r--r--  sql/partition_info.cc       |   6
-rw-r--r--  sql/repl_failsafe.cc        |   4
-rw-r--r--  sql/rpl_rli.cc              |   4
-rw-r--r--  sql/set_var.cc              |  13
-rw-r--r--  sql/set_var.h               |   3
-rw-r--r--  sql/share/errmsg.txt        |   2
-rw-r--r--  sql/slave.cc                |  31
-rw-r--r--  sql/sql_cache.cc            |   2
-rw-r--r--  sql/sql_class.h             |   3
-rw-r--r--  sql/sql_connect.cc          |   2
-rw-r--r--  sql/sql_db.cc               |  10
-rw-r--r--  sql/sql_delete.cc           | 148
-rw-r--r--  sql/sql_handler.cc          |   7
-rw-r--r--  sql/sql_partition.cc        |   4
-rw-r--r--  sql/sql_plugin.cc           |  22
-rw-r--r--  sql/sql_repl.cc             |  14
-rw-r--r--  sql/sql_select.cc           |  10
-rw-r--r--  sql/sql_show.cc             |   2
-rw-r--r--  sql/sql_table.cc            |  22
-rw-r--r--  sql/strfunc.cc              |   2
-rw-r--r--  sql/udf_example.c           |   4
35 files changed, 323 insertions(+), 204 deletions(-)
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index 9a253d74546..8faab5023da 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -711,7 +711,7 @@ Event_db_repository::update_event(THD *thd, Event_parse_data *parse_data,
DBUG_ENTER("Event_db_repository::update_event");
/* None or both must be set */
- DBUG_ASSERT(new_dbname && new_name || new_dbname == new_name);
+ DBUG_ASSERT((new_dbname && new_name) || new_dbname == new_name);
/* Reset sql_mode during data dictionary operations. */
thd->variables.sql_mode= 0;
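Several hunks in this commit do nothing but add parentheses (see also log_event_old.cc, sql_connect.cc, sql_db.cc and sql_partition.cc below). A quick standalone sketch of the warning they silence, using plain assert() rather than anything from the server: '&&' binds tighter than '||', so gcc's -Wparentheses asks for the grouping to be spelled out.

    #include <cassert>
    #include <cstddef>

    /* Either both names are given, or both are NULL: the same invariant as the
       DBUG_ASSERT above, with the grouping made explicit. */
    static void check_rename(const char *new_dbname, const char *new_name)
    {
      assert((new_dbname && new_name) || new_dbname == new_name);
    }

    int main()
    {
      check_rename("db", "event");   /* both set: passes */
      check_rename(NULL, NULL);      /* both unset: passes */
      return 0;
    }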
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index ddb90427fdc..dfd7ddc4d6c 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -9426,9 +9426,11 @@ ndb_util_thread_fail:
pthread_cond_signal(&COND_ndb_util_ready);
pthread_mutex_unlock(&LOCK_ndb_util_thread);
DBUG_PRINT("exit", ("ndb_util_thread"));
+
+ DBUG_LEAVE; // Must match DBUG_ENTER()
my_thread_end();
pthread_exit(0);
- DBUG_RETURN(NULL);
+ return NULL; // Avoid compiler warnings
}
/*
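The same change is applied to every detached thread in this commit (the binlog injector, kill_server(), signal_hand(), the slave threads and the failsafe thread), so a hedged sketch of the shape may help: DBUG_ENTER() opens a trace frame that DBUG_RETURN() would normally close, but pthread_exit() never returns, so the frame has to be closed explicitly with DBUG_LEAVE, and a plain return is left behind only to keep the compiler quiet. The body below is illustrative and assumes the server's usual headers (my_dbug.h, my_pthread.h); it is not the real thread function.

    pthread_handler_t example_thread_func(void *arg)
    {
      DBUG_ENTER("example_thread_func");  /* opens a dbug trace frame */
      /* ... thread work ... */
      DBUG_LEAVE;                         /* close the frame; must match DBUG_ENTER() */
      my_thread_end();
      pthread_exit(0);                    /* does not return */
      return NULL;                        /* unreachable; avoids "no return value" warnings */
    }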
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index d9a9738ce72..2a87697c7fa 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -3663,9 +3663,11 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
ndb_binlog_thread_running= -1;
pthread_mutex_unlock(&injector_mutex);
pthread_cond_signal(&injector_cond);
+
+ DBUG_LEAVE; // Must match DBUG_ENTER()
my_thread_end();
pthread_exit(0);
- DBUG_RETURN(NULL);
+ return NULL; // Avoid compiler warnings
}
lex_start(thd);
@@ -4376,10 +4378,11 @@ err:
(void) pthread_cond_signal(&injector_cond);
DBUG_PRINT("exit", ("ndb_binlog_thread"));
- my_thread_end();
+ DBUG_LEAVE; // Must match DBUG_ENTER()
+ my_thread_end();
pthread_exit(0);
- DBUG_RETURN(NULL);
+ return NULL; // Avoid compiler warnings
}
bool
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 5b053ab9cac..df5badccb1e 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1001,7 +1001,7 @@ static bool print_admin_msg(THD* thd, const char* msg_type,
if (!thd->vio_ok())
{
- sql_print_error(msgbuf);
+ sql_print_error("%s", msgbuf);
return TRUE;
}
@@ -3037,7 +3037,7 @@ int ha_partition::write_row(uchar * buf)
}
m_last_part= part_id;
DBUG_PRINT("info", ("Insert in partition %d", part_id));
- start_part_bulk_insert(part_id);
+ start_part_bulk_insert(thd, part_id);
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
error= m_file[part_id]->ha_write_row(buf);
@@ -3101,7 +3101,7 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
}
m_last_part= new_part_id;
- start_part_bulk_insert(new_part_id);
+ start_part_bulk_insert(thd, new_part_id);
if (new_part_id == old_part_id)
{
DBUG_PRINT("info", ("Update in partition %d", new_part_id));
@@ -3282,17 +3282,63 @@ void ha_partition::start_bulk_insert(ha_rows rows)
Check if start_bulk_insert has been called for this partition,
if not, call it and mark it called
*/
-void ha_partition::start_part_bulk_insert(uint part_id)
+void ha_partition::start_part_bulk_insert(THD *thd, uint part_id)
{
+ long old_buffer_size;
if (!bitmap_is_set(&m_bulk_insert_started, part_id) &&
bitmap_is_set(&m_bulk_insert_started, m_tot_parts))
{
+ old_buffer_size= thd->variables.read_buff_size;
+ /* Update read_buffer_size for this partition */
+ thd->variables.read_buff_size= estimate_read_buffer_size(old_buffer_size);
m_file[part_id]->ha_start_bulk_insert(guess_bulk_insert_rows());
bitmap_set_bit(&m_bulk_insert_started, part_id);
+ thd->variables.read_buff_size= old_buffer_size;
}
m_bulk_inserted_rows++;
}
+/*
+ Estimate the read buffer size for each partition.
+ SYNOPSIS
+ ha_partition::estimate_read_buffer_size()
+ original_size read buffer size originally set for the server
+ RETURN VALUE
+ estimated buffer size.
+ DESCRIPTION
+    If the estimated number of rows to insert is less than 10 (but not 0),
+    the new buffer size is the same as the original buffer size.
+    For the first partition, when the partition function is monotonic,
+    the new buffer size is also the same as the original buffer size.
+    For the remaining partitions, a total buffer of 10*original_size is
+    divided equally if there are 10 or more partitions; otherwise each
+    partition is allowed to use the original buffer size.
+*/
+long ha_partition::estimate_read_buffer_size(long original_size)
+{
+ /*
+    If the number of rows to insert is less than 10, but not 0,
+ return original buffer size.
+ */
+ if (estimation_rows_to_insert && (estimation_rows_to_insert < 10))
+ return (original_size);
+ /*
+ If first insert/partition and monotonic partition function,
+ allow using buffer size originally set.
+ */
+ if (!m_bulk_inserted_rows &&
+ m_part_func_monotonicity_info != NON_MONOTONIC &&
+ m_tot_parts > 1)
+ return original_size;
+ /*
+    Allow the total buffer used across all partitions to go up to
+    10*read_buffer_size (11*read_buffer_size with a monotonic partition function).
+ */
+
+ if (m_tot_parts < 10)
+ return original_size;
+ return (original_size * 10 / m_tot_parts);
+}
/*
Try to predict the number of inserts into this partition.
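A worked example of the heuristic described above for estimate_read_buffer_size(), stripped of the row-count and monotonicity special cases (the function below is an illustrative restatement, not the server code): with read_buffer_size = 131072 (128K) and 20 partitions, each partition joining the bulk insert gets 131072 * 10 / 20 = 65536 bytes, so the combined allocation stays near ten times the configured size.

    #include <cstdio>

    /* Illustrative core of the heuristic: cap the combined per-partition read
       buffers at roughly 10 * read_buffer_size. */
    static long estimated_partition_buffer(long original_size, unsigned total_parts)
    {
      if (total_parts < 10)
        return original_size;                   /* few partitions: full size each */
      return original_size * 10 / total_parts;  /* many partitions: share a 10x pool */
    }

    int main()
    {
      std::printf("%ld\n", estimated_partition_buffer(131072, 20));  /* prints 65536 */
      return 0;
    }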
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index f47dfe8f621..c08b1f77eca 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -367,7 +367,8 @@ public:
virtual int end_bulk_insert();
private:
ha_rows guess_bulk_insert_rows();
- void start_part_bulk_insert(uint part_id);
+ void start_part_bulk_insert(THD *thd, uint part_id);
+ long estimate_read_buffer_size(long original_size);
public:
virtual bool is_fatal_error(int error, uint flags)
@@ -772,10 +773,10 @@ public:
if (m_handler_status < handler_initialized ||
m_handler_status >= handler_closed)
DBUG_RETURN(PARTITION_ENABLED_TABLE_FLAGS);
- else
- DBUG_RETURN((m_file[0]->ha_table_flags() &
- ~(PARTITION_DISABLED_TABLE_FLAGS)) |
- (PARTITION_ENABLED_TABLE_FLAGS));
+
+ DBUG_RETURN((m_file[0]->ha_table_flags() &
+ ~(PARTITION_DISABLED_TABLE_FLAGS)) |
+ (PARTITION_ENABLED_TABLE_FLAGS));
}
/*
diff --git a/sql/handler.cc b/sql/handler.cc
index a4d88e84f4c..d07ebed8ab9 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -3501,14 +3501,10 @@ int handler::index_next_same(uchar *buf, const uchar *key, uint keylen)
if (!(error=index_next(buf)))
{
my_ptrdiff_t ptrdiff= buf - table->record[0];
- uchar *save_record_0;
- KEY *key_info;
- KEY_PART_INFO *key_part;
- KEY_PART_INFO *key_part_end;
- LINT_INIT(save_record_0);
- LINT_INIT(key_info);
- LINT_INIT(key_part);
- LINT_INIT(key_part_end);
+ uchar *UNINIT_VAR(save_record_0);
+ KEY *UNINIT_VAR(key_info);
+ KEY_PART_INFO *UNINIT_VAR(key_part);
+ KEY_PART_INFO *UNINIT_VAR(key_part_end);
/*
key_cmp_if_same() compares table->record[0] against 'key'.
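handler.cc above, and item_func.cc, item_timefunc.cc and sql_handler.cc below, all replace the two-step LINT_INIT() pattern with UNINIT_VAR() wrapped around the declaration itself. A minimal sketch of the idea follows; the server's real macro lives in its own headers and may differ, but self-initialization is the usual trick for silencing a spurious "may be used uninitialized" warning without generating extra code.

    #include <cstdio>

    /* Sketch only: the server defines UNINIT_VAR itself. */
    #if defined(__GNUC__)
    #define UNINIT_VAR(x) x= x
    #else
    #define UNINIT_VAR(x) x= 0
    #endif

    static void demo(bool have_key)
    {
      int UNINIT_VAR(key_len);        /* assigned only on some paths */
      if (have_key)
        key_len= 16;
      if (have_key)                   /* the compiler cannot prove key_len is set here */
        std::printf("key_len=%d\n", key_len);
    }

    int main() { demo(true); return 0; }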
diff --git a/sql/handler.h b/sql/handler.h
index f759239d66e..fe8f7c437ff 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -913,6 +913,15 @@ typedef struct st_ha_create_information
ulong key_block_size;
SQL_LIST merge_list;
handlerton *db_type;
+ /**
+ Row type of the table definition.
+
+ Defaults to ROW_TYPE_DEFAULT for all non-ALTER statements.
+ For ALTER TABLE it defaults to ROW_TYPE_NOT_USED (meaning "keep the current").
+
+ It is set either explicitly by the parser or, if nothing is specified,
+ inherited from the original table (if present).
+ */
enum row_type row_type;
uint null_bits; /* NULL bits at start of record */
uint options; /* OR of HA_CREATE_ options */
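A self-contained restatement of the semantics documented above, matching the ALTER TABLE hunk in sql_table.cc further down; the enum values and names here are illustrative stand-ins, not the server's definitions.

    #include <cstdio>

    enum row_type_t { ROW_TYPE_NOT_USED= -1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED, ROW_TYPE_DYNAMIC };

    /* ALTER TABLE without a ROW_FORMAT clause arrives as ROW_TYPE_NOT_USED and
       keeps whatever the table already uses; anything else is an explicit choice. */
    static row_type_t effective_row_type(row_type_t requested, row_type_t current)
    {
      return requested == ROW_TYPE_NOT_USED ? current : requested;
    }

    int main()
    {
      std::printf("%d\n", effective_row_type(ROW_TYPE_NOT_USED, ROW_TYPE_DYNAMIC)); /* keeps DYNAMIC */
      std::printf("%d\n", effective_row_type(ROW_TYPE_FIXED, ROW_TYPE_DYNAMIC));    /* explicit FIXED */
      return 0;
    }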
diff --git a/sql/item.cc b/sql/item.cc
index 26df3a45971..86e4551e55b 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -6331,9 +6331,26 @@ bool Item_direct_view_ref::fix_fields(THD *thd, Item **reference)
/* view fild reference must be defined */
DBUG_ASSERT(*ref);
/* (*ref)->check_cols() will be made in Item_direct_ref::fix_fields */
- if (!(*ref)->fixed &&
- ((*ref)->fix_fields(thd, ref)))
+ if ((*ref)->fixed)
+ {
+ Item *ref_item= (*ref)->real_item();
+ if (ref_item->type() == Item::FIELD_ITEM)
+ {
+ /*
+ In some cases we need to update the table read set (see bug#47150).
+ If the ref item is a FIELD_ITEM and is fixed, then its field and table
+ have proper values, so we can use them for the update.
+ */
+ Field *fld= ((Item_field*) ref_item)->field;
+ DBUG_ASSERT(fld && fld->table);
+ if (thd->mark_used_columns == MARK_COLUMNS_READ)
+ bitmap_set_bit(fld->table->read_set, fld->field_index);
+ }
+ }
+ else if (!(*ref)->fixed &&
+ ((*ref)->fix_fields(thd, ref)))
return TRUE;
+
return Item_direct_ref::fix_fields(thd, reference);
}
diff --git a/sql/item_func.cc b/sql/item_func.cc
index c775848cff2..d9e6f76dd6b 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -435,8 +435,7 @@ bool Item_func::eq(const Item *item, bool binary_cmp) const
Field *Item_func::tmp_table_field(TABLE *table)
{
- Field *field;
- LINT_INIT(field);
+ Field *field= NULL;
switch (result_type()) {
case INT_RESULT:
@@ -4236,9 +4235,8 @@ void Item_func_set_user_var::save_item_result(Item *item)
bool
Item_func_set_user_var::update()
{
- bool res;
+ bool res= 0;
DBUG_ENTER("Item_func_set_user_var::update");
- LINT_INIT(res);
switch (cached_result_type) {
case REAL_RESULT:
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index a9e5c71ab56..b5037c08b3c 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -278,9 +278,9 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
int strict_week_number_year= -1;
int frac_part;
bool usa_time= 0;
- bool sunday_first_n_first_week_non_iso;
- bool strict_week_number;
- bool strict_week_number_year_type;
+ bool UNINIT_VAR(sunday_first_n_first_week_non_iso);
+ bool UNINIT_VAR(strict_week_number);
+ bool UNINIT_VAR(strict_week_number_year_type);
const char *val_begin= val;
const char *val_end= val + length;
const char *ptr= format->format.str;
@@ -288,11 +288,6 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
CHARSET_INFO *cs= &my_charset_bin;
DBUG_ENTER("extract_date_time");
- LINT_INIT(strict_week_number);
- /* Remove valgrind varnings when using gcc 3.3 and -O1 */
- PURIFY_OR_LINT_INIT(strict_week_number_year_type);
- PURIFY_OR_LINT_INIT(sunday_first_n_first_week_non_iso);
-
if (!sub_pattern_end)
bzero((char*) l_time, sizeof(*l_time));
@@ -1349,15 +1344,11 @@ bool get_interval_value(Item *args,interval_type int_type,
String *str_value, INTERVAL *interval)
{
ulonglong array[5];
- longlong value;
- const char *str;
- size_t length;
+ longlong UNINIT_VAR(value);
+ const char *UNINIT_VAR(str);
+ size_t UNINIT_VAR(length);
CHARSET_INFO *cs=str_value->charset();
- LINT_INIT(value);
- LINT_INIT(str);
- LINT_INIT(length);
-
bzero((char*) interval,sizeof(*interval));
if ((int) int_type <= INTERVAL_MICROSECOND)
{
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index 6320873e4d3..1eff00027f2 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -1354,7 +1354,7 @@ my_xpath_lex_scan(MY_XPATH *xpath,
MY_XPATH_LEX *lex, const char *beg, const char *end)
{
int ch, ctype, length;
- for ( ; beg < end && *beg == ' ' ; beg++); // skip leading spaces
+ for ( ; beg < end && *beg == ' ' ; beg++) ; // skip leading spaces
lex->beg= beg;
if (beg >= end)
@@ -1423,7 +1423,7 @@ my_xpath_lex_scan(MY_XPATH *xpath,
if (my_xdigit(ch)) // a sequence of digits
{
- for ( ; beg < end && my_xdigit(*beg) ; beg++);
+ for ( ; beg < end && my_xdigit(*beg) ; beg++) ;
lex->end= beg;
lex->term= MY_XPATH_LEX_DIGITS;
return;
@@ -1431,7 +1431,7 @@ my_xpath_lex_scan(MY_XPATH *xpath,
if (ch == '"' || ch == '\'') // a string: either '...' or "..."
{
- for ( ; beg < end && *beg != ch ; beg++);
+ for ( ; beg < end && *beg != ch ; beg++) ;
if (beg < end)
{
lex->end= beg+1;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 08fe3aba8ed..3c584011bfb 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -8835,11 +8835,11 @@ int Rows_log_event::find_row(const Relay_log_info *rli)
*/
store_record(table,record[1]);
- if (table->s->keys > 0)
+ if (table->s->keys > 0 && table->s->keys_in_use.is_set(0))
{
DBUG_PRINT("info",("locating record using primary key (index_read)"));
- /* We have a key: search the table using the index */
+ /* The 0th key is active: search the table using the index */
if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE)))
{
DBUG_PRINT("info",("ha_index_init returns error %d",error));
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index 68cd2bf4673..f14ebe49706 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -1245,8 +1245,8 @@ Old_rows_log_event::Old_rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid,
solution, to be able to terminate a started statement in the
binary log: the extraneous events will be removed in the future.
*/
- DBUG_ASSERT(tbl_arg && tbl_arg->s && tid != ~0UL ||
- !tbl_arg && !cols && tid == ~0UL);
+ DBUG_ASSERT((tbl_arg && tbl_arg->s && tid != ~0UL) ||
+ (!tbl_arg && !cols && tid == ~0UL));
if (thd_arg->options & OPTION_NO_FOREIGN_KEY_CHECKS)
set_flags(NO_FOREIGN_KEY_CHECKS_F);
@@ -1409,7 +1409,7 @@ int Old_rows_log_event::do_add_row_data(uchar *row_data, size_t length)
#endif
DBUG_ASSERT(m_rows_buf <= m_rows_cur);
- DBUG_ASSERT(!m_rows_buf || m_rows_end && m_rows_buf < m_rows_end);
+ DBUG_ASSERT(!m_rows_buf || (m_rows_end && m_rows_buf < m_rows_end));
DBUG_ASSERT(m_rows_cur <= m_rows_end);
/* The cast will always work since m_rows_cur <= m_rows_end */
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 3f536f01094..3433b54cb29 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -1107,13 +1107,13 @@ void kill_mysql(void)
#if defined(__NETWARE__)
extern "C" void kill_server(int sig_ptr)
-#define RETURN_FROM_KILL_SERVER DBUG_VOID_RETURN
+#define RETURN_FROM_KILL_SERVER return
#elif !defined(__WIN__)
static void *kill_server(void *sig_ptr)
-#define RETURN_FROM_KILL_SERVER DBUG_RETURN(0)
+#define RETURN_FROM_KILL_SERVER return 0
#else
static void __cdecl kill_server(int sig_ptr)
-#define RETURN_FROM_KILL_SERVER DBUG_VOID_RETURN
+#define RETURN_FROM_KILL_SERVER return
#endif
{
DBUG_ENTER("kill_server");
@@ -1121,7 +1121,10 @@ static void __cdecl kill_server(int sig_ptr)
int sig=(int) (long) sig_ptr; // This is passed a int
// if there is a signal during the kill in progress, ignore the other
if (kill_in_progress) // Safety
+ {
+ DBUG_LEAVE;
RETURN_FROM_KILL_SERVER;
+ }
kill_in_progress=TRUE;
abort_loop=1; // This should be set
if (sig != 0) // 0 is not a valid signal number
@@ -1156,12 +1159,19 @@ static void __cdecl kill_server(int sig_ptr)
pthread_join(select_thread, NULL); // wait for main thread
#endif /* __NETWARE__ */
+ DBUG_LEAVE; // Must match DBUG_ENTER()
my_thread_end();
pthread_exit(0);
/* purecov: end */
-#endif /* EMBEDDED_LIBRARY */
+ RETURN_FROM_KILL_SERVER; // Avoid compiler warnings
+
+#else /* EMBEDDED_LIBRARY*/
+
+ DBUG_LEAVE;
RETURN_FROM_KILL_SERVER;
+
+#endif /* EMBEDDED_LIBRARY */
}
@@ -1935,8 +1945,9 @@ bool one_thread_per_connection_end(THD *thd, bool put_in_cache)
my_thread_end();
(void) pthread_cond_broadcast(&COND_thread_count);
+ DBUG_LEAVE; // Must match DBUG_ENTER()
pthread_exit(0);
- DBUG_RETURN(0); // Impossible
+ return 0; // Avoid compiler warnings
}
@@ -2119,15 +2130,14 @@ static void init_signals(void)
win_install_sigabrt_handler();
if(opt_console)
SetConsoleCtrlHandler(console_event_handler,TRUE);
- else
- {
+
/* Avoid MessageBox()es*/
- _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
- _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
- _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE);
- _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
- _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE);
- _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
+ _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
+ _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
+ _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE);
+ _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
+ _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE);
+ _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
/*
Do not use SEM_NOGPFAULTERRORBOX in the following SetErrorMode (),
@@ -2136,8 +2146,8 @@ static void init_signals(void)
exception filter is not guaranteed to work in all situation
(like heap corruption or stack overflow)
*/
- SetErrorMode(SetErrorMode(0)|SEM_FAILCRITICALERRORS|SEM_NOOPENFILEERRORBOX);
- }
+ SetErrorMode(SetErrorMode(0) | SEM_FAILCRITICALERRORS
+ | SEM_NOOPENFILEERRORBOX);
SetUnhandledExceptionFilter(my_unhandler_exception_filter);
}
@@ -2757,7 +2767,9 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
DBUG_PRINT("quit",("signal_handler: calling my_thread_end()"));
my_thread_end();
signal_thread_in_use= 0;
+ DBUG_LEAVE; // Must match DBUG_ENTER()
pthread_exit(0); // Safety
+ return 0; // Avoid compiler warnings
}
switch (sig) {
case SIGTERM:
@@ -3707,6 +3719,7 @@ static void end_ssl()
static int init_server_components()
{
+ FILE* reopen;
DBUG_ENTER("init_server_components");
/*
We need to call each of these following functions to ensure that
@@ -3749,7 +3762,7 @@ static int init_server_components()
if (freopen(log_error_file, "a+", stdout))
#endif
{
- freopen(log_error_file, "a+", stderr);
+ reopen= freopen(log_error_file, "a+", stderr);
setbuf(stderr, NULL);
}
}
@@ -4604,7 +4617,7 @@ default_service_handling(char **argv,
if (opt_delim= strchr(extra_opt, '='))
{
size_t length= ++opt_delim - extra_opt;
- strnmov(pos, extra_opt, length);
+ pos= strnmov(pos, extra_opt, length);
}
else
opt_delim= extra_opt;
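The init_server_components() hunk above exists only to stop a warn_unused_result warning: freopen()'s result is now stored instead of dropped. A standalone equivalent of that code path, with an illustrative log path, looks like this:

    #include <cstdio>

    /* Redirect stderr to the error log, keeping freopen()'s result so a failed
       redirection can at least be detected. */
    static int redirect_stderr(const char *log_path)
    {
      FILE *reopened= freopen(log_path, "a+", stderr);
      if (reopened == NULL)
        return 1;              /* redirection failed */
      setbuf(stderr, NULL);    /* unbuffered, as in the server code */
      return 0;
    }

    int main()
    {
      return redirect_stderr("/tmp/mysqld.err.example");
    }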
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index e2027d3571e..ba9ea0e876e 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -483,10 +483,8 @@ static bool check_engine_condition(partition_element *p_elem,
{
DBUG_RETURN(TRUE);
}
- else
- {
- DBUG_RETURN(FALSE);
- }
+
+ DBUG_RETURN(FALSE);
}
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index 582348608de..312499bb4ee 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -638,9 +638,11 @@ err:
if (recovery_captain)
mysql_close(recovery_captain);
delete thd;
+
+ DBUG_LEAVE; // Must match DBUG_ENTER()
my_thread_end();
pthread_exit(0);
- DBUG_RETURN(0);
+ return 0; // Avoid compiler warnings
}
#endif
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index 18fbae9bb9d..0c6cc15297f 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -33,10 +33,10 @@ Relay_log_info::Relay_log_info()
:Slave_reporting_capability("SQL"),
no_storage(FALSE), replicate_same_server_id(::replicate_same_server_id),
info_fd(-1), cur_log_fd(-1), save_temporary_tables(0),
+ cur_log_old_open_count(0), group_relay_log_pos(0), event_relay_log_pos(0),
#if HAVE_purify
is_fake(FALSE),
#endif
- cur_log_old_open_count(0), group_relay_log_pos(0), event_relay_log_pos(0),
group_master_log_pos(0), log_space_total(0), ignore_log_space_limit(0),
last_master_timestamp(0), slave_skip_counter(0),
abort_pos_wait(0), slave_run_id(0), sql_thd(0),
@@ -300,7 +300,7 @@ Failed to open the existing relay log info file '%s' (errno %d)",
DBUG_RETURN(error);
err:
- sql_print_error(msg);
+ sql_print_error("%s", msg);
end_io_cache(&rli->info_file);
if (info_fd >= 0)
my_close(info_fd, MYF(0));
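rpl_rli.cc above, like ha_partition.cc, slave.cc, sql_cache.cc and sql_plugin.cc elsewhere in this commit, gains a "%s" wrapper around its logging call. The point, shown with plain fprintf rather than the server's logging API: a buffer that may contain '%' conversions must never be passed as the format string itself.

    #include <cstdio>

    static void log_message(const char *msg)
    {
      /* Unsafe: std::fprintf(stderr, msg); msg would be parsed as a format
         string, and a stray "%s" or "%n" in it reads or writes through
         whatever happens to be on the stack. */
      std::fprintf(stderr, "%s\n", msg);  /* safe: msg is only ever data */
    }

    int main()
    {
      log_message("Duplicate entry '100%s' for key 1");  /* the '%' is now harmless */
      return 0;
    }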
diff --git a/sql/set_var.cc b/sql/set_var.cc
index b64b54fdd29..cd01f6c8624 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -529,11 +529,11 @@ static sys_var_const sys_skip_networking(&vars, "skip_networking",
static sys_var_const sys_skip_show_database(&vars, "skip_show_database",
OPT_GLOBAL, SHOW_BOOL,
(uchar*) &opt_skip_show_db);
-#ifdef HAVE_SYS_UN_H
+
static sys_var_const sys_socket(&vars, "socket",
OPT_GLOBAL, SHOW_CHAR_PTR,
(uchar*) &mysqld_unix_port);
-#endif
+
#ifdef HAVE_THR_SETCONCURRENCY
/* purecov: begin tested */
static sys_var_const sys_thread_concurrency(&vars, "thread_concurrency",
@@ -3349,7 +3349,7 @@ int set_var_init()
uint count= 0;
DBUG_ENTER("set_var_init");
- for (sys_var *var=vars.first; var; var= var->next, count++);
+ for (sys_var *var=vars.first; var; var= var->next, count++) ;
if (hash_init(&system_variable_hash, system_charset_info, count, 0,
0, (hash_get_key) get_sys_var_length, 0, HASH_UNIQUE))
@@ -4184,10 +4184,10 @@ bool sys_var_opt_readonly::update(THD *thd, set_var *var)
can cause to wait on a read lock, it's required for the client application
to unlock everything, and acceptable for the server to wait on all locks.
*/
- if (result= close_cached_tables(thd, NULL, FALSE, TRUE, TRUE))
+ if ((result= close_cached_tables(thd, NULL, FALSE, TRUE, TRUE)))
goto end_with_read_lock;
- if (result= make_global_read_lock_block_commit(thd))
+ if ((result= make_global_read_lock_block_commit(thd)))
goto end_with_read_lock;
/* Change the opt_readonly system variable, safe because the lock is held */
@@ -4200,6 +4200,7 @@ end_with_read_lock:
}
+#ifndef DBUG_OFF
/* even session variable here requires SUPER, because of -#o,file */
bool sys_var_thd_dbug::check(THD *thd, set_var *var)
{
@@ -4226,6 +4227,8 @@ uchar *sys_var_thd_dbug::value_ptr(THD *thd, enum_var_type type, LEX_STRING *b)
DBUG_EXPLAIN(buf, sizeof(buf));
return (uchar*) thd->strdup(buf);
}
+#endif /* DBUG_OFF */
+
#ifdef HAVE_EVENT_SCHEDULER
bool sys_var_event_scheduler::check(THD *thd, set_var *var)
diff --git a/sql/set_var.h b/sql/set_var.h
index 10e6e0f9c35..a54591b0fd6 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -621,6 +621,7 @@ public:
uchar *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
};
+#ifndef DBUG_OFF
class sys_var_thd_dbug :public sys_var_thd
{
public:
@@ -634,7 +635,7 @@ public:
void set_default(THD *thd, enum_var_type type) { DBUG_POP(); }
uchar *value_ptr(THD *thd, enum_var_type type, LEX_STRING *b);
};
-
+#endif /* DBUG_OFF */
/* some variables that require special handling */
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 3aba434b284..fdad2a44b68 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -4702,7 +4702,7 @@ ER_NOT_SUPPORTED_YET 42000
swe "Denna version av MySQL kan ännu inte utföra '%s'"
ER_MASTER_FATAL_ERROR_READING_BINLOG
nla "Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log"
- eng "Got fatal error %d: '%-.128s' from master when reading data from binary log"
+ eng "Got fatal error %d from master when reading data from binary log: '%-.128s'"
ger "Schwerer Fehler %d: '%-.128s vom Master beim Lesen des binären Logs"
ita "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario"
por "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log"
diff --git a/sql/slave.cc b/sql/slave.cc
index fac9ee214c5..66cbef1029a 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -1195,7 +1195,7 @@ err:
if (master_res)
mysql_free_result(master_res);
DBUG_ASSERT(err_code != 0);
- mi->report(ERROR_LEVEL, err_code, err_buff);
+ mi->report(ERROR_LEVEL, err_code, "%s", err_buff);
DBUG_RETURN(1);
}
@@ -2386,7 +2386,7 @@ static bool check_io_slave_killed(THD *thd, Master_info *mi, const char *info)
if (io_slave_killed(thd, mi))
{
if (info && global_system_variables.log_warnings)
- sql_print_information(info);
+ sql_print_information("%s", info);
return TRUE;
}
return FALSE;
@@ -2456,13 +2456,13 @@ static int try_to_reconnect(THD *thd, MYSQL *mysql, Master_info *mi,
}
else
{
- sql_print_information(buf);
+ sql_print_information("%s", buf);
}
}
if (safe_reconnect(thd, mysql, mi, 1) || io_slave_killed(thd, mi))
{
if (global_system_variables.log_warnings)
- sql_print_information(messages[SLAVE_RECON_MSG_KILLED_AFTER]);
+ sql_print_information("%s", messages[SLAVE_RECON_MSG_KILLED_AFTER]);
return 1;
}
return 0;
@@ -2678,15 +2678,19 @@ Log entry on master is longer than max_allowed_packet (%ld) on \
slave. If the entry is correct, restart the server with a higher value of \
max_allowed_packet",
thd->variables.max_allowed_packet);
+ mi->report(ERROR_LEVEL, ER_NET_PACKET_TOO_LARGE,
+ "%s", ER(ER_NET_PACKET_TOO_LARGE));
goto err;
case ER_MASTER_FATAL_ERROR_READING_BINLOG:
- sql_print_error(ER(mysql_error_number), mysql_error_number,
- mysql_error(mysql));
+ mi->report(ERROR_LEVEL, ER_MASTER_FATAL_ERROR_READING_BINLOG,
+ ER(ER_MASTER_FATAL_ERROR_READING_BINLOG),
+ mysql_error_number, mysql_error(mysql));
goto err;
- case EE_OUTOFMEMORY:
- case ER_OUTOFMEMORY:
+ case ER_OUT_OF_RESOURCES:
sql_print_error("\
Stopping slave I/O thread due to out-of-memory error from master");
+ mi->report(ERROR_LEVEL, ER_OUT_OF_RESOURCES,
+ "%s", ER(ER_OUT_OF_RESOURCES));
goto err;
}
if (try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings,
@@ -2795,9 +2799,11 @@ err:
pthread_cond_broadcast(&mi->stop_cond); // tell the world we are done
DBUG_EXECUTE_IF("simulate_slave_delay_at_terminate_bug38694", sleep(5););
pthread_mutex_unlock(&mi->run_lock);
+
+ DBUG_LEAVE; // Must match DBUG_ENTER()
my_thread_end();
pthread_exit(0);
- DBUG_RETURN(0); // Can't return anything here
+ return 0; // Avoid compiler warnings
}
/*
@@ -3044,7 +3050,7 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
This function is reporting an error which was not reported
while executing exec_relay_log_event().
*/
- rli->report(ERROR_LEVEL, thd->main_da.sql_errno(), errmsg);
+ rli->report(ERROR_LEVEL, thd->main_da.sql_errno(), "%s", errmsg);
}
else if (last_errno != thd->main_da.sql_errno())
{
@@ -3153,10 +3159,11 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
pthread_cond_broadcast(&rli->stop_cond);
DBUG_EXECUTE_IF("simulate_slave_delay_at_terminate_bug38694", sleep(5););
pthread_mutex_unlock(&rli->run_lock); // tell the world we are done
-
+
+ DBUG_LEAVE; // Must match DBUG_ENTER()
my_thread_end();
pthread_exit(0);
- DBUG_RETURN(0); // Can't return anything here
+ return 0; // Avoid compiler warnings
}
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index df19fd05417..a41b7bd40bb 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -383,7 +383,7 @@ static void debug_wait_for_kill(const char *info)
thd= current_thd;
prev_info= thd->proc_info;
thd->proc_info= info;
- sql_print_information(info);
+ sql_print_information("%s", info);
while(!thd->killed)
my_sleep(1000);
thd->killed= THD::NOT_KILLED;
diff --git a/sql/sql_class.h b/sql/sql_class.h
index c38eb17f191..b0128244030 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -2907,7 +2907,8 @@ public:
bool send_data(List<Item> &items);
bool initialize_tables (JOIN *join);
void send_error(uint errcode,const char *err);
- int do_deletes();
+ int do_deletes();
+ int do_table_deletes(TABLE *table, bool ignore);
bool send_eof();
virtual void abort();
};
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index 98574a07a4e..bb4ba2bf21b 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -985,7 +985,7 @@ static void end_connection(THD *thd)
if (thd->user_connect)
decrease_user_connections(thd->user_connect);
- if (thd->killed || net->error && net->vio != 0)
+ if (thd->killed || (net->error && net->vio != 0))
{
statistic_increment(aborted_threads,&LOCK_status);
}
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 3fca5bd7df6..c19bfba9fc1 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -907,6 +907,9 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
remove_db_from_cache(db);
pthread_mutex_unlock(&LOCK_open);
+ Drop_table_error_handler err_handler(thd->get_internal_handler());
+ thd->push_internal_handler(&err_handler);
+
error= -1;
/*
We temporarily disable the binary log while dropping the objects
@@ -939,6 +942,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
error = 0;
reenable_binlog(thd);
}
+ thd->pop_internal_handler();
}
if (!silent && deleted>=0)
{
@@ -1446,11 +1450,11 @@ cmp_db_names(const char *db1_name,
{
return
/* db1 is NULL and db2 is NULL */
- !db1_name && !db2_name ||
+ (!db1_name && !db2_name) ||
/* db1 is not-NULL, db2 is not-NULL, db1 == db2. */
- db1_name && db2_name &&
- my_strcasecmp(system_charset_info, db1_name, db2_name) == 0;
+ (db1_name && db2_name &&
+ my_strcasecmp(system_charset_info, db1_name, db2_name) == 0);
}
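The cmp_db_names() hunk above is another parenthesization fix. Restated as a standalone, NULL-safe comparison, with POSIX strcasecmp standing in for the charset-aware my_strcasecmp:

    #include <cstdio>
    #include <strings.h>   /* strcasecmp (POSIX) */

    static bool db_names_equal(const char *db1, const char *db2)
    {
      return (!db1 && !db2) ||                           /* both NULL */
             (db1 && db2 && strcasecmp(db1, db2) == 0);  /* both set and equal */
    }

    int main()
    {
      std::printf("%d %d %d\n",
                  db_names_equal(NULL, NULL),      /* 1 */
                  db_names_equal("test", "TEST"),  /* 1 */
                  db_names_equal("test", NULL));   /* 0 */
      return 0;
    }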
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index d2f90fa9288..a81a5f4641f 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -860,22 +860,19 @@ void multi_delete::abort()
-/*
+/**
Do delete from other tables.
- Returns values:
- 0 ok
- 1 error
+
+ @retval 0 ok
+ @retval 1 error
+
+ @todo Is there any reason not to use the normal nested-loops join? If not, and
+ there is no documentation supporting it, this method and callee should be
+ removed and there should be hooks within normal execution.
*/
int multi_delete::do_deletes()
{
- int local_error= 0, counter= 0, tmp_error;
- bool will_batch;
- /*
- If the IGNORE option is used all non fatal errors will be translated
- to warnings and we should not break the row-by-row iteration
- */
- bool ignore= thd->lex->current_select->no_error;
DBUG_ENTER("do_deletes");
DBUG_ASSERT(do_delete);
@@ -886,79 +883,108 @@ int multi_delete::do_deletes()
table_being_deleted= (delete_while_scanning ? delete_tables->next_local :
delete_tables);
- for (; table_being_deleted;
+ for (uint counter= 0; table_being_deleted;
table_being_deleted= table_being_deleted->next_local, counter++)
{
- ha_rows last_deleted= deleted;
TABLE *table = table_being_deleted->table;
if (tempfiles[counter]->get(table))
+ DBUG_RETURN(1);
+
+ int local_error=
+ do_table_deletes(table, thd->lex->current_select->no_error);
+
+ if (thd->killed && !local_error)
+ DBUG_RETURN(1);
+
+ if (local_error == -1) // End of file
+ local_error = 0;
+
+ if (local_error)
+ DBUG_RETURN(local_error);
+ }
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Implements the inner loop of nested-loops join within multi-DELETE
+ execution.
+
+ @param table The table from which to delete.
+
+ @param ignore If true, all non-fatal errors will be translated
+ to warnings and the row-by-row iteration will not be broken.
+
+ @return Status code
+
+ @retval 0 All ok.
+ @retval 1 Triggers or handler reported error.
+ @retval -1 End of file from handler.
+*/
+int multi_delete::do_table_deletes(TABLE *table, bool ignore)
+{
+ int local_error= 0;
+ READ_RECORD info;
+ ha_rows last_deleted= deleted;
+ DBUG_ENTER("do_deletes_for_table");
+ init_read_record(&info, thd, table, NULL, 0, 1, FALSE);
+ /*
+ Ignore any rows not found in reference tables as they may already have
+ been deleted by foreign key handling
+ */
+ info.ignore_not_found_rows= 1;
+ bool will_batch= !table->file->start_bulk_delete();
+ while (!(local_error= info.read_record(&info)) && !thd->killed)
+ {
+ if (table->triggers &&
+ table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
+ TRG_ACTION_BEFORE, FALSE))
{
- local_error=1;
+ local_error= 1;
break;
}
-
- READ_RECORD info;
- init_read_record(&info, thd, table, NULL, 0, 1, FALSE);
+
+ local_error= table->file->ha_delete_row(table->record[0]);
+ if (local_error && !ignore)
+ {
+ table->file->print_error(local_error, MYF(0));
+ break;
+ }
+
/*
- Ignore any rows not found in reference tables as they may already have
- been deleted by foreign key handling
+ Increase the reported number of deleted rows only if no error occurred
+ during ha_delete_row.
+ Also, don't execute the AFTER trigger if the row operation failed.
*/
- info.ignore_not_found_rows= 1;
- will_batch= !table->file->start_bulk_delete();
- while (!(local_error=info.read_record(&info)) && !thd->killed)
+ if (!local_error)
{
+ deleted++;
if (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
- TRG_ACTION_BEFORE, FALSE))
+ TRG_ACTION_AFTER, FALSE))
{
local_error= 1;
break;
}
-
- local_error= table->file->ha_delete_row(table->record[0]);
- if (local_error && !ignore)
- {
- table->file->print_error(local_error,MYF(0));
- break;
- }
-
- /*
- Increase the reported number of deleted rows only if no error occurred
- during ha_delete_row.
- Also, don't execute the AFTER trigger if the row operation failed.
- */
- if (!local_error)
- {
- deleted++;
- if (table->triggers &&
- table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
- TRG_ACTION_AFTER, FALSE))
- {
- local_error= 1;
- break;
- }
- }
}
- if (will_batch && (tmp_error= table->file->end_bulk_delete()))
+ }
+ if (will_batch)
+ {
+ int tmp_error= table->file->end_bulk_delete();
+ if (tmp_error && !local_error)
{
- if (!local_error)
- {
- local_error= tmp_error;
- table->file->print_error(local_error,MYF(0));
- }
+ local_error= tmp_error;
+ table->file->print_error(local_error, MYF(0));
}
- if (last_deleted != deleted && !table->file->has_transactions())
- thd->transaction.stmt.modified_non_trans_table= TRUE;
- end_read_record(&info);
- if (thd->killed && !local_error)
- local_error= 1;
- if (local_error == -1) // End of file
- local_error = 0;
}
+ if (last_deleted != deleted && !table->file->has_transactions())
+ thd->transaction.stmt.modified_non_trans_table= TRUE;
+
+ end_read_record(&info);
+
DBUG_RETURN(local_error);
}
-
/*
Send ok to the client
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 1e92d95573a..3bbf4b78d07 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -422,16 +422,13 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
String buffer(buff, sizeof(buff), system_charset_info);
int error, keyno= -1;
uint num_rows;
- uchar *key;
- uint key_len;
+ uchar *UNINIT_VAR(key);
+ uint UNINIT_VAR(key_len);
bool need_reopen;
DBUG_ENTER("mysql_ha_read");
DBUG_PRINT("enter",("'%s'.'%s' as '%s'",
tables->db, tables->table_name, tables->alias));
- LINT_INIT(key);
- LINT_INIT(key_len);
-
thd->lex->select_lex.context.resolve_in_table_list_only(tables);
list.push_front(new Item_field(&thd->lex->select_lex.context,
NULL, NULL, "*"));
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 08ff2daacb9..2fb09ab7566 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -7098,9 +7098,9 @@ static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter)
longlong dummy;
field->store(part_iter->field_vals.cur++,
((Field_num*)field)->unsigned_flag);
- if (part_iter->part_info->is_sub_partitioned() &&
+ if ((part_iter->part_info->is_sub_partitioned() &&
!part_iter->part_info->get_part_partition_id(part_iter->part_info,
- &part_id, &dummy) ||
+ &part_id, &dummy)) ||
!part_iter->part_info->get_partition_id(part_iter->part_info,
&part_id, &dummy))
return part_id;
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index da168d36429..b411d5e3095 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -368,7 +368,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
if (report & REPORT_TO_USER)
my_error(ER_UDF_NO_PATHS, MYF(0));
if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_UDF_NO_PATHS));
+ sql_print_error("%s", ER(ER_UDF_NO_PATHS));
DBUG_RETURN(0);
}
/* If this dll is already loaded just increase ref_count. */
@@ -1520,7 +1520,7 @@ error:
void plugin_shutdown(void)
{
- uint i, count= plugin_array.elements, free_slots= 0;
+ uint i, count= plugin_array.elements;
struct st_plugin_int **plugins, *plugin;
struct st_plugin_dl **dl;
DBUG_ENTER("plugin_shutdown");
@@ -1541,18 +1541,13 @@ void plugin_shutdown(void)
while (reap_needed && (count= plugin_array.elements))
{
reap_plugins();
- for (i= free_slots= 0; i < count; i++)
+ for (i= 0; i < count; i++)
{
plugin= *dynamic_element(&plugin_array, i, struct st_plugin_int **);
- switch (plugin->state) {
- case PLUGIN_IS_READY:
+ if (plugin->state == PLUGIN_IS_READY)
+ {
plugin->state= PLUGIN_IS_DELETED;
reap_needed= true;
- break;
- case PLUGIN_IS_FREED:
- case PLUGIN_IS_UNINITIALIZED:
- free_slots++;
- break;
}
}
if (!reap_needed)
@@ -1565,9 +1560,6 @@ void plugin_shutdown(void)
}
}
- if (count > free_slots)
- sql_print_warning("Forcing shutdown of %d plugins", count - free_slots);
-
plugins= (struct st_plugin_int **) my_alloca(sizeof(void*) * (count+1));
/*
@@ -1589,8 +1581,8 @@ void plugin_shutdown(void)
if (!(plugins[i]->state & (PLUGIN_IS_UNINITIALIZED | PLUGIN_IS_FREED |
PLUGIN_IS_DISABLED)))
{
- sql_print_information("Plugin '%s' will be forced to shutdown",
- plugins[i]->name.str);
+ sql_print_warning("Plugin '%s' will be forced to shutdown",
+ plugins[i]->name.str);
/*
We are forcing deinit on plugins so we don't want to do a ref_count
check until we have processed all the plugins.
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 0ec8d91214c..b8f2e1e39bf 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -623,7 +623,7 @@ impossible position";
*/
{
log.error=0;
- bool read_packet = 0, fatal_error = 0;
+ bool read_packet = 0;
#ifndef DBUG_OFF
if (max_binlog_dump_events && !left_events--)
@@ -645,7 +645,7 @@ impossible position";
*/
pthread_mutex_lock(log_lock);
- switch (Log_event::read_log_event(&log, packet, (pthread_mutex_t*)0)) {
+ switch (error= Log_event::read_log_event(&log, packet, (pthread_mutex_t*) 0)) {
case 0:
/* we read successfully, so we'll need to send it to the slave */
pthread_mutex_unlock(log_lock);
@@ -671,8 +671,8 @@ impossible position";
default:
pthread_mutex_unlock(log_lock);
- fatal_error = 1;
- break;
+ test_for_non_eof_log_read_errors(error, &errmsg);
+ goto err;
}
if (read_packet)
@@ -701,12 +701,6 @@ impossible position";
*/
}
- if (fatal_error)
- {
- errmsg = "error reading log entry";
- my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
- goto err;
- }
log.error=0;
}
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 8b1e0ae365b..3f1432914a0 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -2248,7 +2248,7 @@ JOIN::destroy()
tab->cleanup();
}
tmp_join->tmp_join= 0;
- tmp_table_param.cleanup();
+ tmp_table_param.copy_field= 0;
DBUG_RETURN(tmp_join->destroy());
}
cond_equal= 0;
@@ -3308,12 +3308,12 @@ add_key_equal_fields(KEY_FIELD **key_fields, uint and_level,
@retval FALSE it's something else
*/
-inline static bool
+static bool
is_local_field (Item *field)
{
- field= field->real_item();
- return field->type() == Item::FIELD_ITEM &&
- !((Item_field *)field)->depended_from;
+ return field->real_item()->type() == Item::FIELD_ITEM
+ && !(field->used_tables() & OUTER_REF_TABLE_BIT)
+ && !((Item_field *)field->real_item())->depended_from;
}
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 5c2c351652b..bb377e676e2 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -3529,7 +3529,9 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
TABLE_SHARE *share= show_table->s;
handler *file= show_table->file;
handlerton *tmp_db_type= share->db_type();
+#ifdef WITH_PARTITION_STORAGE_ENGINE
bool is_partitioned= FALSE;
+#endif
if (share->tmp_table == SYSTEM_TMP_TABLE)
table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"), cs);
else if (share->tmp_table)
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 5f718b25d60..1d147c54059 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1951,7 +1951,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
being built. The string always end in a comma and the comma
will be chopped off before being written to the binary log.
*/
- if (thd->current_stmt_binlog_row_based && !dont_log_query)
+ if (!drop_temporary && thd->current_stmt_binlog_row_based && !dont_log_query)
{
non_temp_tables_count++;
/*
@@ -6658,9 +6658,19 @@ view_err:
goto err;
}
+ /*
+ If this is an ALTER TABLE and no explicit row type is specified, reuse
+ the table's row type.
+ Note: this is the same as if the row type had been specified explicitly.
+ */
if (create_info->row_type == ROW_TYPE_NOT_USED)
{
+ /* ALTER TABLE without explicit row type */
create_info->row_type= table->s->row_type;
+ }
+ else
+ {
+ /* ALTER TABLE with specific row type */
create_info->used_fields |= HA_CREATE_USED_ROW_FORMAT;
}
@@ -7889,8 +7899,14 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
for (uint i= 0; i < t->s->fields; i++ )
{
Field *f= t->field[i];
- if ((f->type() == MYSQL_TYPE_BLOB) ||
- (f->type() == MYSQL_TYPE_VARCHAR))
+ enum_field_types field_type= f->type();
+ /*
+ BLOB and VARCHAR fields store pointers in the record, so we must convert
+ them to strings; GEOMETRY is implemented on top of BLOB.
+ */
+ if ((field_type == MYSQL_TYPE_BLOB) ||
+ (field_type == MYSQL_TYPE_VARCHAR) ||
+ (field_type == MYSQL_TYPE_GEOMETRY))
{
String tmp;
f->val_str(&tmp);
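Why the checksum loop above routes BLOB/VARCHAR/GEOMETRY columns through val_str() first, shown with a toy record and a stand-in hash (none of this is server code): the record slot holds a pointer, and pointer bits differ from run to run even when the stored value is identical, so only the pointed-to bytes give a checksum that is comparable between servers.

    #include <cstdio>
    #include <cstring>

    struct record_slot { unsigned length; const char *data; };  /* pointer lives in the record */

    static unsigned fnv1a(const void *p, size_t n)   /* stand-in for the real checksum */
    {
      unsigned h= 2166136261u;
      const unsigned char *b= (const unsigned char*) p;
      for (size_t i= 0; i < n; i++)
        h= (h ^ b[i]) * 16777619u;
      return h;
    }

    int main()
    {
      record_slot slot= { 5, "hello" };
      std::printf("record bytes: %u  (unstable: hashes a pointer)\n", fnv1a(&slot, sizeof(slot)));
      std::printf("value bytes:  %u  (stable: hashes the data itself)\n", fnv1a(slot.data, slot.length));
      return 0;
    }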
diff --git a/sql/strfunc.cc b/sql/strfunc.cc
index 1fb9c1a8451..56fa4a380ea 100644
--- a/sql/strfunc.cc
+++ b/sql/strfunc.cc
@@ -147,7 +147,7 @@ static uint parse_name(TYPELIB *lib, const char **strpos, const char *end,
}
}
else
- for (; pos != end && *pos != '=' && *pos !=',' ; pos++);
+ for (; pos != end && *pos != '=' && *pos !=',' ; pos++) ;
uint var_len= (uint) (pos - start);
/* Determine which flag it is */
diff --git a/sql/udf_example.c b/sql/udf_example.c
index 30d85d95034..82af58ec502 100644
--- a/sql/udf_example.c
+++ b/sql/udf_example.c
@@ -139,10 +139,10 @@ typedef long long longlong;
#include <mysql.h>
#include <ctype.h>
-static pthread_mutex_t LOCK_hostname;
-
#ifdef HAVE_DLOPEN
+static pthread_mutex_t LOCK_hostname;
+
/* These must be right or mysqld will not find the symbol! */
my_bool metaphon_init(UDF_INIT *initid, UDF_ARGS *args, char *message);