author    Vladislav Vaintroub <wlad@mariadb.com>  2016-04-09 17:03:48 +0200
committer Vladislav Vaintroub <wlad@mariadb.com>  2016-04-09 17:03:48 +0200
commit    cd776fedba3e2902bc25ee206d6e6266e7eb9411 (patch)
tree      95bf82d0e0522c6af708cd28639c82e004b5a264 /sql
parent    f884d233e6a5f68bab846a7bdbd041fc4415ad77 (diff)
parent    d516a2ae0cbd09d3b5b1667ec62b421330ab9902 (diff)
Merge branch '10.2' into 10.2-connector-c-integ
Diffstat (limited to 'sql')
-rw-r--r--  sql/CMakeLists.txt | 4
-rw-r--r--  sql/bounded_queue.h | 2
-rw-r--r--  sql/contributors.h | 8
-rw-r--r--  sql/derror.cc | 255
-rw-r--r--  sql/derror.h | 3
-rw-r--r--  sql/discover.cc | 5
-rw-r--r--  sql/event_db_repository.cc | 7
-rw-r--r--  sql/event_queue.cc | 2
-rw-r--r--  sql/events.cc | 33
-rw-r--r--  sql/field.cc | 66
-rw-r--r--  sql/field.h | 2
-rw-r--r--  sql/field_conv.cc | 3
-rw-r--r--  sql/filesort.cc | 289
-rw-r--r--  sql/filesort.h | 96
-rw-r--r--  sql/filesort_utils.cc | 68
-rw-r--r--  sql/filesort_utils.h | 34
-rw-r--r--  sql/gcalc_slicescan.h | 2
-rw-r--r--  sql/ha_partition.cc | 2
-rw-r--r--  sql/handler.cc | 17
-rw-r--r--  sql/handler.h | 4
-rw-r--r--  sql/item.cc | 24
-rw-r--r--  sql/item.h | 17
-rw-r--r--  sql/item_cmpfunc.cc | 243
-rw-r--r--  sql/item_cmpfunc.h | 114
-rw-r--r--  sql/item_func.cc | 38
-rw-r--r--  sql/item_func.h | 20
-rw-r--r--  sql/item_row.h | 11
-rw-r--r--  sql/item_subselect.cc | 7
-rw-r--r--  sql/item_subselect.h | 3
-rw-r--r--  sql/item_sum.cc | 57
-rw-r--r--  sql/item_sum.h | 14
-rw-r--r--  sql/key.cc | 5
-rw-r--r--  sql/lex.h | 1
-rw-r--r--  sql/log.cc | 14
-rw-r--r--  sql/log_event.cc | 26
-rw-r--r--  sql/log_event_old.cc | 4
-rw-r--r--  sql/my_apc.cc | 2
-rw-r--r--  sql/mysql_install_db.cc | 6
-rw-r--r--  sql/mysqld.cc | 53
-rw-r--r--  sql/mysqld.h | 16
-rw-r--r--  sql/net_serv.cc | 22
-rw-r--r--  sql/opt_range.cc | 29
-rw-r--r--  sql/opt_range.h | 3
-rw-r--r--  sql/opt_subselect.cc | 3
-rw-r--r--  sql/records.cc | 59
-rw-r--r--  sql/records.h | 6
-rw-r--r--  sql/rpl_parallel.cc | 7
-rw-r--r--  sql/rpl_parallel.h | 2
-rw-r--r--  sql/rpl_record.cc | 4
-rw-r--r--  sql/set_var.cc | 2
-rw-r--r--  sql/set_var.h | 6
-rw-r--r--  sql/share/errmsg-utf8.txt | 34
-rw-r--r--  sql/signal_handler.cc | 4
-rw-r--r--  sql/slave.cc | 1
-rw-r--r--  sql/sp.cc | 80
-rw-r--r--  sql/sp.h | 2
-rw-r--r--  sql/sp_head.cc | 18
-rw-r--r--  sql/sp_head.h | 2
-rw-r--r--  sql/sql_acl.cc | 14
-rw-r--r--  sql/sql_admin.cc | 94
-rw-r--r--  sql/sql_audit.h | 20
-rw-r--r--  sql/sql_base.cc | 50
-rw-r--r--  sql/sql_base.h | 1
-rw-r--r--  sql/sql_class.cc | 6
-rw-r--r--  sql/sql_class.h | 102
-rw-r--r--  sql/sql_connect.cc | 2
-rw-r--r--  sql/sql_const.h | 2
-rw-r--r--  sql/sql_cte.cc | 601
-rw-r--r--  sql/sql_cte.h | 178
-rw-r--r--  sql/sql_delete.cc | 66
-rw-r--r--  sql/sql_derived.cc | 4
-rw-r--r--  sql/sql_help.cc | 17
-rw-r--r--  sql/sql_insert.cc | 2
-rw-r--r--  sql/sql_join_cache.cc | 2
-rw-r--r--  sql/sql_lex.cc | 128
-rw-r--r--  sql/sql_lex.h | 43
-rw-r--r--  sql/sql_load.cc | 383
-rw-r--r--  sql/sql_locale.h | 2
-rw-r--r--  sql/sql_parse.cc | 560
-rw-r--r--  sql/sql_parse.h | 6
-rw-r--r--  sql/sql_plugin.cc | 3
-rw-r--r--  sql/sql_prepare.cc | 26
-rw-r--r--  sql/sql_prepare.h | 4
-rw-r--r--  sql/sql_repl.cc | 4
-rw-r--r--  sql/sql_select.cc | 211
-rw-r--r--  sql/sql_select.h | 11
-rw-r--r--  sql/sql_servers.cc | 4
-rw-r--r--  sql/sql_show.cc | 41
-rw-r--r--  sql/sql_sort.h | 5
-rw-r--r--  sql/sql_statistics.cc | 1
-rw-r--r--  sql/sql_string.cc | 6
-rw-r--r--  sql/sql_table.cc | 40
-rw-r--r--  sql/sql_trigger.cc | 4
-rw-r--r--  sql/sql_udf.cc | 3
-rw-r--r--  sql/sql_union.cc | 16
-rw-r--r--  sql/sql_update.cc | 36
-rw-r--r--  sql/sql_view.cc | 14
-rw-r--r--  sql/sql_view.h | 4
-rw-r--r--  sql/sql_yacc.yy | 132
-rw-r--r--  sql/strfunc.cc | 2
-rw-r--r--  sql/sys_vars.cc | 25
-rw-r--r--  sql/table.cc | 9
-rw-r--r--  sql/table.h | 129
-rw-r--r--  sql/table_cache.cc | 2
-rw-r--r--  sql/table_cache.h | 4
-rw-r--r--  sql/threadpool_win.cc | 4
-rw-r--r--  sql/tztime.cc | 8
-rw-r--r--  sql/uniques.cc | 70
-rw-r--r--  sql/uniques.h | 100
-rw-r--r--  sql/unireg.h | 19
-rw-r--r--  sql/wsrep_sst.cc | 136
111 files changed, 3548 insertions, 1669 deletions
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 9b0017c9124..6136c39fc9c 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -112,7 +112,8 @@ SET (SQL_SOURCE
sql_statistics.cc sql_string.cc
sql_table.cc sql_test.cc sql_trigger.cc sql_udf.cc sql_union.cc
sql_update.cc sql_view.cc strfunc.cc table.cc thr_malloc.cc
- sql_time.cc tztime.cc uniques.cc unireg.cc item_xmlfunc.cc
+ sql_time.cc tztime.cc unireg.cc item_xmlfunc.cc
+ uniques.cc uniques.h
rpl_tblmap.cc sql_binlog.cc event_scheduler.cc event_data_objects.cc
event_queue.cc event_db_repository.cc
sql_tablespace.cc events.cc ../sql-common/my_user.c
@@ -137,6 +138,7 @@ SET (SQL_SOURCE
my_json_writer.cc my_json_writer.h
rpl_gtid.cc rpl_parallel.cc
sql_type.cc sql_type.h
+ sql_cte.cc sql_cte.h
${WSREP_SOURCES}
table_cache.cc encryption.cc
${CMAKE_CURRENT_BINARY_DIR}/sql_builtin.cc
diff --git a/sql/bounded_queue.h b/sql/bounded_queue.h
index 2d4e6cff96d..88c2bbc238d 100644
--- a/sql/bounded_queue.h
+++ b/sql/bounded_queue.h
@@ -16,11 +16,11 @@
#ifndef BOUNDED_QUEUE_INCLUDED
#define BOUNDED_QUEUE_INCLUDED
-#include <string.h>
#include "my_global.h"
#include "my_base.h"
#include "my_sys.h"
#include "queues.h"
+#include <string.h>
class Sort_param;
diff --git a/sql/contributors.h b/sql/contributors.h
index 255decd19cc..04f8b74aa65 100644
--- a/sql/contributors.h
+++ b/sql/contributors.h
@@ -39,17 +39,17 @@ struct show_table_contributors_st show_table_contributors[]= {
/* MariaDB foundation members, in contribution, size , time order */
{"Booking.com", "http://www.booking.com", "Founding member of the MariaDB Foundation"},
{"MariaDB Corporation", "https://mariadb.com", "Founding member of the MariaDB Foundation"},
- {"Auttomattic", "http://automattic.com", "Member of the MariaDB Foundation"},
- {"Parallels", "http://www.parallels.com/products/plesk", "Founding member of the MariaDB Foundation"},
+ {"Auttomattic", "http://automattic.com", "Member of the MariaDB Foundation"},
+ {"Visma", "http://visma.com", "Member of the MariaDB Foundation"},
+ {"Nexedi", "http://www.nexedi.com", "Member of the MariaDB Foundation"},
{"Acronis", "http://www.acronis.com", "Member of the MariaDB Foundation"},
/* Smaller sponsors, newer per year */
{"Verkkokauppa.com", "Finland", "Sponsor of the MariaDB Foundation"},
{"Webyog", "Bangalore", "Sponsor of the MariaDB Foundation"},
- {"Wikimedia Foundation", "USA", "Sponsor of the MariaDB Foundation"},
/* Sponsors of important features */
- {"Google", "USA", "Sponsoring parallel replication and GTID" },
+ {"Google", "USA", "Sponsoring encryption, parallel replication and GTID"},
{"Facebook", "USA", "Sponsoring non-blocking API, LIMIT ROWS EXAMINED etc"},
/* Individual contributors, names in historical order, newer first */
diff --git a/sql/derror.cc b/sql/derror.cc
index bc4b89493aa..b9b55c5b6e9 100644
--- a/sql/derror.cc
+++ b/sql/derror.cc
@@ -30,16 +30,19 @@
#include "derror.h" // read_texts
#include "sql_class.h" // THD
+uint errors_per_range[MAX_ERROR_RANGES+1];
+
static bool check_error_mesg(const char *file_name, const char **errmsg);
static void init_myfunc_errs(void);
C_MODE_START
-static const char **get_server_errmsgs()
+static const char **get_server_errmsgs(int nr)
{
+ int section= (nr-ER_ERROR_FIRST) / ERRORS_PER_RANGE;
if (!current_thd)
- return DEFAULT_ERRMSGS;
- return CURRENT_THD_ERRMSGS;
+ return DEFAULT_ERRMSGS[section];
+ return CURRENT_THD_ERRMSGS[section];
}
C_MODE_END
@@ -60,61 +63,88 @@ C_MODE_END
TRUE Error
*/
+static const char ***original_error_messages;
+
bool init_errmessage(void)
{
- const char **errmsgs, **ptr, **org_errmsgs;
+ const char **errmsgs;
bool error= FALSE;
DBUG_ENTER("init_errmessage");
- /*
- Get a pointer to the old error messages pointer array.
- read_texts() tries to free it.
- */
- org_errmsgs= my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST);
+ free_error_messages();
+ my_free(original_error_messages);
+ original_error_messages= 0;
+
+ error_message_charset_info= system_charset_info;
/* Read messages from file. */
if (read_texts(ERRMSG_FILE, my_default_lc_messages->errmsgs->language,
- &errmsgs, ER_ERROR_LAST - ER_ERROR_FIRST + 1) &&
- !errmsgs)
+ &original_error_messages))
{
- my_free(errmsgs);
-
- if (org_errmsgs)
- {
- /* Use old error messages */
- errmsgs= org_errmsgs;
- }
- else
+ /*
+ No error messages. Create a temporary empty error message so
+ that we don't get a crash if some code wrongly tries to access
+ a non existing error message.
+ */
+ if (!(original_error_messages= (const char***)
+ my_malloc(MAX_ERROR_RANGES * sizeof(char**) +
+ (ERRORS_PER_RANGE * sizeof(char*)),
+ MYF(0))))
+ DBUG_RETURN(TRUE);
+ errmsgs= (const char**) (original_error_messages + MAX_ERROR_RANGES);
+
+ for (uint i=0 ; i < MAX_ERROR_RANGES ; i++)
{
- /*
- No error messages. Create a temporary empty error message so
- that we don't get a crash if some code wrongly tries to access
- a non existing error message.
- */
- if (!(errmsgs= (const char**) my_malloc((ER_ERROR_LAST-ER_ERROR_FIRST+1)*
- sizeof(char*), MYF(0))))
- DBUG_RETURN(TRUE);
- for (ptr= errmsgs; ptr < errmsgs + ER_ERROR_LAST - ER_ERROR_FIRST; ptr++)
- *ptr= "";
- error= TRUE;
+ original_error_messages[i]= errmsgs;
+ errors_per_range[i]= ERRORS_PER_RANGE;
}
+ errors_per_range[2]= 0; // MYSYS error messages
+
+ for (const char **ptr= errmsgs;
+ ptr < errmsgs + ERRORS_PER_RANGE ;
+ ptr++)
+ *ptr= "";
+
+ error= TRUE;
}
- else
- my_free(org_errmsgs); // Free old language
/* Register messages for use with my_error(). */
- if (my_error_register(get_server_errmsgs, ER_ERROR_FIRST, ER_ERROR_LAST))
+ for (uint i=0 ; i < MAX_ERROR_RANGES ; i++)
{
- my_free(errmsgs);
- DBUG_RETURN(TRUE);
+ if (errors_per_range[i])
+ {
+ if (my_error_register(get_server_errmsgs, (i+1)*ERRORS_PER_RANGE,
+ (i+1)*ERRORS_PER_RANGE +
+ errors_per_range[i]-1))
+ {
+ my_free(original_error_messages);
+ original_error_messages= 0;
+ DBUG_RETURN(TRUE);
+ }
+ }
}
-
- DEFAULT_ERRMSGS= errmsgs; /* Init global variable */
+ DEFAULT_ERRMSGS= original_error_messages;
init_myfunc_errs(); /* Init myfunc messages */
DBUG_RETURN(error);
}
+void free_error_messages()
+{
+ /* We don't need to free errmsg as it's done in cleanup_errmsg */
+ for (uint i= 0 ; i < MAX_ERROR_RANGES ; i++)
+ {
+ if (errors_per_range[i])
+ {
+ my_error_unregister((i+1)*ERRORS_PER_RANGE,
+ (i+1)*ERRORS_PER_RANGE +
+ errors_per_range[i]-1);
+ errors_per_range[i]= 0;
+ }
+ }
+}
+
+
/**
Check the error messages array contains all relevant error messages
*/
@@ -125,11 +155,17 @@ static bool check_error_mesg(const char *file_name, const char **errmsg)
The last MySQL error message can't be an empty string; If it is,
it means that the error file doesn't contain all MySQL messages
and is probably from an older version of MySQL / MariaDB.
+ We also check that each section has enough error messages.
*/
- if (errmsg[ER_LAST_MYSQL_ERROR_MESSAGE -1 - ER_ERROR_FIRST][0] == 0)
+ if (errmsg[ER_LAST_MYSQL_ERROR_MESSAGE -1 - ER_ERROR_FIRST][0] == 0 ||
+ (errors_per_range[0] < ER_ERROR_LAST_SECTION_2 - ER_ERROR_FIRST + 1) ||
+ errors_per_range[1] != 0 ||
+ (errors_per_range[2] < ER_ERROR_LAST_SECTION_4 -
+ ER_ERROR_FIRST_SECTION_4 +1) ||
+ (errors_per_range[2] < ER_ERROR_LAST - ER_ERROR_FIRST_SECTION_5 + 1))
{
sql_print_error("Error message file '%s' is probably from and older "
- "version of MariaDB / MYSQL as it doesn't contain all "
+ "version of MariaDB as it doesn't contain all "
"error messages", file_name);
return 1;
}
@@ -137,27 +173,28 @@ static bool check_error_mesg(const char *file_name, const char **errmsg)
}
-/**
- Read text from packed textfile in language-directory.
+struct st_msg_file
+{
+ uint sections;
+ uint max_error;
+ uint errors;
+ size_t text_length;
+};
- If we can't read messagefile then it's panic- we can't continue.
+/**
+ Open file for packed textfile in language-directory.
*/
-bool read_texts(const char *file_name, const char *language,
- const char ***point, uint error_messages)
+static File open_error_msg_file(const char *file_name, const char *language,
+ uint error_messages, struct st_msg_file *ret)
{
- register uint i;
- uint count,funktpos;
- size_t offset, length;
+ int error_pos= 0;
File file;
char name[FN_REFLEN];
char lang_path[FN_REFLEN];
- uchar *UNINIT_VAR(buff);
- uchar head[32],*pos;
- DBUG_ENTER("read_texts");
+ uchar head[32];
+ DBUG_ENTER("open_error_msg_file");
- *point= 0;
- funktpos=0;
convert_dirname(lang_path, language, NullS);
(void) my_load_path(lang_path, lang_path, lc_messages_dir);
if ((file= mysql_file_open(key_file_ERRMSG,
@@ -168,69 +205,121 @@ bool read_texts(const char *file_name, const char *language,
/*
Trying pre-5.4 sematics of the --language parameter.
It included the language-specific part, e.g.:
-
--language=/path/to/english/
*/
if ((file= mysql_file_open(key_file_ERRMSG,
- fn_format(name, file_name, lc_messages_dir, "", 4),
+ fn_format(name, file_name, lc_messages_dir, "",
+ 4),
O_RDONLY | O_SHARE | O_BINARY,
MYF(0))) < 0)
goto err;
sql_print_warning("An old style --language or -lc-message-dir value with language specific part detected: %s", lc_messages_dir);
sql_print_warning("Use --lc-messages-dir without language specific part instead.");
}
-
- funktpos=1;
+ error_pos=1;
if (mysql_file_read(file, (uchar*) head, 32, MYF(MY_NABP)))
goto err;
- funktpos=2;
+ error_pos=2;
if (head[0] != (uchar) 254 || head[1] != (uchar) 254 ||
- head[2] != 2 || head[3] != 3)
+ head[2] != 2 || head[3] != 4)
goto err; /* purecov: inspected */
- error_message_charset_info= system_charset_info;
- length=uint4korr(head+6); count=uint2korr(head+10);
+ ret->text_length= uint4korr(head+6);
+ ret->max_error= uint2korr(head+10);
+ ret->errors= uint2korr(head+12);
+ ret->sections= uint2korr(head+14);
- if (count < error_messages)
+ if (ret->max_error < error_messages || ret->sections != MAX_ERROR_RANGES)
{
sql_print_error("\
Error message file '%s' had only %d error messages, but it should contain at least %d error messages.\nCheck that the above file is the right version for this program!",
- name,count,error_messages);
+ name,ret->errors,error_messages);
(void) mysql_file_close(file, MYF(MY_WME));
- DBUG_RETURN(1);
+ DBUG_RETURN(FERR);
}
+ DBUG_RETURN(file);
- if (!(*point= (const char**)
- my_malloc((size_t) (MY_MAX(length,count*2)+count*sizeof(char*)),MYF(0))))
- {
- funktpos=3; /* purecov: inspected */
+err:
+ sql_print_error((error_pos == 2) ?
+ "Incompatible header in messagefile '%s'. Probably from "
+ "another version of MariaDB" :
+ ((error_pos == 1) ? "Can't read from messagefile '%s'" :
+ "Can't find messagefile '%s'"), name);
+ if (file != FERR)
+ (void) mysql_file_close(file, MYF(MY_WME));
+ DBUG_RETURN(FERR);
+}
+
+
+/*
+ Define the number of normal and extra error messages in the errmsg.sys
+ file
+*/
+
+static const uint error_messages= ER_ERROR_LAST - ER_ERROR_FIRST+1;
+
+/**
+ Read text from packed textfile in language-directory.
+*/
+
+bool read_texts(const char *file_name, const char *language,
+ const char ****data)
+{
+ uint i, range_size;
+ const char **point;
+ size_t offset;
+ File file;
+ uchar *buff, *pos;
+ struct st_msg_file msg_file;
+ DBUG_ENTER("read_texts");
+
+ if ((file= open_error_msg_file(file_name, language, error_messages,
+ &msg_file)) == FERR)
+ DBUG_RETURN(1);
+
+ if (!(*data= (const char***)
+ my_malloc((size_t) ((MAX_ERROR_RANGES+1) * sizeof(char**) +
+ MY_MAX(msg_file.text_length, msg_file.errors * 2)+
+ msg_file.errors * sizeof(char*)),
+ MYF(MY_WME))))
goto err; /* purecov: inspected */
- }
- buff= (uchar*) (*point + count);
- if (mysql_file_read(file, buff, (size_t) count*2, MYF(MY_NABP)))
+ point= (const char**) ((*data) + MAX_ERROR_RANGES);
+ buff= (uchar*) (point + msg_file.errors);
+
+ if (mysql_file_read(file, buff,
+ (size_t) (msg_file.errors + msg_file.sections) * 2,
+ MYF(MY_NABP | MY_WME)))
goto err;
- for (i=0, offset=0, pos= buff ; i< count ; i++)
+
+ pos= buff;
+ /* read in sections */
+ for (i= 0, offset= 0; i < msg_file.sections ; i++)
{
- (*point)[i]= (char*) buff+offset;
- offset+= uint2korr(pos);
+ (*data)[i]= point + offset;
+ errors_per_range[i]= range_size= uint2korr(pos);
+ offset+= range_size;
+ pos+= 2;
+ }
+
+ /* Calculate pointers to text data */
+ for (i=0, offset=0 ; i < msg_file.errors ; i++)
+ {
+ point[i]= (char*) buff+offset;
+ offset+=uint2korr(pos);
pos+=2;
}
- if (mysql_file_read(file, buff, length, MYF(MY_NABP)))
+
+ /* Read error message texts */
+ if (mysql_file_read(file, buff, msg_file.text_length, MYF(MY_NABP | MY_WME)))
goto err;
- (void) mysql_file_close(file, MYF(0));
+ (void) mysql_file_close(file, MYF(MY_WME));
- i= check_error_mesg(file_name, *point);
- DBUG_RETURN(i);
+ DBUG_RETURN(check_error_mesg(file_name, point));
err:
- sql_print_error((funktpos == 3) ? "Not enough memory for messagefile '%s'" :
- (funktpos == 2) ? "Incompatible header in messagefile '%s'. Probably from another version of MariaDB" :
- ((funktpos == 1) ? "Can't read from messagefile '%s'" :
- "Can't find messagefile '%s'"), name);
- if (file != FERR)
- (void) mysql_file_close(file, MYF(MY_WME));
+ (void) mysql_file_close(file, MYF(0));
DBUG_RETURN(1);
} /* read_texts */
diff --git a/sql/derror.h b/sql/derror.h
index b2f6331e048..9f2aee71c7e 100644
--- a/sql/derror.h
+++ b/sql/derror.h
@@ -19,7 +19,8 @@
#include "my_global.h" /* uint */
bool init_errmessage(void);
+void free_error_messages();
bool read_texts(const char *file_name, const char *language,
- const char ***point, uint error_messages);
+ const char ****data);
#endif /* DERROR_INCLUDED */
diff --git a/sql/discover.cc b/sql/discover.cc
index 82648e94bc5..d8ed718fc58 100644
--- a/sql/discover.cc
+++ b/sql/discover.cc
@@ -199,14 +199,15 @@ int extension_based_table_discovery(MY_DIR *dirp, const char *ext_meta,
end= cur + dirp->number_of_files;
while (cur < end)
{
- char *octothorp= strrchr(cur->name + 1, '#');
+ char *octothorp= strchr(cur->name + 1, '#');
char *ext= strchr(octothorp ? octothorp : cur->name, FN_EXTCHAR);
if (ext)
{
size_t len= (octothorp ? octothorp : ext) - cur->name;
if (from != cur &&
- (my_strnncoll(cs, (uchar*)from->name, len, (uchar*)cur->name, len) ||
+ (strlen(from->name) <= len ||
+ my_strnncoll(cs, (uchar*)from->name, len, (uchar*)cur->name, len) ||
(from->name[len] != FN_EXTCHAR && from->name[len] != '#')))
advance(from, to, cur, skip);
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index e39f727800a..5d0754cf766 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -499,7 +499,8 @@ Event_db_repository::table_scan_all_for_i_s(THD *thd, TABLE *schema_table,
READ_RECORD read_record_info;
DBUG_ENTER("Event_db_repository::table_scan_all_for_i_s");
- if (init_read_record(&read_record_info, thd, event_table, NULL, 1, 0, FALSE))
+ if (init_read_record(&read_record_info, thd, event_table, NULL, NULL, 1, 0,
+ FALSE))
DBUG_RETURN(TRUE);
/*
@@ -947,7 +948,7 @@ end:
@retval FALSE an event with such db/name key exists
- @retval TRUE no record found or an error occured.
+ @retval TRUE no record found or an error occurred.
*/
bool
@@ -1015,7 +1016,7 @@ Event_db_repository::drop_schema_events(THD *thd, LEX_STRING schema)
DBUG_VOID_RETURN;
/* only enabled events are in memory, so we go now and delete the rest */
- if (init_read_record(&read_record_info, thd, table, NULL, 1, 0, FALSE))
+ if (init_read_record(&read_record_info, thd, table, NULL, NULL, 1, 0, FALSE))
goto end;
while (!ret && !(read_record_info.read_record(&read_record_info)) )
diff --git a/sql/event_queue.cc b/sql/event_queue.cc
index 35187af23ac..ae8ba258717 100644
--- a/sql/event_queue.cc
+++ b/sql/event_queue.cc
@@ -191,7 +191,7 @@ Event_queue::deinit_queue()
@param[out] created set to TRUE if no error and the element is
added to the queue, FALSE otherwise
- @retval TRUE an error occured. The value of created is undefined,
+ @retval TRUE an error occurred. The value of created is undefined,
the element was not deleted.
@retval FALSE success
*/
diff --git a/sql/events.cc b/sql/events.cc
index b80ec993ac4..5ef4d6f55a5 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -102,7 +102,7 @@ ulong Events::inited;
int sortcmp_lex_string(LEX_STRING s, LEX_STRING t, CHARSET_INFO *cs)
{
return cs->coll->strnncollsp(cs, (uchar *) s.str,s.length,
- (uchar *) t.str,t.length, 0);
+ (uchar *) t.str,t.length);
}
@@ -333,6 +333,10 @@ Events::create_event(THD *thd, Event_parse_data *parse_data)
if (check_access(thd, EVENT_ACL, parse_data->dbname.str, NULL, NULL, 0, 0))
DBUG_RETURN(TRUE);
+ if (lock_object_name(thd, MDL_key::EVENT,
+ parse_data->dbname.str, parse_data->name.str))
+ DBUG_RETURN(TRUE);
+
if (check_db_dir_existence(parse_data->dbname.str))
{
my_error(ER_BAD_DB_ERROR, MYF(0), parse_data->dbname.str);
@@ -347,10 +351,6 @@ Events::create_event(THD *thd, Event_parse_data *parse_data)
*/
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
- if (lock_object_name(thd, MDL_key::EVENT,
- parse_data->dbname.str, parse_data->name.str))
- DBUG_RETURN(TRUE);
-
if (thd->lex->create_info.or_replace() && event_queue)
event_queue->drop_event(thd, parse_data->dbname, parse_data->name);
@@ -454,6 +454,16 @@ Events::update_event(THD *thd, Event_parse_data *parse_data,
if (check_access(thd, EVENT_ACL, parse_data->dbname.str, NULL, NULL, 0, 0))
DBUG_RETURN(TRUE);
+ if (lock_object_name(thd, MDL_key::EVENT,
+ parse_data->dbname.str, parse_data->name.str))
+ DBUG_RETURN(TRUE);
+
+ if (check_db_dir_existence(parse_data->dbname.str))
+ {
+ my_error(ER_BAD_DB_ERROR, MYF(0), parse_data->dbname.str);
+ DBUG_RETURN(TRUE);
+ }
+
if (new_dbname) /* It's a rename */
{
@@ -476,6 +486,13 @@ Events::update_event(THD *thd, Event_parse_data *parse_data,
if (check_access(thd, EVENT_ACL, new_dbname->str, NULL, NULL, 0, 0))
DBUG_RETURN(TRUE);
+ /*
+ Acquire mdl exclusive lock on target database name.
+ */
+ if (lock_object_name(thd, MDL_key::EVENT,
+ new_dbname->str, new_name->str))
+ DBUG_RETURN(TRUE);
+
/* Check that the target database exists */
if (check_db_dir_existence(new_dbname->str))
{
@@ -490,10 +507,6 @@ Events::update_event(THD *thd, Event_parse_data *parse_data,
*/
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
- if (lock_object_name(thd, MDL_key::EVENT,
- parse_data->dbname.str, parse_data->name.str))
- DBUG_RETURN(TRUE);
-
/* On error conditions my_error() is called so no need to handle here */
if (!(ret= db_repository->update_event(thd, parse_data,
new_dbname, new_name)))
@@ -1134,7 +1147,7 @@ Events::load_events_from_db(THD *thd)
DBUG_RETURN(TRUE);
}
- if (init_read_record(&read_record_info, thd, table, NULL, 0, 1, FALSE))
+ if (init_read_record(&read_record_info, thd, table, NULL, NULL, 0, 1, FALSE))
{
close_thread_tables(thd);
DBUG_RETURN(TRUE);
diff --git a/sql/field.cc b/sql/field.cc
index 177f219c137..a5d2d759edc 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1224,7 +1224,8 @@ bool Field::test_if_equality_guarantees_uniqueness(const Item *item) const
for temporal columns, so the query:
WHERE temporal_column='string'
cannot return multiple distinct temporal values.
- QQ: perhaps we could allow INT/DECIMAL/DOUBLE types for temporal items.
+
+ TODO: perhaps we could allow INT/DECIMAL/DOUBLE types for temporal items.
*/
return result_type() == item->result_type();
}
@@ -5647,6 +5648,18 @@ Item *Field_temporal::get_equal_const_item_datetime(THD *thd,
}
break;
case ANY_SUBST:
+ if (!is_temporal_type_with_date(const_item->field_type()))
+ {
+ MYSQL_TIME ltime;
+ if (const_item->get_date_with_conversion(&ltime,
+ TIME_FUZZY_DATES |
+ TIME_INVALID_DATES))
+ return NULL;
+ return new (thd->mem_root)
+ Item_datetime_literal_for_invalid_dates(thd, &ltime,
+ ltime.second_part ?
+ TIME_SECOND_PART_DIGITS : 0);
+ }
break;
}
return const_item;
@@ -5955,7 +5968,10 @@ Item *Field_time::get_equal_const_item(THD *thd, const Context &ctx,
{
MYSQL_TIME ltime;
// Get the value of const_item with conversion from DATETIME to TIME
- if (const_item->get_time_with_conversion(thd, &ltime, TIME_TIME_ONLY))
+ if (const_item->get_time_with_conversion(thd, &ltime,
+ TIME_TIME_ONLY |
+ TIME_FUZZY_DATES |
+ TIME_INVALID_DATES))
return NULL;
/*
Replace a DATE/DATETIME constant to a TIME constant:
@@ -7121,8 +7137,7 @@ int Field_string::cmp(const uchar *a_ptr, const uchar *b_ptr)
*/
return field_charset->coll->strnncollsp(field_charset,
a_ptr, a_len,
- b_ptr, b_len,
- 0);
+ b_ptr, b_len);
}
@@ -7496,7 +7511,7 @@ int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
a_length,
b_ptr+
length_bytes,
- b_length,0);
+ b_length);
return diff;
}
@@ -7519,7 +7534,7 @@ int Field_varstring::key_cmp(const uchar *key_ptr, uint max_key_length)
length,
key_ptr+
HA_KEY_BLOB_LENGTH,
- uint2korr(key_ptr), 0);
+ uint2korr(key_ptr));
}
@@ -7537,8 +7552,7 @@ int Field_varstring::key_cmp(const uchar *a,const uchar *b)
a + HA_KEY_BLOB_LENGTH,
uint2korr(a),
b + HA_KEY_BLOB_LENGTH,
- uint2korr(b),
- 0);
+ uint2korr(b));
}
@@ -7868,7 +7882,7 @@ int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs)
ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
uint copy_length, new_length;
String_copier copier;
- const char *tmp;
+ char *tmp;
char buff[STRING_BUFFER_USUAL_SIZE];
String tmpstr(buff,sizeof(buff), &my_charset_bin);
@@ -7878,6 +7892,29 @@ int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs)
return 0;
}
+ if (table->blob_storage) // GROUP_CONCAT with ORDER BY | DISTINCT
+ {
+ DBUG_ASSERT(!f_is_hex_escape(flags));
+ DBUG_ASSERT(field_charset == cs);
+ DBUG_ASSERT(length <= max_data_length());
+
+ new_length= length;
+ copy_length= table->in_use->variables.group_concat_max_len;
+ if (new_length > copy_length)
+ {
+ int well_formed_error;
+ new_length= cs->cset->well_formed_len(cs, from, from + copy_length,
+ new_length, &well_formed_error);
+ table->blob_storage->set_truncated_value(true);
+ }
+ if (!(tmp= table->blob_storage->store(from, new_length)))
+ goto oom_error;
+
+ Field_blob::store_length(new_length);
+ bmove(ptr + packlength, (uchar*) &tmp, sizeof(char*));
+ return 0;
+ }
+
/*
If the 'from' address is in the range of the temporary 'value'-
object we need to copy the content to a different location or it will be
@@ -7904,15 +7941,14 @@ int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs)
new_length= MY_MIN(max_data_length(), field_charset->mbmaxlen * length);
if (value.alloc(new_length))
goto oom_error;
-
+ tmp= const_cast<char*>(value.ptr());
if (f_is_hex_escape(flags))
{
copy_length= my_copy_with_hex_escaping(field_charset,
- (char*) value.ptr(), new_length,
- from, length);
+ tmp, new_length,
+ from, length);
Field_blob::store_length(copy_length);
- tmp= value.ptr();
bmove(ptr + packlength, (uchar*) &tmp, sizeof(char*));
return 0;
}
@@ -7920,7 +7956,6 @@ int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs)
(char*) value.ptr(), new_length,
cs, from, length);
Field_blob::store_length(copy_length);
- tmp= value.ptr();
bmove(ptr+packlength,(uchar*) &tmp,sizeof(char*));
return check_conversion_status(&copier, from + length, cs, true);
@@ -8017,8 +8052,7 @@ int Field_blob::cmp(const uchar *a,uint32 a_length, const uchar *b,
uint32 b_length)
{
return field_charset->coll->strnncollsp(field_charset,
- a, a_length, b, b_length,
- 0);
+ a, a_length, b, b_length);
}
diff --git a/sql/field.h b/sql/field.h
index 3c6e318f270..736c51c2ac3 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -3753,7 +3753,7 @@ public:
const char *field_name_arg) const
{
return ::make_field(share, mem_root, ptr,
- length, null_pos, null_bit,
+ (uint32)length, null_pos, null_bit,
pack_flag, sql_type, charset,
geom_type, srid, unireg_check, interval,
field_name_arg);
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index 6823329132d..263c5bb5017 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -642,9 +642,6 @@ void Copy_field::set(uchar *to,Field *from)
Field_blob::store. Is this in order to trigger the call to
well_formed_copy_nchars, by changing the pointer copy->tmp.ptr()?
That call will take place anyway in all known cases.
-
- - The above causes a truncation to MAX_FIELD_WIDTH. Is this the intended
- effect? Truncation is handled by well_formed_copy_nchars anyway.
*/
void Copy_field::set(Field *to,Field *from,bool save)
{
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 9289d712cbc..54a79421d2e 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -50,26 +50,27 @@ if (my_b_write((file),(uchar*) (from),param->ref_length)) \
static uchar *read_buffpek_from_file(IO_CACHE *buffer_file, uint count,
uchar *buf);
static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
- Filesort_info *fs_info,
+ SORT_INFO *fs_info,
IO_CACHE *buffer_file,
IO_CACHE *tempfile,
Bounded_queue<uchar, uchar> *pq,
ha_rows *found_rows);
-static bool write_keys(Sort_param *param, Filesort_info *fs_info,
+static bool write_keys(Sort_param *param, SORT_INFO *fs_info,
uint count, IO_CACHE *buffer_file, IO_CACHE *tempfile);
static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos);
static void register_used_fields(Sort_param *param);
static bool save_index(Sort_param *param, uint count,
- Filesort_info *table_sort);
+ SORT_INFO *table_sort);
static uint suffix_length(ulong string_length);
static uint sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
bool *multi_byte_charset);
static SORT_ADDON_FIELD *get_addon_fields(ulong max_length_for_sort_data,
Field **ptabfield,
- uint sortlength, uint *plength);
+ uint sortlength,
+ LEX_STRING *addon_buf);
static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
uchar *buff, uchar *buff_end);
-static bool check_if_pq_applicable(Sort_param *param, Filesort_info *info,
+static bool check_if_pq_applicable(Sort_param *param, SORT_INFO *info,
TABLE *table,
ha_rows records, ulong memory_available);
@@ -78,6 +79,8 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
ulong max_length_for_sort_data,
ha_rows maxrows, bool sort_positions)
{
+ DBUG_ASSERT(addon_field == 0 && addon_buf.length == 0);
+
sort_length= sortlen;
ref_length= table->file->ref_length;
if (!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
@@ -85,13 +88,13 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
{
/*
Get the descriptors of all fields whose values are appended
- to sorted fields and get its total length in addon_length.
+ to sorted fields and get its total length in addon_buf.length
*/
addon_field= get_addon_fields(max_length_for_sort_data,
- table->field, sort_length, &addon_length);
+ table->field, sort_length, &addon_buf);
}
if (addon_field)
- res_length= addon_length;
+ res_length= addon_buf.length;
else
{
res_length= ref_length;
@@ -101,7 +104,7 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
*/
sort_length+= ref_length;
}
- rec_length= sort_length + addon_length;
+ rec_length= sort_length + addon_buf.length;
max_rows= maxrows;
}
@@ -115,8 +118,9 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
Before calling filesort, one must have done
table->file->info(HA_STATUS_VARIABLE)
- The result set is stored in table->io_cache or
- table->record_pointers.
+ The result set is stored in
+ filesort_info->io_cache or
+ filesort_info->record_pointers.
@param thd Current thread
@param table Table to sort
@@ -124,28 +128,24 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
@param s_length Number of elements in sortorder
@param select Condition to apply to the rows
@param max_rows Return only this many rows
- @param sort_positions Set to TRUE if we want to force sorting by position
+ @param sort_positions Set to TRUE if we want to force sorting by
+ position
(Needed by UPDATE/INSERT or ALTER TABLE or
when rowids are required by executor)
- @param[out] examined_rows Store number of examined rows here
- @param[out] found_rows Store the number of found rows here
-
@note
If we sort by position (like if sort_positions is 1) filesort() will
call table->prepare_for_position().
@retval
- HA_POS_ERROR Error
- @retval
- \# Number of rows
+ 0 Error
+ # SORT_INFO
*/
-ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
- SQL_SELECT *select, ha_rows max_rows,
- bool sort_positions,
- ha_rows *examined_rows,
- ha_rows *found_rows,
- Filesort_tracker* tracker)
+SORT_INFO *filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder,
+ uint s_length,
+ SQL_SELECT *select, ha_rows max_rows,
+ bool sort_positions,
+ Filesort_tracker* tracker)
{
int error;
size_t memory_available= thd->variables.sortbuff_size;
@@ -162,33 +162,37 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
#ifdef SKIP_DBUG_IN_FILESORT
DBUG_PUSH(""); /* No DBUG here */
#endif
- Filesort_info table_sort= table->sort;
+ SORT_INFO *sort;
TABLE_LIST *tab= table->pos_in_table_list;
Item_subselect *subselect= tab ? tab->containing_subselect() : 0;
-
MYSQL_FILESORT_START(table->s->db.str, table->s->table_name.str);
DEBUG_SYNC(thd, "filesort_start");
+ if (!(sort= new SORT_INFO))
+ return 0;
+
+ if (subselect && subselect->filesort_buffer.is_allocated())
+ {
+ /* Reuse cache from last call */
+ sort->filesort_buffer= subselect->filesort_buffer;
+ sort->buffpek= subselect->sortbuffer;
+ subselect->filesort_buffer.reset();
+ subselect->sortbuffer.str=0;
+ }
+
+ outfile= &sort->io_cache;
+
/*
Release InnoDB's adaptive hash index latch (if holding) before
running a sort.
*/
ha_release_temporary_latches(thd);
- /*
- Don't use table->sort in filesort as it is also used by
- QUICK_INDEX_MERGE_SELECT. Work with a copy and put it back at the end
- when index_merge select has finished with it.
- */
- table->sort.io_cache= NULL;
- DBUG_ASSERT(table_sort.record_pointers == NULL);
-
- outfile= table_sort.io_cache;
my_b_clear(&tempfile);
my_b_clear(&buffpek_pointers);
buffpek=0;
error= 1;
- *found_rows= HA_POS_ERROR;
+ sort->found_rows= HA_POS_ERROR;
param.init_for_filesort(sortlength(thd, sortorder, s_length,
&multi_byte_charset),
@@ -196,14 +200,12 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
thd->variables.max_length_for_sort_data,
max_rows, sort_positions);
- table_sort.addon_buf= 0;
- table_sort.addon_length= param.addon_length;
- table_sort.addon_field= param.addon_field;
- table_sort.unpack= unpack_addon_fields;
- if (param.addon_field &&
- !(table_sort.addon_buf=
- (uchar *) my_malloc(param.addon_length, MYF(MY_WME |
- MY_THREAD_SPECIFIC))))
+ sort->addon_buf= param.addon_buf;
+ sort->addon_field= param.addon_field;
+ sort->unpack= unpack_addon_fields;
+ if (multi_byte_charset &&
+ !(param.tmp_buffer= (char*) my_malloc(param.sort_length,
+ MYF(MY_WME | MY_THREAD_SPECIFIC))))
goto err;
if (select && select->quick)
@@ -216,12 +218,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
// If number of rows is not known, use as much of sort buffer as possible.
num_rows= table->file->estimate_rows_upper_bound();
- if (multi_byte_charset &&
- !(param.tmp_buffer= (char*) my_malloc(param.sort_length,
- MYF(MY_WME | MY_THREAD_SPECIFIC))))
- goto err;
-
- if (check_if_pq_applicable(&param, &table_sort,
+ if (check_if_pq_applicable(&param, sort,
table, num_rows, memory_available))
{
DBUG_PRINT("info", ("filesort PQ is applicable"));
@@ -233,45 +230,31 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
true, // max_at_top
NULL, // compare_function
compare_length,
- &make_sortkey, &param, table_sort.get_sort_keys()))
+ &make_sortkey, &param, sort->get_sort_keys()))
{
/*
If we fail to init pq, we have to give up:
out of memory means my_malloc() will call my_error().
*/
DBUG_PRINT("info", ("failed to allocate PQ"));
- table_sort.free_sort_buffer();
DBUG_ASSERT(thd->is_error());
goto err;
}
// For PQ queries (with limit) we initialize all pointers.
- table_sort.init_record_pointers();
+ sort->init_record_pointers();
}
else
{
DBUG_PRINT("info", ("filesort PQ is not applicable"));
- size_t min_sort_memory= MY_MAX(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2);
+ size_t min_sort_memory= MY_MAX(MIN_SORT_MEMORY,
+ param.sort_length*MERGEBUFF2);
set_if_bigger(min_sort_memory, sizeof(BUFFPEK*)*MERGEBUFF2);
while (memory_available >= min_sort_memory)
{
ulonglong keys= memory_available / (param.rec_length + sizeof(char*));
param.max_keys_per_buffer= (uint) MY_MIN(num_rows, keys);
- if (table_sort.get_sort_keys())
- {
- // If we have already allocated a buffer, it better have same size!
- if (!table_sort.check_sort_buffer_properties(param.max_keys_per_buffer,
- param.rec_length))
- {
- /*
- table->sort will still have a pointer to the same buffer,
- but that will be overwritten by the assignment below.
- */
- table_sort.free_sort_buffer();
- }
- }
- table_sort.alloc_sort_buffer(param.max_keys_per_buffer, param.rec_length);
- if (table_sort.get_sort_keys())
+ if (sort->alloc_sort_buffer(param.max_keys_per_buffer, param.rec_length))
break;
size_t old_memory_available= memory_available;
memory_available= memory_available/4*3;
@@ -284,7 +267,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
my_error(ER_OUT_OF_SORTMEMORY,MYF(ME_ERROR + ME_FATALERROR));
goto err;
}
- tracker->report_sort_buffer_size(table_sort.sort_buffer_size());
+ tracker->report_sort_buffer_size(sort->sort_buffer_size());
}
if (open_cached_file(&buffpek_pointers,mysql_tmpdir,TEMP_PREFIX,
@@ -294,21 +277,21 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
param.sort_form= table;
param.end=(param.local_sortorder=sortorder)+s_length;
num_rows= find_all_keys(thd, &param, select,
- &table_sort,
+ sort,
&buffpek_pointers,
&tempfile,
pq.is_initialized() ? &pq : NULL,
- found_rows);
+ &sort->found_rows);
if (num_rows == HA_POS_ERROR)
goto err;
maxbuffer= (uint) (my_b_tell(&buffpek_pointers)/sizeof(*buffpek));
tracker->report_merge_passes_at_start(thd->query_plan_fsort_passes);
- tracker->report_row_numbers(param.examined_rows, *found_rows, num_rows);
+ tracker->report_row_numbers(param.examined_rows, sort->found_rows, num_rows);
if (maxbuffer == 0) // The whole set is in memory
{
- if (save_index(&param, (uint) num_rows, &table_sort))
+ if (save_index(&param, (uint) num_rows, sort))
goto err;
}
else
@@ -316,17 +299,17 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
/* filesort cannot handle zero-length records during merge. */
DBUG_ASSERT(param.sort_length != 0);
- if (table_sort.buffpek && table_sort.buffpek_len < maxbuffer)
+ if (sort->buffpek.str && sort->buffpek.length < maxbuffer)
{
- my_free(table_sort.buffpek);
- table_sort.buffpek= 0;
+ my_free(sort->buffpek.str);
+ sort->buffpek.str= 0;
}
- if (!(table_sort.buffpek=
- (uchar *) read_buffpek_from_file(&buffpek_pointers, maxbuffer,
- table_sort.buffpek)))
+ if (!(sort->buffpek.str=
+ (char *) read_buffpek_from_file(&buffpek_pointers, maxbuffer,
+ (uchar*) sort->buffpek.str)))
goto err;
- buffpek= (BUFFPEK *) table_sort.buffpek;
- table_sort.buffpek_len= maxbuffer;
+ sort->buffpek.length= maxbuffer;
+ buffpek= (BUFFPEK *) sort->buffpek.str;
close_cached_file(&buffpek_pointers);
/* Open cached file if it isn't open */
if (! my_b_inited(outfile) &&
@@ -345,7 +328,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
param.rec_length - 1);
maxbuffer--; // Offset from 0
if (merge_many_buff(&param,
- (uchar*) table_sort.get_sort_keys(),
+ (uchar*) sort->get_sort_keys(),
buffpek,&maxbuffer,
&tempfile))
goto err;
@@ -353,7 +336,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
goto err;
if (merge_index(&param,
- (uchar*) table_sort.get_sort_keys(),
+ (uchar*) sort->get_sort_keys(),
buffpek,
maxbuffer,
&tempfile,
@@ -372,11 +355,18 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
my_free(param.tmp_buffer);
if (!subselect || !subselect->is_uncacheable())
{
- table_sort.free_sort_buffer();
- my_free(buffpek);
- table_sort.buffpek= 0;
- table_sort.buffpek_len= 0;
+ sort->free_sort_buffer();
+ my_free(sort->buffpek.str);
}
+ else
+ {
+ /* Remember sort buffers for next subquery call */
+ subselect->filesort_buffer= sort->filesort_buffer;
+ subselect->sortbuffer= sort->buffpek;
+ sort->filesort_buffer.reset(); // Don't free this
+ }
+ sort->buffpek.str= 0;
+
close_cached_file(&tempfile);
close_cached_file(&buffpek_pointers);
if (my_b_inited(outfile))
@@ -397,13 +387,6 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
int kill_errno= thd->killed_errno();
DBUG_ASSERT(thd->is_error() || kill_errno || thd->killed == ABORT_QUERY);
- /*
- We replace the table->sort at the end.
- Hence calling free_io_cache to make sure table->sort.io_cache
- used for QUICK_INDEX_MERGE_SELECT is free.
- */
- free_io_cache(table);
-
my_printf_error(ER_FILSORT_ABORT,
"%s: %s",
MYF(0),
@@ -424,50 +407,26 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
}
else
thd->inc_status_sort_rows(num_rows);
- *examined_rows= param.examined_rows;
+
+ sort->examined_rows= param.examined_rows;
+ sort->return_rows= num_rows;
#ifdef SKIP_DBUG_IN_FILESORT
DBUG_POP(); /* Ok to DBUG */
#endif
- /* table->sort.io_cache should be free by this time */
- DBUG_ASSERT(NULL == table->sort.io_cache);
-
- // Assign the copy back!
- table->sort= table_sort;
-
DBUG_PRINT("exit",
- ("num_rows: %ld examined_rows: %ld found_rows: %ld",
- (long) num_rows, (long) *examined_rows, (long) *found_rows));
+ ("num_rows: %lld examined_rows: %lld found_rows: %lld",
+ (longlong) sort->return_rows, (longlong) sort->examined_rows,
+ (longlong) sort->found_rows));
MYSQL_FILESORT_DONE(error, num_rows);
- DBUG_RETURN(error ? HA_POS_ERROR : num_rows);
-} /* filesort */
-
-
-void filesort_free_buffers(TABLE *table, bool full)
-{
- DBUG_ENTER("filesort_free_buffers");
-
- my_free(table->sort.record_pointers);
- table->sort.record_pointers= NULL;
-
- if (unlikely(full))
- {
- table->sort.free_sort_buffer();
- my_free(table->sort.buffpek);
- table->sort.buffpek= NULL;
- table->sort.buffpek_len= 0;
- }
- /* addon_buf is only allocated if addon_field is set */
- if (unlikely(table->sort.addon_field))
+ if (error)
{
- my_free(table->sort.addon_field);
- my_free(table->sort.addon_buf);
- table->sort.addon_buf= NULL;
- table->sort.addon_field= NULL;
+ delete sort;
+ sort= 0;
}
- DBUG_VOID_RETURN;
-}
+ DBUG_RETURN(sort);
+} /* filesort */
/** Read 'count' number of buffer pointers into memory. */
@@ -672,7 +631,7 @@ static void dbug_print_record(TABLE *table, bool print_rowid)
*/
static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
- Filesort_info *fs_info,
+ SORT_INFO *fs_info,
IO_CACHE *buffpek_pointers,
IO_CACHE *tempfile,
Bounded_queue<uchar, uchar> *pq,
@@ -877,7 +836,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
const ha_rows retval=
my_b_inited(tempfile) ?
(ha_rows) (my_b_tell(tempfile)/param->rec_length) : idx;
- DBUG_PRINT("info", ("find_all_keys return %u", (uint) retval));
+ DBUG_PRINT("info", ("find_all_keys return %llu", (ulonglong) retval));
DBUG_RETURN(retval);
} /* find_all_keys */
@@ -905,7 +864,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
*/
static bool
-write_keys(Sort_param *param, Filesort_info *fs_info, uint count,
+write_keys(Sort_param *param, SORT_INFO *fs_info, uint count,
IO_CACHE *buffpek_pointers, IO_CACHE *tempfile)
{
size_t rec_length;
@@ -1274,11 +1233,13 @@ static void register_used_fields(Sort_param *param)
}
-static bool save_index(Sort_param *param, uint count, Filesort_info *table_sort)
+static bool save_index(Sort_param *param, uint count,
+ SORT_INFO *table_sort)
{
uint offset,res_length;
uchar *to;
DBUG_ENTER("save_index");
+ DBUG_ASSERT(table_sort->record_pointers == 0);
table_sort->sort_buffer(param, count);
res_length= param->res_length;
@@ -1327,7 +1288,7 @@ static bool save_index(Sort_param *param, uint count, Filesort_info *table_sort)
*/
bool check_if_pq_applicable(Sort_param *param,
- Filesort_info *filesort_info,
+ SORT_INFO *filesort_info,
TABLE *table, ha_rows num_rows,
ulong memory_available)
{
@@ -1361,9 +1322,8 @@ bool check_if_pq_applicable(Sort_param *param,
// The whole source set fits into memory.
if (param->max_rows < num_rows/PQ_slowness )
{
- filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
- param->rec_length);
- DBUG_RETURN(filesort_info->get_sort_keys() != NULL);
+ DBUG_RETURN(filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
+ param->rec_length) != NULL);
}
else
{
@@ -1375,9 +1335,8 @@ bool check_if_pq_applicable(Sort_param *param,
// Do we have space for LIMIT rows in memory?
if (param->max_keys_per_buffer < num_available_keys)
{
- filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
- param->rec_length);
- DBUG_RETURN(filesort_info->get_sort_keys() != NULL);
+ DBUG_RETURN(filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
+ param->rec_length) != NULL);
}
// Try to strip off addon fields.
@@ -1413,17 +1372,14 @@ bool check_if_pq_applicable(Sort_param *param,
if (sort_merge_cost < pq_cost)
DBUG_RETURN(false);
- filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
- param->sort_length + param->ref_length);
- if (filesort_info->get_sort_keys())
+ if (filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
+ param->sort_length +
+ param->ref_length))
{
- // Make attached data to be references instead of fields.
- my_free(filesort_info->addon_buf);
+ /* Make attached data to be references instead of fields. */
my_free(filesort_info->addon_field);
- filesort_info->addon_buf= NULL;
filesort_info->addon_field= NULL;
param->addon_field= NULL;
- param->addon_length= 0;
param->res_length= param->ref_length;
param->sort_length+= param->ref_length;
@@ -1993,7 +1949,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
@param thd Current thread
@param ptabfield Array of references to the table fields
@param sortlength Total length of sorted fields
- @param[out] plength Total length of appended fields
+ @param [out] addon_buf Buffer to us for appended fields
@note
The null bits for the appended values are supposed to be put together
@@ -2007,7 +1963,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
static SORT_ADDON_FIELD *
get_addon_fields(ulong max_length_for_sort_data,
- Field **ptabfield, uint sortlength, uint *plength)
+ Field **ptabfield, uint sortlength, LEX_STRING *addon_buf)
{
Field **pfield;
Field *field;
@@ -2016,6 +1972,7 @@ get_addon_fields(ulong max_length_for_sort_data,
uint fields= 0;
uint null_fields= 0;
MY_BITMAP *read_set= (*ptabfield)->table->read_set;
+ DBUG_ENTER("get_addon_fields");
/*
If there is a reference to a field in the query add it
@@ -2027,31 +1984,33 @@ get_addon_fields(ulong max_length_for_sort_data,
the values directly from sorted fields.
But beware the case when item->cmp_type() != item->result_type()
*/
- *plength= 0;
+ addon_buf->str= 0;
+ addon_buf->length= 0;
for (pfield= ptabfield; (field= *pfield) ; pfield++)
{
if (!bitmap_is_set(read_set, field->field_index))
continue;
if (field->flags & BLOB_FLAG)
- return 0;
+ DBUG_RETURN(0);
length+= field->max_packed_col_length(field->pack_length());
if (field->maybe_null())
null_fields++;
fields++;
}
if (!fields)
- return 0;
+ DBUG_RETURN(0);
length+= (null_fields+7)/8;
if (length+sortlength > max_length_for_sort_data ||
- !(addonf= (SORT_ADDON_FIELD *) my_malloc(sizeof(SORT_ADDON_FIELD)*
- (fields+1),
- MYF(MY_WME |
- MY_THREAD_SPECIFIC))))
- return 0;
+ !my_multi_malloc(MYF(MY_WME | MY_THREAD_SPECIFIC),
+ &addonf, sizeof(SORT_ADDON_FIELD) * (fields+1),
+ &addon_buf->str, length,
+ NullS))
- *plength= length;
+ DBUG_RETURN(0);
+
+ addon_buf->length= length;
length= (null_fields+7)/8;
null_fields= 0;
for (pfield= ptabfield; (field= *pfield) ; pfield++)
@@ -2078,7 +2037,7 @@ get_addon_fields(ulong max_length_for_sort_data,
addonf->field= 0; // Put end marker
DBUG_PRINT("info",("addon_length: %d",length));
- return (addonf-fields);
+ DBUG_RETURN(addonf-fields);
}
@@ -2164,3 +2123,13 @@ void change_double_for_sort(double nr,uchar *to)
}
}
+/**
+ Free SORT_INFO
+*/
+
+SORT_INFO::~SORT_INFO()
+{
+ DBUG_ENTER("~SORT_INFO::SORT_INFO()");
+ free_data();
+ DBUG_VOID_RETURN;
+}
diff --git a/sql/filesort.h b/sql/filesort.h
index 22d7f987de2..454c745b5c0 100644
--- a/sql/filesort.h
+++ b/sql/filesort.h
@@ -16,10 +16,8 @@
#ifndef FILESORT_INCLUDED
#define FILESORT_INCLUDED
-class SQL_SELECT;
-
-#include "my_global.h" /* uint, uchar */
#include "my_base.h" /* ha_rows */
+#include "filesort_utils.h"
class SQL_SELECT;
class THD;
@@ -27,12 +25,92 @@ struct TABLE;
struct SORT_FIELD;
class Filesort_tracker;
-ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder,
- uint s_length, SQL_SELECT *select,
- ha_rows max_rows, bool sort_positions,
- ha_rows *examined_rows, ha_rows *found_rows,
- Filesort_tracker* tracker);
-void filesort_free_buffers(TABLE *table, bool full);
+class SORT_INFO
+{
+ /// Buffer for sorting keys.
+ Filesort_buffer filesort_buffer;
+
+public:
+ SORT_INFO()
+ :addon_field(0), record_pointers(0)
+ {
+ buffpek.str= 0;
+ my_b_clear(&io_cache);
+ }
+
+ ~SORT_INFO();
+
+ void free_data()
+ {
+ close_cached_file(&io_cache);
+ my_free(record_pointers);
+ my_free(buffpek.str);
+ my_free(addon_field);
+ }
+
+ void reset()
+ {
+ free_data();
+ record_pointers= 0;
+ buffpek.str= 0;
+ addon_field= 0;
+ }
+
+
+ IO_CACHE io_cache; /* If sorted through filesort */
+ LEX_STRING buffpek; /* Buffer for buffpek structures */
+ LEX_STRING addon_buf; /* Pointer to a buffer if sorted with fields */
+ struct st_sort_addon_field *addon_field; /* Pointer to the fields info */
+ /* To unpack back */
+ void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *);
+ uchar *record_pointers; /* If sorted in memory */
+ /*
+ How many rows in final result.
+ Also how many rows in record_pointers, if used
+ */
+ ha_rows return_rows;
+ ha_rows examined_rows; /* How many rows read */
+ ha_rows found_rows; /* How many rows was accepted */
+
+ /** Sort filesort_buffer */
+ void sort_buffer(Sort_param *param, uint count)
+ { filesort_buffer.sort_buffer(param, count); }
+
+ /**
+ Accessors for Filesort_buffer (which @c).
+ */
+ uchar *get_record_buffer(uint idx)
+ { return filesort_buffer.get_record_buffer(idx); }
+
+ uchar **get_sort_keys()
+ { return filesort_buffer.get_sort_keys(); }
+
+ uchar **alloc_sort_buffer(uint num_records, uint record_length)
+ { return filesort_buffer.alloc_sort_buffer(num_records, record_length); }
+
+ void free_sort_buffer()
+ { filesort_buffer.free_sort_buffer(); }
+
+ void init_record_pointers()
+ { filesort_buffer.init_record_pointers(); }
+
+ size_t sort_buffer_size() const
+ { return filesort_buffer.sort_buffer_size(); }
+
+ friend SORT_INFO *filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder,
+ uint s_length,
+ SQL_SELECT *select, ha_rows max_rows,
+ bool sort_positions,
+ Filesort_tracker* tracker);
+};
+
+
+SORT_INFO *filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder,
+ uint s_length,
+ SQL_SELECT *select, ha_rows max_rows,
+ bool sort_positions,
+ Filesort_tracker* tracker);
+
void change_double_for_sort(double nr,uchar *to);
#endif /* FILESORT_INCLUDED */
diff --git a/sql/filesort_utils.cc b/sql/filesort_utils.cc
index 1cef30b6a56..34110dcfc1f 100644
--- a/sql/filesort_utils.cc
+++ b/sql/filesort_utils.cc
@@ -85,31 +85,66 @@ double get_merge_many_buffs_cost_fast(ha_rows num_rows,
return total_cost;
}
-uchar **Filesort_buffer::alloc_sort_buffer(uint num_records, uint record_length)
-{
- ulong sort_buff_sz;
+/*
+ alloc_sort_buffer()
- DBUG_ENTER("alloc_sort_buffer");
+ Allocate buffer for sorting keys.
+ Try to reuse old buffer if possible.
+ @return
+ 0 Error
+ # Pointer to allocated buffer
+*/
+
+uchar **Filesort_buffer::alloc_sort_buffer(uint num_records,
+ uint record_length)
+{
+ size_t buff_size;
+ uchar **sort_keys, **start_of_data;
+ DBUG_ENTER("alloc_sort_buffer");
DBUG_EXECUTE_IF("alloc_sort_buffer_fail",
DBUG_SET("+d,simulate_out_of_memory"););
- if (m_idx_array.is_null())
+ buff_size= num_records * (record_length + sizeof(uchar*));
+ set_if_bigger(buff_size, record_length * MERGEBUFF2);
+
+ if (!m_idx_array.is_null())
{
- sort_buff_sz= num_records * (record_length + sizeof(uchar*));
- set_if_bigger(sort_buff_sz, record_length * MERGEBUFF2);
- uchar **sort_keys=
- (uchar**) my_malloc(sort_buff_sz, MYF(MY_THREAD_SPECIFIC));
- m_idx_array= Idx_array(sort_keys, num_records);
- m_record_length= record_length;
- uchar **start_of_data= m_idx_array.array() + m_idx_array.size();
- m_start_of_data= reinterpret_cast<uchar*>(start_of_data);
+ /*
+ Reuse old buffer if exists and is large enough
+ Note that we don't make the buffer smaller, as we want to be
+ prepared for next subquery iteration.
+ */
+
+ sort_keys= m_idx_array.array();
+ if (buff_size > allocated_size)
+ {
+ /*
+ Better to free and alloc than realloc as we don't have to remember
+ the old values
+ */
+ my_free(sort_keys);
+ if (!(sort_keys= (uchar**) my_malloc(buff_size,
+ MYF(MY_THREAD_SPECIFIC))))
+ {
+ reset();
+ DBUG_RETURN(0);
+ }
+ allocated_size= buff_size;
+ }
}
else
{
- DBUG_ASSERT(num_records == m_idx_array.size());
- DBUG_ASSERT(record_length == m_record_length);
+ if (!(sort_keys= (uchar**) my_malloc(buff_size, MYF(MY_THREAD_SPECIFIC))))
+ DBUG_RETURN(0);
+ allocated_size= buff_size;
}
+
+ m_idx_array= Idx_array(sort_keys, num_records);
+ m_record_length= record_length;
+ start_of_data= m_idx_array.array() + m_idx_array.size();
+ m_start_of_data= reinterpret_cast<uchar*>(start_of_data);
+
DBUG_RETURN(m_idx_array.array());
}
@@ -117,8 +152,7 @@ uchar **Filesort_buffer::alloc_sort_buffer(uint num_records, uint record_length)
void Filesort_buffer::free_sort_buffer()
{
my_free(m_idx_array.array());
- m_idx_array= Idx_array();
- m_record_length= 0;
+ m_idx_array.reset();
m_start_of_data= NULL;
}
diff --git a/sql/filesort_utils.h b/sql/filesort_utils.h
index 00fa6f2566b..d537b602edf 100644
--- a/sql/filesort_utils.h
+++ b/sql/filesort_utils.h
@@ -60,9 +60,23 @@ double get_merge_many_buffs_cost_fast(ha_rows num_rows,
class Filesort_buffer
{
public:
- Filesort_buffer() :
- m_idx_array(), m_record_length(0), m_start_of_data(NULL)
+ Filesort_buffer()
+ : m_idx_array(), m_start_of_data(NULL), allocated_size(0)
{}
+
+ ~Filesort_buffer()
+ {
+ my_free(m_idx_array.array());
+ }
+
+ bool is_allocated()
+ {
+ return m_idx_array.array() != 0;
+ }
+ void reset()
+ {
+ m_idx_array.reset();
+ }
/** Sort me... */
void sort_buffer(const Sort_param *param, uint count);
@@ -84,20 +98,12 @@ public:
/// Returns total size: pointer array + record buffers.
size_t sort_buffer_size() const
{
- return m_idx_array.size() * (m_record_length + sizeof(uchar*));
+ return allocated_size;
}
/// Allocates the buffer, but does *not* initialize pointers.
uchar **alloc_sort_buffer(uint num_records, uint record_length);
-
- /// Check <num_records, record_length> for the buffer
- bool check_sort_buffer_properties(uint num_records, uint record_length)
- {
- return (static_cast<uint>(m_idx_array.size()) == num_records &&
- m_record_length == record_length);
- }
-
/// Frees the buffer.
void free_sort_buffer();
@@ -115,15 +121,17 @@ public:
m_idx_array= rhs.m_idx_array;
m_record_length= rhs.m_record_length;
m_start_of_data= rhs.m_start_of_data;
+ allocated_size= rhs.allocated_size;
return *this;
}
private:
typedef Bounds_checked_array<uchar*> Idx_array;
- Idx_array m_idx_array;
+ Idx_array m_idx_array; /* Pointers to key data */
uint m_record_length;
- uchar *m_start_of_data;
+ uchar *m_start_of_data; /* Start of key data */
+ size_t allocated_size;
};
#endif // FILESORT_UTILS_INCLUDED
diff --git a/sql/gcalc_slicescan.h b/sql/gcalc_slicescan.h
index 5a0399bc8da..4996287ca88 100644
--- a/sql/gcalc_slicescan.h
+++ b/sql/gcalc_slicescan.h
@@ -26,7 +26,7 @@
#ifndef GCALC_DBUG_OFF
#define GCALC_DBUG_PRINT(b) DBUG_PRINT("Gcalc", b)
-#define GCALC_DBUG_ENTER(a) DBUG_ENTER("Gcalc "a)
+#define GCALC_DBUG_ENTER(a) DBUG_ENTER("Gcalc " a)
#define GCALC_DBUG_RETURN(r) DBUG_RETURN(r)
#define GCALC_DBUG_VOID_RETURN DBUG_VOID_RETURN
#define GCALC_DBUG_ASSERT(r) DBUG_ASSERT(r)
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 5d789fa31e3..173a5f709c1 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -8306,7 +8306,7 @@ bool ha_partition::inplace_alter_table(TABLE *altered_table,
/*
Note that this function will try rollback failed ADD INDEX by
executing DROP INDEX for the indexes that were committed (if any)
- before the error occured. This means that the underlying storage
+ before the error occurred. This means that the underlying storage
engine must be able to drop index in-place with X-lock held.
(As X-lock will be held here if new indexes are to be committed)
*/
diff --git a/sql/handler.cc b/sql/handler.cc
index 6919b252e14..2186d389056 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -30,7 +30,7 @@
#include "sql_table.h" // build_table_filename
#include "sql_parse.h" // check_stack_overrun
#include "sql_acl.h" // SUPER_ACL
-#include "sql_base.h" // free_io_cache
+#include "sql_base.h" // TDC_element
#include "discover.h" // extension_based_table_discovery, etc
#include "log_event.h" // *_rows_log_event
#include "create_options.h"
@@ -295,7 +295,7 @@ handler *get_ha_partition(partition_info *part_info)
static const char **handler_errmsgs;
C_MODE_START
-static const char **get_handler_errmsgs()
+static const char **get_handler_errmsgs(int nr)
{
return handler_errmsgs;
}
@@ -324,7 +324,7 @@ int ha_init_errors(void)
/* Set the dedicated error messages. */
SETMSG(HA_ERR_KEY_NOT_FOUND, ER_DEFAULT(ER_KEY_NOT_FOUND));
SETMSG(HA_ERR_FOUND_DUPP_KEY, ER_DEFAULT(ER_DUP_KEY));
- SETMSG(HA_ERR_RECORD_CHANGED, "Update wich is recoverable");
+ SETMSG(HA_ERR_RECORD_CHANGED, "Update which is recoverable");
SETMSG(HA_ERR_WRONG_INDEX, "Wrong index given to function");
SETMSG(HA_ERR_CRASHED, ER_DEFAULT(ER_NOT_KEYFILE));
SETMSG(HA_ERR_WRONG_IN_RECORD, ER_DEFAULT(ER_CRASHED_ON_USAGE));
@@ -386,12 +386,10 @@ int ha_init_errors(void)
*/
static int ha_finish_errors(void)
{
- const char **errmsgs;
-
   /* Free the error message strings. */
- if (! (errmsgs= my_error_unregister(HA_ERR_FIRST, HA_ERR_LAST)))
- return 1;
- my_free(errmsgs);
+ my_error_unregister(HA_ERR_FIRST, HA_ERR_LAST);
+ my_free(handler_errmsgs);
+ handler_errmsgs= 0;
return 0;
}
@@ -3079,6 +3077,7 @@ int handler::update_auto_increment()
if (unlikely(nr == ULONGLONG_MAX))
DBUG_RETURN(HA_ERR_AUTOINC_ERANGE);
+ DBUG_ASSERT(nr != 0);
DBUG_PRINT("info",("auto_increment: %llu nb_reserved_values: %llu",
nr, append ? nb_reserved_values : 0));
@@ -5863,8 +5862,6 @@ int handler::ha_reset()
DBUG_ASSERT(table->key_read == 0);
/* ensure that ha_index_end / ha_rnd_end has been called */
DBUG_ASSERT(inited == NONE);
- /* Free cache used by filesort */
- free_io_cache(table);
/* reset the bitmaps to point to defaults */
table->default_column_bitmaps();
pushed_cond= NULL;
diff --git a/sql/handler.h b/sql/handler.h
index 2b535fb34dd..6054ec2db35 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1302,7 +1302,7 @@ struct handlerton
};
/*
- By default (if not implemented by the engine, but the discovery_table() is
+ By default (if not implemented by the engine, but the discover_table() is
implemented) it will perform a file-based discovery:
   - if tablefile_extensions[0] is not null, this will discover all tables
@@ -3626,7 +3626,7 @@ public:
*) a) If the previous step succeeds, handler::ha_commit_inplace_alter_table() is
called to allow the storage engine to do any final updates to its structures,
to make all earlier changes durable and visible to other connections.
- b) If we have failed to upgrade lock or any errors have occured during the
+ b) If we have failed to upgrade lock or any errors have occurred during the
handler functions calls (including commit), we call
handler::ha_commit_inplace_alter_table()
to rollback all changes which were done during previous steps.
diff --git a/sql/item.cc b/sql/item.cc
index e47974408f4..7cdb2d2e7e4 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1259,6 +1259,22 @@ err:
if allowed, otherwise - null.
*/
bzero((char*) ltime,sizeof(*ltime));
+ if (fuzzydate & TIME_TIME_ONLY)
+ {
+ /*
+ In the following scenario:
+ - The caller expected to get a TIME value
+ - Item returned a not NULL string or numeric value
+ - But then conversion from string or number to TIME failed
+ we need to change the default time_type from MYSQL_TIMESTAMP_DATE
+ (which was set in bzero) to MYSQL_TIMESTAMP_TIME and therefore
+ return TIME'00:00:00' rather than DATE'0000-00-00'.
+ If we don't do this, methods like Item::get_time_with_conversion()
+ will erroneously subtract CURRENT_DATE from '0000-00-00 00:00:00'
+ and return TIME'-838:59:59' instead of TIME'00:00:00' as a result.
+ */
+ ltime->time_type= MYSQL_TIMESTAMP_TIME;
+ }
return null_value|= !(fuzzydate & TIME_FUZZY_DATES);
}
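
The added branch above only adjusts the type tag of an already-zeroed result: when conversion of a non-NULL string or number to a temporal value fails and the caller asked for a TIME, the zero value is reported as TIME'00:00:00' rather than DATE'0000-00-00', so Item::get_time_with_conversion() will not subtract CURRENT_DATE from it. A simplified standalone sketch of the same decision (hypothetical types, not the server's MYSQL_TIME):

    #include <cstring>

    enum TimeType { TS_DATE, TS_TIME };
    struct TimeValue { TimeType time_type; int hour, minute, second; };

    // Zero the result of a failed conversion; switch the type tag when the
    // caller only wanted a TIME value.
    void zero_after_failed_conversion(TimeValue *t, bool caller_wants_time_only)
    {
      std::memset(t, 0, sizeof(*t));       // defaults to TS_DATE (enum value 0)
      if (caller_wants_time_only)
        t->time_type= TS_TIME;             // report TIME'00:00:00' instead
    }
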
@@ -4565,7 +4581,7 @@ bool is_outer_table(TABLE_LIST *table, SELECT_LEX *select)
@retval
0 column fully fixed and fix_fields() should return FALSE
@retval
- -1 error occured
+ -1 error occurred
*/
int
@@ -5437,8 +5453,7 @@ String_copier_for_item::copy_with_warn(CHARSET_INFO *dstcs, String *dst,
if (const char *pos= cannot_convert_error_pos())
{
char buf[16];
- int mblen= srccs->cset->charlen(srccs, (const uchar *) pos,
- (const uchar *) src + src_length);
+ int mblen= my_charlen(srccs, pos, src + src_length);
DBUG_ASSERT(mblen > 0 && mblen * 2 + 1 <= (int) sizeof(buf));
octet2hex(buf, pos, mblen);
push_warning_printf(m_thd, Sql_condition::WARN_LEVEL_WARN,
@@ -6389,6 +6404,7 @@ bool Item::cache_const_expr_analyzer(uchar **arg)
!(basic_const_item() || item->basic_const_item() ||
item->type() == Item::FIELD_ITEM ||
item->type() == SUBSELECT_ITEM ||
+ item->type() == CACHE_ITEM ||
/*
Do not cache GET_USER_VAR() function as its const_item() may
return TRUE for the current thread but it still may change
@@ -6486,7 +6502,7 @@ void Item_field::update_null_value()
UPDATE statement.
RETURN
- 0 if error occured
+ 0 if error occurred
ref if all conditions are met
this field otherwise
*/
diff --git a/sql/item.h b/sql/item.h
index 9954b0ab31d..e42442aa301 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -187,7 +187,7 @@ public:
{
return collation->coll->strnncollsp(collation,
(uchar *) s->ptr(), s->length(),
- (uchar *) t->ptr(), t->length(), 0);
+ (uchar *) t->ptr(), t->length());
}
};
@@ -428,7 +428,7 @@ public:
RETURN
FALSE if parameter value has been set,
- TRUE if error has occured.
+ TRUE if error has occurred.
*/
virtual bool set_value(THD *thd, sp_rcontext *ctx, Item **it)= 0;
@@ -660,9 +660,7 @@ protected:
Field *tmp_table_field_from_field_type(TABLE *table,
bool fixed_length,
bool set_blob_packlength);
- Field *create_tmp_field(bool group, TABLE *table,
- uint convert_blob_length,
- uint convert_int_length);
+ Field *create_tmp_field(bool group, TABLE *table, uint convert_int_length);
public:
/*
@@ -1660,16 +1658,13 @@ public:
// used in row subselects to get value of elements
virtual void bring_value() {}
- virtual Field *create_tmp_field(bool group, TABLE *table,
- uint convert_blob_length)
+ virtual Field *create_tmp_field(bool group, TABLE *table)
{
/*
Values with MY_INT32_NUM_DECIMAL_DIGITS digits may or may not fit into
Field_long : make them Field_longlong.
*/
- return create_tmp_field(false, table,
- convert_blob_length,
- MY_INT32_NUM_DECIMAL_DIGITS - 2);
+ return create_tmp_field(false, table, MY_INT32_NUM_DECIMAL_DIGITS - 2);
}
virtual Item_field *field_for_view_update() { return 0; }
@@ -2738,7 +2733,7 @@ public:
/*
If value for parameter was not set we treat it as non-const
- so noone will use parameters value in fix_fields still
+    so no one will use the parameter's value in fix_fields; still the
parameter is constant during execution.
*/
virtual table_map used_tables() const
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 44dc8ff9e09..579cdc10057 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -2246,50 +2246,6 @@ void Item_func_between::print(String *str, enum_query_type query_type)
}
-void
-Item_func_case_abbreviation2::fix_length_and_dec2(Item **args)
-{
- uint32 char_length;
- set_handler_by_field_type(agg_field_type(args, 2, true));
- maybe_null=args[0]->maybe_null || args[1]->maybe_null;
- decimals= MY_MAX(args[0]->decimals, args[1]->decimals);
- unsigned_flag= args[0]->unsigned_flag && args[1]->unsigned_flag;
-
- if (Item_func_case_abbreviation2::result_type() == DECIMAL_RESULT ||
- Item_func_case_abbreviation2::result_type() == INT_RESULT)
- {
- int len0= args[0]->max_char_length() - args[0]->decimals
- - (args[0]->unsigned_flag ? 0 : 1);
-
- int len1= args[1]->max_char_length() - args[1]->decimals
- - (args[1]->unsigned_flag ? 0 : 1);
-
- char_length= MY_MAX(len0, len1) + decimals + (unsigned_flag ? 0 : 1);
- }
- else
- char_length= MY_MAX(args[0]->max_char_length(), args[1]->max_char_length());
-
- switch (Item_func_case_abbreviation2::result_type()) {
- case STRING_RESULT:
- if (count_string_result_length(Item_func_case_abbreviation2::field_type(),
- args, 2))
- return;
- break;
- case DECIMAL_RESULT:
- case REAL_RESULT:
- break;
- case INT_RESULT:
- decimals= 0;
- break;
- case ROW_RESULT:
- case TIME_RESULT:
- DBUG_ASSERT(0);
- }
- fix_char_length(char_length);
-}
-
-
-
uint Item_func_case_abbreviation2::decimal_precision2(Item **args) const
{
int arg0_int_part= args[0]->decimal_int_part();
@@ -2561,8 +2517,15 @@ void Item_func_nullif::update_used_tables()
void
Item_func_nullif::fix_length_and_dec()
{
- if (!args[2]) // Only false if EOM
- return;
+ /*
+ If this is the first invocation of fix_length_and_dec(), create the
+ third argument as a copy of the first. This cannot be done before
+ fix_fields(), because fix_fields() might replace items,
+    for example NOT x --> x==0, or (SELECT 1) --> 1.
+ See also class Item_func_nullif declaration.
+ */
+ if (arg_count == 2)
+ args[arg_count++]= args[0];
THD *thd= current_thd;
/*
@@ -2579,7 +2542,8 @@ Item_func_nullif::fix_length_and_dec()
args[0] and args[2] should still point to the same original l_expr.
*/
DBUG_ASSERT(args[0] == args[2] || thd->stmt_arena->is_stmt_execute());
- if (args[0]->type() == SUM_FUNC_ITEM && !thd->lex->context_analysis_only)
+ if (args[0]->type() == SUM_FUNC_ITEM &&
+ !thd->lex->is_ps_or_view_context_analysis())
{
/*
NULLIF(l_expr, r_expr)
@@ -2700,7 +2664,7 @@ Item_func_nullif::fix_length_and_dec()
m_cache= args[0]->cmp_type() == STRING_RESULT ?
new (thd->mem_root) Item_cache_str_for_nullif(thd, args[0]) :
Item_cache::get_cache(thd, args[0]);
- m_cache->setup(current_thd, args[0]);
+ m_cache->setup(thd, args[0]);
m_cache->store(args[0]);
m_cache->set_used_tables(args[0]->used_tables());
thd->change_item_tree(&args[0], m_cache);
@@ -2712,7 +2676,7 @@ Item_func_nullif::fix_length_and_dec()
unsigned_flag= args[2]->unsigned_flag;
fix_char_length(args[2]->max_char_length());
maybe_null=1;
- setup_args_and_comparator(current_thd, &cmp);
+ setup_args_and_comparator(thd, &cmp);
}
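
The rewritten fix_length_and_dec() above re-creates NULLIF's hidden third argument as a copy of the first on the first invocation. The reason two copies of "a" exist at all: NULLIF(a,b) is shorthand for CASE WHEN a=b THEN NULL ELSE a END (see the QT_ITEM_ORIGINAL_FUNC_NULLIF comment in mysqld.h below), so "a" is needed once for the comparison and once as the returned value, and later optimizations may rewrite the two copies differently. A standalone illustration of the semantics, not server code:

    #include <optional>

    // NULLIF(a, b) == CASE WHEN a = b THEN NULL ELSE a END.
    // The first argument is used twice: compared against b, then returned.
    template <typename T>
    std::optional<T> nullif(const T &a_for_comparison, const T &b,
                            const T &a_for_return_value)
    {
      if (a_for_comparison == b)
        return std::nullopt;                // SQL NULL
      return a_for_return_value;            // normally equal to a_for_comparison
    }

    // nullif(10, 10, 10) -> empty optional (NULL); nullif(10, 11, 10) -> 10
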
@@ -2731,10 +2695,10 @@ void Item_func_nullif::print(String *str, enum_query_type query_type)
Therefore, after equal field propagation args[0] and args[2] can point
to different items.
*/
- if (!(query_type & QT_ITEM_FUNC_NULLIF_TO_CASE) || args[0] == args[2])
+ if ((query_type & QT_ITEM_ORIGINAL_FUNC_NULLIF) || args[0] == args[2])
{
/*
- If no QT_ITEM_FUNC_NULLIF_TO_CASE is requested,
+ If QT_ITEM_ORIGINAL_FUNC_NULLIF is requested,
that means we want the original NULLIF() representation,
e.g. when we are in:
SHOW CREATE {VIEW|FUNCTION|PROCEDURE}
@@ -2742,15 +2706,12 @@ void Item_func_nullif::print(String *str, enum_query_type query_type)
The original representation is possible only if
args[0] and args[2] still point to the same Item.
- The caller must pass call print() with QT_ITEM_FUNC_NULLIF_TO_CASE
+    The caller must never call print() with QT_ITEM_ORIGINAL_FUNC_NULLIF
if an expression has undergone some optimization
(e.g. equal field propagation done in optimize_cond()) already and
NULLIF() potentially has two different representations of "a":
- one "a" for comparison
- another "a" for the returned value!
-
- Note, the EXPLAIN EXTENDED and EXPLAIN FORMAT=JSON routines
- do pass QT_ITEM_FUNC_NULLIF_TO_CASE to print().
*/
DBUG_ASSERT(args[0] == args[2] || current_thd->lex->context_analysis_only);
str->append(func_name());
@@ -2948,7 +2909,7 @@ Item *Item_func_case::find_item(String *str)
return else_expr_num != -1 ? args[else_expr_num] : 0;
value_added_map|= 1U << (uint)cmp_type;
}
- if (!cmp_items[(uint)cmp_type]->cmp(args[i]) && !args[i]->null_value)
+ if (cmp_items[(uint)cmp_type]->cmp(args[i]) == FALSE)
return args[i + 1];
}
}
@@ -3066,24 +3027,6 @@ bool Item_func_case::fix_fields(THD *thd, Item **ref)
}
-void Item_func_case::agg_str_lengths(Item* arg)
-{
- fix_char_length(MY_MAX(max_char_length(), arg->max_char_length()));
- set_if_bigger(decimals, arg->decimals);
- unsigned_flag= unsigned_flag && arg->unsigned_flag;
-}
-
-
-void Item_func_case::agg_num_lengths(Item *arg)
-{
- uint len= my_decimal_length_to_precision(arg->max_length, arg->decimals,
- arg->unsigned_flag) - arg->decimals;
- set_if_bigger(max_length, len);
- set_if_bigger(decimals, arg->decimals);
- unsigned_flag= unsigned_flag && arg->unsigned_flag;
-}
-
-
/**
Check if (*place) and new_value points to different Items and call
THD::change_item_tree() if needed.
@@ -3145,18 +3088,7 @@ void Item_func_case::fix_length_and_dec()
}
else
{
- collation.set_numeric();
- max_length=0;
- decimals=0;
- unsigned_flag= TRUE;
- for (uint i= 0; i < ncases; i+= 2)
- agg_num_lengths(args[i + 1]);
- if (else_expr_num != -1)
- agg_num_lengths(args[else_expr_num]);
- max_length= my_decimal_precision_to_length_no_truncation(max_length +
- decimals,
- decimals,
- unsigned_flag);
+ fix_attributes(agg, nagg);
}
/*
@@ -3458,23 +3390,25 @@ my_decimal *Item_func_coalesce::decimal_op(my_decimal *decimal_value)
}
-void Item_func_coalesce::fix_length_and_dec()
+void Item_hybrid_func::fix_attributes(Item **items, uint nitems)
{
- set_handler_by_field_type(agg_field_type(args, arg_count, true));
- switch (Item_func_coalesce::result_type()) {
+ switch (Item_hybrid_func::result_type()) {
case STRING_RESULT:
- if (count_string_result_length(Item_func_coalesce::field_type(),
- args, arg_count))
+ if (count_string_result_length(Item_hybrid_func::field_type(),
+ items, nitems))
return;
break;
case DECIMAL_RESULT:
- count_decimal_length();
+ collation.set_numeric();
+ count_decimal_length(items, nitems);
break;
case REAL_RESULT:
- count_real_length();
+ collation.set_numeric();
+ count_real_length(items, nitems);
break;
case INT_RESULT:
- count_only_length(args, arg_count);
+ collation.set_numeric();
+ count_only_length(items, nitems);
decimals= 0;
break;
case ROW_RESULT:
@@ -3606,11 +3540,11 @@ static int cmp_decimal(void *cmp_arg, my_decimal *a, my_decimal *b)
}
-int in_vector::find(Item *item)
+bool in_vector::find(Item *item)
{
uchar *result=get_value(item);
if (!result || !used_count)
- return 0; // Null value
+ return false; // Null value
uint start,end;
start=0; end=used_count-1;
@@ -3619,13 +3553,13 @@ int in_vector::find(Item *item)
uint mid=(start+end+1)/2;
int res;
if ((res=(*compare)(collation, base+mid*size, result)) == 0)
- return 1;
+ return true;
if (res < 0)
start=mid;
else
end=mid-1;
}
- return (int) ((*compare)(collation, base+start*size, result) == 0);
+ return ((*compare)(collation, base+start*size, result) == 0);
}
in_string::in_string(THD *thd, uint elements, qsort2_cmp cmp_func,
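
in_vector::find() above now returns bool, but the search itself is unchanged: a bisection over the sorted array of constants, finished by one equality check. Since NULL values are never stored in the array (see the fix_length_and_dec() hunk further down), a miss only means "no match among the non-NULL constants". A standalone sketch of the same search shape, assuming a sorted vector of ints:

    #include <vector>

    // Upper-mid bisection with start/end converging, then a final equality
    // check, mirroring in_vector::find(). Assumes v is sorted ascending and
    // contains no NULLs.
    bool find_in_sorted(const std::vector<int> &v, int key)
    {
      if (v.empty())
        return false;
      size_t start= 0, end= v.size() - 1;
      while (start != end)
      {
        size_t mid= (start + end + 1) / 2;
        if (v[mid] == key)
          return true;
        if (v[mid] < key)
          start= mid;
        else
          end= mid - 1;
      }
      return v[start] == key;
    }
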
@@ -3957,14 +3891,20 @@ int cmp_item_row::cmp(Item *arg)
arg->bring_value();
for (uint i=0; i < n; i++)
{
- if (comparators[i]->cmp(arg->element_index(i)))
+ const int rc= comparators[i]->cmp(arg->element_index(i));
+ switch (rc)
{
- if (!arg->element_index(i)->null_value)
- return 1;
- was_null= 1;
+ case UNKNOWN:
+ was_null= true;
+ break;
+ case TRUE:
+ return TRUE;
+ case FALSE:
+ break; // elements #i are equal
}
+ arg->null_value|= arg->element_index(i)->null_value;
}
- return (arg->null_value= was_null);
+ return was_null ? UNKNOWN : FALSE;
}
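
cmp_item_row::cmp() above now returns a three-valued answer to "do the rows differ": any element pair that definitely differs decides the comparison, otherwise a single UNKNOWN element (NULL on either side) makes the whole result UNKNOWN. A standalone sketch of that rule over nullable ints (the TRUE_/FALSE_/UNKNOWN_ constants are local to the sketch, not the server's):

    #include <optional>
    #include <vector>

    enum { FALSE_= 0, TRUE_= 1, UNKNOWN_= -1 };

    // "a <> b" for one nullable element.
    int cmp_elem(const std::optional<int> &a, const std::optional<int> &b)
    {
      if (!a || !b)
        return UNKNOWN_;
      return *a != *b ? TRUE_ : FALSE_;
    }

    // Row inequality: a definite mismatch wins; otherwise any UNKNOWN element
    // makes the whole comparison UNKNOWN; all-equal gives FALSE.
    int cmp_row(const std::vector<std::optional<int>> &a,
                const std::vector<std::optional<int>> &b)
    {
      bool was_null= false;
      for (size_t i= 0; i < a.size() && i < b.size(); i++)
      {
        const int rc= cmp_elem(a[i], b[i]);
        if (rc == TRUE_)
          return TRUE_;
        if (rc == UNKNOWN_)
          was_null= true;
      }
      return was_null ? UNKNOWN_ : FALSE_;
    }
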
@@ -3987,15 +3927,15 @@ void cmp_item_decimal::store_value(Item *item)
   /* val may be zero if item is NULL */
if (val && val != &value)
my_decimal2decimal(val, &value);
+ m_null_value= item->null_value;
}
int cmp_item_decimal::cmp(Item *arg)
{
my_decimal tmp_buf, *tmp= arg->val_decimal(&tmp_buf);
- if (arg->null_value)
- return 1;
- return my_decimal_cmp(&value, tmp);
+ return (m_null_value || arg->null_value) ?
+ UNKNOWN : (my_decimal_cmp(&value, tmp) != 0);
}
@@ -4019,12 +3959,14 @@ void cmp_item_datetime::store_value(Item *item)
enum_field_types f_type=
tmp_item[0]->field_type_for_temporal_comparison(warn_item);
value= get_datetime_value(thd, &tmp_item, &lval_cache, f_type, &is_null);
+ m_null_value= item->null_value;
}
int cmp_item_datetime::cmp(Item *arg)
{
- return value != arg->val_temporal_packed(warn_item);
+ const bool rc= value != arg->val_temporal_packed(warn_item);
+ return (m_null_value || arg->null_value) ? UNKNOWN : rc;
}
@@ -4048,10 +3990,10 @@ bool Item_func_in::count_sargable_conds(uchar *arg)
}
-bool Item_func_in::nulls_in_row()
+bool Item_func_in::list_contains_null()
{
Item **arg,**arg_end;
- for (arg= args+1, arg_end= args+arg_count; arg != arg_end ; arg++)
+ for (arg= args + 1, arg_end= args+arg_count; arg != arg_end ; arg++)
{
if ((*arg)->null_inside())
return 1;
@@ -4132,7 +4074,7 @@ static int srtcmp_in(CHARSET_INFO *cs, const String *x,const String *y)
{
return cs->coll->strnncollsp(cs,
(uchar *) x->ptr(),x->length(),
- (uchar *) y->ptr(),y->length(), 0);
+ (uchar *) y->ptr(),y->length());
}
void Item_func_in::fix_length_and_dec()
@@ -4166,6 +4108,32 @@ void Item_func_in::fix_length_and_dec()
}
}
+ /*
+ First conditions for bisection to be possible:
+ 1. All types are similar, and
+ 2. All expressions in <in value list> are const
+ */
+ bool bisection_possible=
+ type_cnt == 1 && // 1
+ const_itm; // 2
+ if (bisection_possible)
+ {
+ /*
+ In the presence of NULLs, the correct result of evaluating this item
+ must be UNKNOWN or FALSE. To achieve that:
+ - If type is scalar, we can use bisection and the "have_null" boolean.
+ - If type is ROW, we will need to scan all of <in value list> when
+ searching, so bisection is impossible. Unless:
+ 3. UNKNOWN and FALSE are equivalent results
+ 4. Neither left expression nor <in value list> contain any NULL value
+ */
+
+ if (m_compare_type == ROW_RESULT &&
+ ((!is_top_level_item() || negated) && // 3
+ (list_contains_null() || args[0]->maybe_null))) // 4
+ bisection_possible= false;
+ }
+
if (type_cnt == 1)
{
if (m_compare_type == STRING_RESULT &&
@@ -4178,7 +4146,7 @@ void Item_func_in::fix_length_and_dec()
uint cols= args[0]->cols();
cmp_item_row *cmp= 0;
- if (const_itm && !nulls_in_row())
+ if (bisection_possible)
{
array= new (thd->mem_root) in_row(thd, arg_count-1, 0);
cmp= &((in_row*)array)->tmp;
@@ -4207,11 +4175,8 @@ void Item_func_in::fix_length_and_dec()
}
}
}
- /*
- Row item with NULLs inside can return NULL or FALSE =>
- they can't be processed as static
- */
- if (type_cnt == 1 && const_itm && !nulls_in_row())
+
+ if (bisection_possible)
{
/*
IN must compare INT columns and constants as int values (the same
@@ -4267,20 +4232,25 @@ void Item_func_in::fix_length_and_dec()
array= new (thd->mem_root) in_datetime(thd, date_arg, arg_count - 1);
break;
}
- if (array && !(thd->is_fatal_error)) // If not EOM
+ if (!array || thd->is_fatal_error) // OOM
+ return;
+ uint j=0;
+ for (uint i=1 ; i < arg_count ; i++)
{
- uint j=0;
- for (uint i=1 ; i < arg_count ; i++)
+ array->set(j,args[i]);
+ if (!args[i]->null_value)
+ j++; // include this cell in the array.
+ else
{
- array->set(j,args[i]);
- if (!args[i]->null_value) // Skip NULL values
- j++;
- else
- have_null= 1;
+ /*
+        We don't put NULL values in the array, to avoid erroneous matches in
+ bisection.
+ */
+ have_null= 1;
}
- if ((array->used_count= j))
- array->sort();
}
+ if ((array->used_count= j))
+ array->sort();
}
else
{
@@ -4348,7 +4318,14 @@ longlong Item_func_in::val_int()
uint value_added_map= 0;
if (array)
{
- int tmp=array->find(args[0]);
+ bool tmp=array->find(args[0]);
+ /*
+ NULL on left -> UNKNOWN.
+ Found no match, and NULL on right -> UNKNOWN.
+ NULL on right can never give a match, as it is not stored in
+ array.
+ See also the 'bisection_possible' variable in fix_length_and_dec().
+ */
null_value=args[0]->null_value || (!tmp && have_null);
return (longlong) (!null_value && tmp != negated);
}
@@ -4370,13 +4347,12 @@ longlong Item_func_in::val_int()
if (!(value_added_map & (1U << (uint)cmp_type)))
{
in_item->store_value(args[0]);
- if ((null_value= args[0]->null_value))
- return 0;
value_added_map|= 1U << (uint)cmp_type;
}
- if (!in_item->cmp(args[i]) && !args[i]->null_value)
+ const int rc= in_item->cmp(args[i]);
+ if (rc == FALSE)
return (longlong) (!negated);
- have_null|= args[i]->null_value;
+ have_null|= (rc == UNKNOWN);
}
null_value= have_null;
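
Taken together, the val_int() hunks above give <in_expr> IN (<in value list>) its SQL three-valued result: TRUE on a match, UNKNOWN when there is no match but a NULL was involved on either side, FALSE otherwise. A standalone sketch of that rule (plain C++, not the Item machinery):

    #include <optional>
    #include <vector>

    enum class Bool3 { False, True, Unknown };

    // x IN (list): TRUE on a match; if there is no match, UNKNOWN when x is
    // NULL or the list contained a NULL; otherwise FALSE.
    Bool3 eval_in(const std::optional<int> &x,
                  const std::vector<std::optional<int>> &list)
    {
      if (!x)
        return Bool3::Unknown;                  // NULL on the left
      bool have_null= false;
      for (const std::optional<int> &v : list)
      {
        if (!v) { have_null= true; continue; }  // a NULL can never match
        if (*v == *x)
          return Bool3::True;
      }
      return have_null ? Bool3::Unknown : Bool3::False;
    }
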
@@ -5786,7 +5762,7 @@ bool Item_func_not::fix_fields(THD *thd, Item **ref)
args[0]->under_not(this);
if (args[0]->type() == FIELD_ITEM)
{
- /* replace "NOT <field>" with "<filed> == 0" */
+ /* replace "NOT <field>" with "<field> == 0" */
Query_arena backup, *arena;
Item *new_item;
bool rc= TRUE;
@@ -6498,7 +6474,8 @@ longlong Item_equal::val_int()
/* Skip fields of tables that has not been read yet */
if (!field->table->status || (field->table->status & STATUS_NULL_ROW))
{
- if (eval_item->cmp(item) || (null_value= item->null_value))
+ const int rc= eval_item->cmp(item);
+ if ((rc == TRUE) || (null_value= (rc == UNKNOWN)))
return 0;
}
}
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 717e9e59ddf..2d197a86d9b 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -795,6 +795,7 @@ public:
public:
inline void negate() { negated= !negated; }
inline void top_level_item() { pred_level= 1; }
+ bool is_top_level_item() const { return pred_level; }
Item *neg_transformer(THD *thd)
{
negated= !negated;
@@ -901,7 +902,11 @@ public:
String *str_op(String *);
my_decimal *decimal_op(my_decimal *);
bool date_op(MYSQL_TIME *ltime,uint fuzzydate);
- void fix_length_and_dec();
+ void fix_length_and_dec()
+ {
+ set_handler_by_field_type(agg_field_type(args, arg_count, true));
+ fix_attributes(args, arg_count);
+ }
const char *func_name() const { return "coalesce"; }
table_map not_null_tables() const { return 0; }
};
@@ -914,13 +919,18 @@ public:
*/
class Item_func_case_abbreviation2 :public Item_func_hybrid_field_type
{
+protected:
+ void fix_length_and_dec2(Item **items)
+ {
+ set_handler_by_field_type(agg_field_type(items, 2, true));
+ fix_attributes(items, 2);
+ }
+ uint decimal_precision2(Item **args) const;
public:
Item_func_case_abbreviation2(THD *thd, Item *a, Item *b):
Item_func_hybrid_field_type(thd, a, b) { }
Item_func_case_abbreviation2(THD *thd, Item *a, Item *b, Item *c):
Item_func_hybrid_field_type(thd, a, b, c) { }
- void fix_length_and_dec2(Item **args);
- uint decimal_precision2(Item **args) const;
};
@@ -999,11 +1009,18 @@ class Item_func_nullif :public Item_func_hybrid_field_type
Item_cache *m_cache;
int compare();
public:
- // Put "a" to args[0] for comparison and to args[2] for the returned value.
+ /*
+    Here we pass three arguments to the parent constructor; as NULLIF is
+    a three-argument function, it needs two copies of the first argument
+ (see above). But fix_fields() will be confused if we try to prepare the
+ same Item twice (if args[0]==args[2]), so we hide the third argument
+ (decrementing arg_count) and copy args[2]=args[0] again after fix_fields().
+ See also Item_func_nullif::fix_length_and_dec().
+ */
Item_func_nullif(THD *thd, Item *a, Item *b):
Item_func_hybrid_field_type(thd, a, b, a),
m_cache(NULL)
- {}
+ { arg_count--; }
bool date_op(MYSQL_TIME *ltime, uint fuzzydate);
double real_op();
longlong int_op();
@@ -1060,7 +1077,7 @@ public:
{
my_qsort2(base,used_count,size,compare,(void*)collation);
}
- int find(Item *item);
+ bool find(Item *item);
/*
Create an instance of Item_{type} (e.g. Item_decimal) constant object
@@ -1228,6 +1245,10 @@ public:
cmp_item() { cmp_charset= &my_charset_bin; }
virtual ~cmp_item() {}
virtual void store_value(Item *item)= 0;
+ /**
+ @returns result (TRUE, FALSE or UNKNOWN) of
+ "stored argument's value <> item's value"
+ */
virtual int cmp(Item *item)= 0;
// for optimized IN with row
virtual int compare(cmp_item *item)= 0;
@@ -1240,7 +1261,14 @@ public:
}
};
-class cmp_item_string :public cmp_item
+/// cmp_item which stores a scalar (i.e. non-ROW).
+class cmp_item_scalar : public cmp_item
+{
+protected:
+ bool m_null_value; ///< If stored value is NULL
+};
+
+class cmp_item_string : public cmp_item_scalar
{
protected:
String *value_res;
@@ -1266,14 +1294,20 @@ public:
void store_value(Item *item)
{
value_res= item->val_str(&value);
+ m_null_value= item->null_value;
}
int cmp(Item *arg)
{
char buff[STRING_BUFFER_USUAL_SIZE];
- String tmp(buff, sizeof(buff), cmp_charset), *res;
- res= arg->val_str(&tmp);
- return (value_res ? (res ? sortcmp(value_res, res, cmp_charset) : 1) :
- (res ? -1 : 0));
+ String tmp(buff, sizeof(buff), cmp_charset), *res= arg->val_str(&tmp);
+ if (m_null_value || arg->null_value)
+ return UNKNOWN;
+ if (value_res && res)
+ return sortcmp(value_res, res, cmp_charset) != 0;
+ else if (!value_res && !res)
+ return FALSE;
+ else
+ return TRUE;
}
int compare(cmp_item *ci)
{
@@ -1288,7 +1322,7 @@ public:
}
};
-class cmp_item_int :public cmp_item
+class cmp_item_int : public cmp_item_scalar
{
longlong value;
public:
@@ -1296,10 +1330,12 @@ public:
void store_value(Item *item)
{
value= item->val_int();
+ m_null_value= item->null_value;
}
int cmp(Item *arg)
{
- return value != arg->val_int();
+ const bool rc= value != arg->val_int();
+ return (m_null_value || arg->null_value) ? UNKNOWN : rc;
}
int compare(cmp_item *ci)
{
@@ -1315,7 +1351,7 @@ public:
If the left item is a constant one then its value is cached in the
lval_cache variable.
*/
-class cmp_item_datetime :public cmp_item
+class cmp_item_datetime : public cmp_item_scalar
{
longlong value;
public:
@@ -1333,7 +1369,7 @@ public:
cmp_item *make_same();
};
-class cmp_item_real :public cmp_item
+class cmp_item_real : public cmp_item_scalar
{
double value;
public:
@@ -1341,10 +1377,12 @@ public:
void store_value(Item *item)
{
value= item->val_real();
+ m_null_value= item->null_value;
}
int cmp(Item *arg)
{
- return value != arg->val_real();
+ const bool rc= value != arg->val_real();
+ return (m_null_value || arg->null_value) ? UNKNOWN : rc;
}
int compare(cmp_item *ci)
{
@@ -1355,7 +1393,7 @@ public:
};
-class cmp_item_decimal :public cmp_item
+class cmp_item_decimal : public cmp_item_scalar
{
my_decimal value;
public:
@@ -1382,12 +1420,13 @@ public:
void store_value(Item *item)
{
value_res= item->val_str(&value);
+ m_null_value= item->null_value;
}
int cmp(Item *item)
{
// Should never be called
- DBUG_ASSERT(0);
- return 1;
+ DBUG_ASSERT(false);
+ return TRUE;
}
int compare(cmp_item *ci)
{
@@ -1448,39 +1487,47 @@ public:
Item *find_item(String *str);
CHARSET_INFO *compare_collation() const { return cmp_collation.collation; }
void cleanup();
- void agg_str_lengths(Item *arg);
- void agg_num_lengths(Item *arg);
Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond);
};
/*
- The Item_func_in class implements the in_expr IN(values_list) function.
+ The Item_func_in class implements
+ in_expr IN (<in value list>)
+ and
+ in_expr NOT IN (<in value list>)
The current implementation distinguishes 2 cases:
- 1) all items in the value_list are constants and have the same
+ 1) all items in <in value list> are constants and have the same
result type. This case is handled by in_vector class.
- 2) items in the value_list have different result types or there is some
- non-constant items.
- In this case Item_func_in employs several cmp_item objects to performs
- comparisons of in_expr and an item from the values_list. One cmp_item
+ 2) otherwise Item_func_in employs several cmp_item objects to perform
+ comparisons of in_expr and an item from <in value list>. One cmp_item
object for each result type. Different result types are collected in the
fix_length_and_dec() member function by means of collect_cmp_types()
function.
*/
class Item_func_in :public Item_func_opt_neg
{
+ /**
+ Usable if <in value list> is made only of constants. Returns true if one
+ of these constants contains a NULL. Example:
+ IN ( (-5, (12,NULL)), ... ).
+ */
+ bool list_contains_null();
protected:
SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param,
Field *field, Item *value);
public:
- /*
- an array of values when the right hand arguments of IN
- are all SQL constant and there are no nulls
- */
+ /// An array of values, created when the bisection lookup method is used
in_vector *array;
+ /**
+    True if some NULL was found among <in value list> during a val_int() call;
+    for example
+ IN ( (1,(3,'col')), ... ), where 'col' is a column which evaluates to
+ NULL.
+ */
bool have_null;
- /*
- true when all arguments of the IN clause are of compatible types
+ /**
+ true when all arguments of the IN list are of compatible types
and can be used safely as comparisons for key conditions
*/
bool arg_types_compatible;
@@ -1534,7 +1581,6 @@ public:
virtual void print(String *str, enum_query_type query_type);
enum Functype functype() const { return IN_FUNC; }
const char *func_name() const { return " IN "; }
- bool nulls_in_row();
bool eval_not_null_tables(uchar *opt_arg);
void fix_after_pullout(st_select_lex *new_parent, Item **ref);
bool count_sargable_conds(uchar *arg);
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 4b376706500..50b6f4a6b68 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -232,7 +232,7 @@ Item_func::fix_fields(THD *thd, Item **ref)
}
}
fix_length_and_dec();
- if (thd->is_error()) // An error inside fix_length_and_dec occured
+ if (thd->is_error()) // An error inside fix_length_and_dec occurred
return TRUE;
fixed= 1;
return FALSE;
@@ -575,18 +575,19 @@ void Item_udf_func::fix_num_length_and_dec()
@retval False on success, true on error.
*/
-void Item_func::count_datetime_length(Item **item, uint nitems)
+void Item_func::count_datetime_length(enum_field_types field_type_arg,
+ Item **item, uint nitems)
{
unsigned_flag= 0;
decimals= 0;
- if (field_type() != MYSQL_TYPE_DATE)
+ if (field_type_arg != MYSQL_TYPE_DATE)
{
for (uint i= 0; i < nitems; i++)
set_if_bigger(decimals, item[i]->decimals);
}
set_if_smaller(decimals, TIME_SECOND_PART_DIGITS);
uint len= decimals ? (decimals + 1) : 0;
- len+= mysql_temporal_int_part_length(field_type());
+ len+= mysql_temporal_int_part_length(field_type_arg);
fix_char_length(len);
}
@@ -595,16 +596,16 @@ void Item_func::count_datetime_length(Item **item, uint nitems)
result length/precision depends on argument ones.
*/
-void Item_func::count_decimal_length()
+void Item_func::count_decimal_length(Item **item, uint nitems)
{
int max_int_part= 0;
decimals= 0;
unsigned_flag= 1;
- for (uint i=0 ; i < arg_count ; i++)
+ for (uint i=0 ; i < nitems ; i++)
{
- set_if_bigger(decimals, args[i]->decimals);
- set_if_bigger(max_int_part, args[i]->decimal_int_part());
- set_if_smaller(unsigned_flag, args[i]->unsigned_flag);
+ set_if_bigger(decimals, item[i]->decimals);
+ set_if_bigger(max_int_part, item[i]->decimal_int_part());
+ set_if_smaller(unsigned_flag, item[i]->unsigned_flag);
}
int precision= MY_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION);
fix_char_length(my_decimal_precision_to_length_no_truncation(precision,
@@ -635,19 +636,20 @@ void Item_func::count_only_length(Item **item, uint nitems)
result length/precision depends on argument ones.
*/
-void Item_func::count_real_length()
+void Item_func::count_real_length(Item **items, uint nitems)
{
uint32 length= 0;
decimals= 0;
max_length= 0;
- for (uint i=0 ; i < arg_count ; i++)
+ unsigned_flag= false;
+ for (uint i=0 ; i < nitems ; i++)
{
if (decimals != NOT_FIXED_DEC)
{
- set_if_bigger(decimals, args[i]->decimals);
- set_if_bigger(length, (args[i]->max_length - args[i]->decimals));
+ set_if_bigger(decimals, items[i]->decimals);
+ set_if_bigger(length, (items[i]->max_length - items[i]->decimals));
}
- set_if_bigger(max_length, args[i]->max_length);
+ set_if_bigger(max_length, items[i]->max_length);
}
if (decimals != NOT_FIXED_DEC)
{
@@ -676,11 +678,11 @@ bool Item_func::count_string_result_length(enum_field_types field_type_arg,
if (agg_arg_charsets_for_string_result(collation, items, nitems, 1))
return true;
if (is_temporal_type(field_type_arg))
- count_datetime_length(items, nitems);
+ count_datetime_length(field_type_arg, items, nitems);
else
{
- decimals= NOT_FIXED_DEC;
count_only_length(items, nitems);
+ decimals= max_length ? NOT_FIXED_DEC : 0;
}
return false;
}
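
count_decimal_length() above aggregates attributes by taking the widest scale and the widest integer part among the items, then capping the combined precision. A worked, standalone sketch of that arithmetic (the cap of 65 matches the usual DECIMAL precision limit but is an assumption here, not something stated in this diff):

    #include <algorithm>
    #include <vector>

    struct DecAttrs { int int_part; int decimals; };

    DecAttrs aggregate_decimal(const std::vector<DecAttrs> &args,
                               int max_precision= 65)
    {
      int int_part= 0, decimals= 0;
      for (const DecAttrs &a : args)
      {
        int_part= std::max(int_part, a.int_part);
        decimals= std::max(decimals, a.decimals);
      }
      const int precision= std::min(int_part + decimals, max_precision);
      return { precision - decimals, decimals };
      // e.g. DECIMAL(10,2) and DECIMAL(6,4): int parts 8 and 2, scales 2 and 4
      //      -> precision 12, scale 4, i.e. DECIMAL(12,4)
    }
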
@@ -755,7 +757,7 @@ void Item_num_op::fix_length_and_dec(void)
if (r0 == REAL_RESULT || r1 == REAL_RESULT ||
r0 == STRING_RESULT || r1 ==STRING_RESULT)
{
- count_real_length();
+ count_real_length(args, arg_count);
max_length= float_length(decimals);
set_handler_by_result_type(REAL_RESULT);
}
@@ -5613,7 +5615,7 @@ void Item_func_get_user_var::fix_length_and_dec()
/*
If the variable didn't exist it has been created as a STRING-type.
- 'm_var_entry' is NULL only if there occured an error during the call to
+ 'm_var_entry' is NULL only if there occurred an error during the call to
get_var_with_binlog.
*/
if (!error && m_var_entry)
diff --git a/sql/item_func.h b/sql/item_func.h
index 5e1fa753f34..2ce199b3565 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -41,6 +41,14 @@ protected:
*/
uint allowed_arg_cols;
String *val_str_from_val_str_ascii(String *str, String *str2);
+
+ void count_only_length(Item **item, uint nitems);
+ void count_real_length(Item **item, uint nitems);
+ void count_decimal_length(Item **item, uint nitems);
+ void count_datetime_length(enum_field_types field_type,
+ Item **item, uint nitems);
+ bool count_string_result_length(enum_field_types field_type,
+ Item **item, uint nitems);
public:
table_map not_null_tables_cache;
@@ -148,16 +156,10 @@ public:
virtual void print(String *str, enum_query_type query_type);
void print_op(String *str, enum_query_type query_type);
void print_args(String *str, uint from, enum_query_type query_type);
- void count_only_length(Item **item, uint nitems);
- void count_real_length();
- void count_decimal_length();
inline bool get_arg0_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
{
return (null_value=args[0]->get_date_with_conversion(ltime, fuzzy_date));
}
- void count_datetime_length(Item **item, uint nitems);
- bool count_string_result_length(enum_field_types field_type,
- Item **item, uint nitems);
inline bool get_arg0_time(MYSQL_TIME *ltime)
{
null_value= args[0]->get_time(ltime);
@@ -175,7 +177,7 @@ public:
{
DBUG_ASSERT(thd == table->in_use);
return result_type() != STRING_RESULT ?
- create_tmp_field(false, table, 0, MY_INT32_NUM_DECIMAL_DIGITS) :
+ create_tmp_field(false, table, MY_INT32_NUM_DECIMAL_DIGITS) :
tmp_table_field_from_field_type(table, false, false);
}
Item *get_tmp_table_item(THD *thd);
@@ -387,6 +389,8 @@ public:
class Item_hybrid_func: public Item_func,
public Type_handler_hybrid_field_type
{
+protected:
+ void fix_attributes(Item **item, uint nitems);
public:
Item_hybrid_func(THD *thd): Item_func(thd) { }
Item_hybrid_func(THD *thd, Item *a): Item_func(thd, a) { }
@@ -1766,7 +1770,7 @@ public:
Field *create_field_for_create_select(THD *thd, TABLE *table)
{
return result_type() != STRING_RESULT ?
- create_tmp_field(false, table, 0, MY_INT32_NUM_DECIMAL_DIGITS) :
+ create_tmp_field(false, table, MY_INT32_NUM_DECIMAL_DIGITS) :
tmp_table_field_from_field_type(table, false, true);
}
table_map used_tables() const
diff --git a/sql/item_row.h b/sql/item_row.h
index bbb9a7f1f96..153a6f085b3 100644
--- a/sql/item_row.h
+++ b/sql/item_row.h
@@ -2,7 +2,7 @@
#define ITEM_ROW_INCLUDED
/*
- Copyright (c) 2002, 2010, Oracle and/or its affiliates.
+ Copyright (c) 2002, 2013, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -28,11 +28,20 @@
@endverbatim
*/
+
+/**
+ Item which stores (x,y,...) and ROW(x,y,...).
+ Note that this can be recursive: ((x,y),(z,t)) is a ROW of ROWs.
+*/
class Item_row: public Item,
private Item_args,
private Used_tables_and_const_cache
{
table_map not_null_tables_cache;
+ /**
+ If elements are made only of constants, of which one or more are
+ NULL. For example, this item is (1,2,NULL), or ( (1,NULL), (2,3) ).
+ */
bool with_null;
public:
Item_row(THD *thd, List<Item> &list):
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index f22716d3d81..8745baa8c69 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -58,6 +58,8 @@ Item_subselect::Item_subselect(THD *thd_arg):
{
DBUG_ENTER("Item_subselect::Item_subselect");
DBUG_PRINT("enter", ("this: 0x%lx", (ulong) this));
+ sortbuffer.str= 0;
+
#ifndef DBUG_OFF
exec_counter= 0;
#endif
@@ -153,6 +155,9 @@ void Item_subselect::cleanup()
if (engine)
engine->cleanup();
reset();
+ filesort_buffer.free_sort_buffer();
+ my_free(sortbuffer.str);
+
value_assigned= 0;
expr_cache= 0;
forced_const= FALSE;
@@ -5916,7 +5921,7 @@ int subselect_partial_match_engine::exec()
/* Search for a complete match. */
if ((lookup_res= lookup_engine->index_lookup()))
{
- /* An error occured during lookup(). */
+ /* An error occurred during lookup(). */
item_in->value= 0;
item_in->null_value= 0;
return lookup_res;
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index 1b450044954..58b5a948048 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -95,6 +95,9 @@ public:
subselect_engine *engine;
/* unit of subquery */
st_select_lex_unit *unit;
+  /* Cached buffers used when calling filesort in subqueries */
+ Filesort_buffer filesort_buffer;
+ LEX_STRING sortbuffer;
/* A reference from inside subquery predicate to somewhere outside of it */
class Ref_to_outside : public Sql_alloc
{
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 5b98ea801f0..0c85cf53e18 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -29,6 +29,7 @@
#include <my_global.h>
#include "sql_priv.h"
#include "sql_select.h"
+#include "uniques.h"
/**
Calculate the affordable RAM limit for structures like TREE or Unique
@@ -1187,8 +1188,7 @@ void Item_sum_hybrid::setup_hybrid(THD *thd, Item *item, Item *value_arg)
}
-Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
- uint convert_blob_length)
+Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table)
{
Field *field;
MEM_ROOT *mem_root;
@@ -1196,9 +1196,9 @@ Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
if (args[0]->type() == Item::FIELD_ITEM)
{
field= ((Item_field*) args[0])->field;
-
+
if ((field= create_tmp_field_from_field(table->in_use, field, name, table,
- NULL, convert_blob_length)))
+ NULL)))
field->flags&= ~NOT_NULL_FLAG;
return field;
}
@@ -1224,7 +1224,7 @@ Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
Field::NONE, name, decimals);
break;
default:
- return Item_sum::create_tmp_field(group, table, convert_blob_length);
+ return Item_sum::create_tmp_field(group, table);
}
if (field)
field->init(table);
@@ -1582,8 +1582,7 @@ Item *Item_sum_avg::copy_or_same(THD* thd)
}
-Field *Item_sum_avg::create_tmp_field(bool group, TABLE *table,
- uint convert_blob_len)
+Field *Item_sum_avg::create_tmp_field(bool group, TABLE *table)
{
Field *field;
MEM_ROOT *mem_root= table->in_use->mem_root;
@@ -1809,8 +1808,7 @@ Item *Item_sum_variance::copy_or_same(THD* thd)
If we're grouping, then we need some space to serialize variables into, to
pass around.
*/
-Field *Item_sum_variance::create_tmp_field(bool group, TABLE *table,
- uint convert_blob_len)
+Field *Item_sum_variance::create_tmp_field(bool group, TABLE *table)
{
Field *field;
if (group)
@@ -2991,6 +2989,11 @@ int dump_leaf_key(void* key_arg, element_count count __attribute__((unused)),
ER_THD(thd, ER_CUT_VALUE_GROUP_CONCAT),
item->row_count);
+ /**
+    Clear the blob_storage truncation flag here to avoid duplicated
+    warnings in Item_func_group_concat::val_str().
+ */
+ if (table && table->blob_storage)
+ table->blob_storage->set_truncated_value(false);
return 1;
}
return 0;
@@ -3128,6 +3131,8 @@ void Item_func_group_concat::cleanup()
if (table)
{
THD *thd= table->in_use;
+ if (table->blob_storage)
+ delete table->blob_storage;
free_tmp_table(thd, table);
table= 0;
if (tree)
@@ -3195,6 +3200,8 @@ void Item_func_group_concat::clear()
reset_tree(tree);
if (unique_filter)
unique_filter->reset();
+ if (table && table->blob_storage)
+ table->blob_storage->reset();
/* No need to reset the table as we never call write_row */
}
@@ -3321,6 +3328,7 @@ bool Item_func_group_concat::setup(THD *thd)
{
List<Item> list;
SELECT_LEX *select_lex= thd->lex->current_select;
+ const bool order_or_distinct= MY_TEST(arg_count_order > 0 || distinct);
DBUG_ENTER("Item_func_group_concat::setup");
/*
@@ -3333,9 +3341,6 @@ bool Item_func_group_concat::setup(THD *thd)
if (!(tmp_table_param= new TMP_TABLE_PARAM))
DBUG_RETURN(TRUE);
- /* We'll convert all blobs to varchar fields in the temporary table */
- tmp_table_param->convert_blob_length= max_length *
- collation.collation->mbmaxlen;
/* Push all not constant fields to the list and create a temp table */
always_null= 0;
for (uint i= 0; i < arg_count_field; i++)
@@ -3375,18 +3380,9 @@ bool Item_func_group_concat::setup(THD *thd)
count_field_types(select_lex, tmp_table_param, all_fields, 0);
tmp_table_param->force_copy_fields= force_copy_fields;
DBUG_ASSERT(table == 0);
- if (arg_count_order > 0 || distinct)
+ if (order_or_distinct)
{
/*
- Currently we have to force conversion of BLOB values to VARCHAR's
- if we are to store them in TREE objects used for ORDER BY and
- DISTINCT. This leads to truncation if the BLOB's size exceeds
- Field_varstring::MAX_SIZE.
- */
- set_if_smaller(tmp_table_param->convert_blob_length,
- Field_varstring::MAX_SIZE);
-
- /*
Force the create_tmp_table() to convert BIT columns to INT
      as we cannot compare two table records containing BIT fields
      stored in the tree used for distinct/order by.
@@ -3419,6 +3415,13 @@ bool Item_func_group_concat::setup(THD *thd)
table->file->extra(HA_EXTRA_NO_ROWS);
table->no_rows= 1;
+ /**
+ Initialize blob_storage if GROUP_CONCAT is used
+ with ORDER BY | DISTINCT and BLOB field count > 0.
+ */
+ if (order_or_distinct && table->s->blob_fields)
+ table->blob_storage= new Blob_mem_storage();
+
/*
Need sorting or uniqueness: init tree and choose a function to sort.
Don't reserve space for NULLs: if any of gconcat arguments is NULL,
@@ -3471,6 +3474,16 @@ String* Item_func_group_concat::val_str(String* str)
if (no_appended && tree)
/* Tree is used for sorting as in ORDER BY */
tree_walk(tree, &dump_leaf_key, this, left_root_right);
+
+ if (table && table->blob_storage &&
+ table->blob_storage->is_truncated_value())
+ {
+ warning_for_row= true;
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_CUT_VALUE_GROUP_CONCAT, ER(ER_CUT_VALUE_GROUP_CONCAT),
+ row_count);
+ }
+
return &result;
}
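
The GROUP_CONCAT changes above create a Blob_mem_storage for the tmp table when ORDER BY or DISTINCT is used and the table has BLOB fields, and use a truncation flag on it so that at most one truncation warning is produced per result (dump_leaf_key() clears the flag after warning, val_str() checks it afterwards). Blob_mem_storage itself is defined outside this diff; a hypothetical standalone sketch of the flag protocol:

    #include <string>

    // Hypothetical sketch only: mirrors the reset()/set_truncated_value()/
    // is_truncated_value() calls seen above, not the real class.
    class BlobStorageSketch
    {
      bool truncated= false;
    public:
      void reset() { truncated= false; }
      void set_truncated_value(bool v) { truncated= v; }
      bool is_truncated_value() const { return truncated; }

      std::string store(const std::string &value, size_t max_len)
      {
        if (value.size() <= max_len)
          return value;
        truncated= true;                 // remembered until a warning is issued
        return value.substr(0, max_len);
      }
    };
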
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 68b034c9213..811e9d5c59c 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -481,11 +481,9 @@ public:
}
virtual void make_unique() { force_copy_fields= TRUE; }
Item *get_tmp_table_item(THD *thd);
- Field *create_tmp_field(bool group, TABLE *table,
- uint convert_blob_length)
+ Field *create_tmp_field(bool group, TABLE *table)
{
- return Item::create_tmp_field(group, table, convert_blob_length,
- MY_INT32_NUM_DECIMAL_DIGITS);
+ return Item::create_tmp_field(group, table, MY_INT32_NUM_DECIMAL_DIGITS);
}
virtual bool collect_outer_ref_processor(uchar *param);
bool init_sum_func_check(THD *thd);
@@ -855,7 +853,7 @@ public:
return has_with_distinct() ? "avg(distinct " : "avg(";
}
Item *copy_or_same(THD* thd);
- Field *create_tmp_field(bool group, TABLE *table, uint convert_blob_length);
+ Field *create_tmp_field(bool group, TABLE *table);
void cleanup()
{
count= 0;
@@ -911,7 +909,7 @@ public:
const char *func_name() const
{ return sample ? "var_samp(" : "variance("; }
Item *copy_or_same(THD* thd);
- Field *create_tmp_field(bool group, TABLE *table, uint convert_blob_length);
+ Field *create_tmp_field(bool group, TABLE *table);
enum Item_result result_type () const { return REAL_RESULT; }
enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE;}
void cleanup()
@@ -989,8 +987,7 @@ protected:
bool any_value() { return was_values; }
void no_rows_in_result();
void restore_to_before_no_rows_in_result();
- Field *create_tmp_field(bool group, TABLE *table,
- uint convert_blob_length);
+ Field *create_tmp_field(bool group, TABLE *table);
};
@@ -1096,7 +1093,6 @@ public:
fixed= true;
}
table_map used_tables() const { return (table_map) 1L; }
- Field *get_tmp_table_field() { DBUG_ASSERT(0); return NULL; }
void set_result_field(Field *) { DBUG_ASSERT(0); }
void save_in_result_field(bool no_conversions) { DBUG_ASSERT(0); }
};
diff --git a/sql/key.cc b/sql/key.cc
index 1b00e951de0..31b65adabe9 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -328,7 +328,7 @@ bool key_cmp_if_same(TABLE *table,const uchar *key,uint idx,uint key_length)
}
if (cs->coll->strnncollsp(cs,
(const uchar*) key, length,
- (const uchar*) pos, char_length, 0))
+ (const uchar*) pos, char_length))
return 1;
continue;
}
@@ -891,8 +891,7 @@ bool key_buf_cmp(KEY *key_info, uint used_key_parts,
if (length1 != length2 ||
cs->coll->strnncollsp(cs,
pos1 + pack_length, byte_len1,
- pos2 + pack_length, byte_len2,
- 1))
+ pos2 + pack_length, byte_len2))
return TRUE;
key1+= pack_length; key2+= pack_length;
}
diff --git a/sql/lex.h b/sql/lex.h
index 22ff4e6d360..da5fa2de137 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -470,6 +470,7 @@ static SYMBOL symbols[] = {
{ "REAL", SYM(REAL)},
{ "REBUILD", SYM(REBUILD_SYM)},
{ "RECOVER", SYM(RECOVER_SYM)},
+ { "RECURSIVE", SYM(RECURSIVE_SYM)},
{ "REDO_BUFFER_SIZE", SYM(REDO_BUFFER_SIZE_SYM)},
{ "REDOFILE", SYM(REDOFILE_SYM)},
{ "REDUNDANT", SYM(REDUNDANT_SYM)},
diff --git a/sql/log.cc b/sql/log.cc
index 33bd48b3c21..dc8c08bfd36 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -632,7 +632,7 @@ void Log_to_csv_event_handler::cleanup()
indicated in the return value.
@retval FALSE OK
- @retval TRUE error occured
+ @retval TRUE error occurred
*/
bool Log_to_csv_event_handler::
@@ -797,7 +797,7 @@ err:
RETURN
FALSE - OK
- TRUE - error occured
+ TRUE - error occurred
*/
bool Log_to_csv_event_handler::
@@ -1108,7 +1108,7 @@ void Log_to_file_event_handler::flush()
RETURN
FALSE - OK
- TRUE - error occured
+ TRUE - error occurred
*/
bool LOGGER::error_log_print(enum loglevel level, const char *format,
@@ -1266,7 +1266,7 @@ bool LOGGER::flush_general_log()
RETURN
FALSE OK
- TRUE error occured
+ TRUE error occurred
*/
bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length,
@@ -2839,7 +2839,7 @@ void MYSQL_QUERY_LOG::reopen_file()
RETURN
     FALSE - OK
- TRUE - error occured
+ TRUE - error occurred
*/
bool MYSQL_QUERY_LOG::write(time_t event_time, const char *user_host,
@@ -2941,7 +2941,7 @@ err:
RETURN
FALSE - OK
- TRUE - error occured
+ TRUE - error occurred
*/
bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
@@ -6403,7 +6403,7 @@ binlog_checkpoint_callback(void *cookie)
/*
For every supporting engine, we increment the xid_count and issue a
commit_checkpoint_request(). Then we can count when all
- commit_checkpoint_notify() callbacks have occured, and then log a new
+ commit_checkpoint_notify() callbacks have occurred, and then log a new
binlog checkpoint event.
*/
mysql_bin_log.mark_xids_active(entry->binlog_id, 1);
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 623817498b2..820d49c220e 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1392,9 +1392,9 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet,
if (packet->append(file, data_len - LOG_EVENT_MINIMAL_HEADER_LEN))
{
/*
- Fatal error occured when appending rest of the event
+ Fatal error occurred when appending rest of the event
to packet, possible failures:
- 1. EOF occured when reading from file, it's really an error
+ 1. EOF occurred when reading from file, it's really an error
as there's supposed to be more bytes available.
file->error will have been set to number of bytes left to read
2. Read was interrupted, file->error would normally be set to -1
@@ -4428,8 +4428,18 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
if (thd->m_digest != NULL)
thd->m_digest->reset(thd->m_token_array, max_digest_length);
+ if (thd->slave_thread)
+ {
+ /*
+ The opt_log_slow_slave_statements variable can be changed
+ dynamically, so we have to set the sql_log_slow respectively.
+ */
+ thd->variables.sql_log_slow= opt_log_slow_slave_statements;
+ }
+
thd->enable_slow_log= thd->variables.sql_log_slow;
- mysql_parse(thd, thd->query(), thd->query_length(), &parser_state);
+ mysql_parse(thd, thd->query(), thd->query_length(), &parser_state,
+ FALSE);
/* Finalize server status flags after executing a statement. */
thd->update_server_status();
log_slow_statement(thd);
@@ -4513,7 +4523,7 @@ compare_errors:
"Error on master: message (format)='%s' error code=%d ; "
"Error on slave: actual message='%s', error code=%d. "
"Default database: '%s'. Query: '%s'",
- ER_SAFE_THD(thd, expected_error),
+ ER_THD(thd, expected_error),
expected_error,
actual_error ? thd->get_stmt_da()->message() : "no error",
actual_error,
@@ -7463,6 +7473,7 @@ bool slave_execute_deferred_events(THD *thd)
return res;
res= rgi->deferred_events->execute(rgi);
+ rgi->deferred_events->rewind();
return res;
}
@@ -11475,7 +11486,10 @@ Rows_log_event::write_row(rpl_group_info *rgi,
/* unpack row into table->record[0] */
if ((error= unpack_current_row(rgi)))
+ {
+ table->file->print_error(error, MYF(0));
DBUG_RETURN(error);
+ }
if (m_curr_row == m_rows_buf && !invoke_triggers)
{
@@ -12483,8 +12497,8 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi)
We need to read the second image in the event of error to be
able to skip to the next pair of updates
*/
- m_curr_row= m_curr_row_end;
- unpack_current_row(rgi, &m_cols_ai);
+ if ((m_curr_row= m_curr_row_end))
+ unpack_current_row(rgi, &m_cols_ai);
thd_proc_info(thd, tmp);
return error;
}
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index 387534b1bed..ff2f7b156d5 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -248,7 +248,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi)
}
if (error)
- { /* error has occured during the transaction */
+ { /* error has occurred during the transaction */
rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL,
"Error in %s event: error during transaction execution "
"on table %s.%s. %s",
@@ -1593,7 +1593,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
} // if (table)
if (error)
- { /* error has occured during the transaction */
+ { /* error has occurred during the transaction */
rli->report(ERROR_LEVEL, thd->net.last_errno, NULL,
"Error in %s event: error during transaction execution "
"on table %s.%s. %s",
diff --git a/sql/my_apc.cc b/sql/my_apc.cc
index 91f5cd3f39c..dcb8503b880 100644
--- a/sql/my_apc.cc
+++ b/sql/my_apc.cc
@@ -119,7 +119,7 @@ void init_show_explain_psi_keys(void)
@retval FALSE - Ok, the call has been made
   @retval TRUE - Call wasn't made (either the target is in disabled state or
- timeout occured)
+ timeout occurred)
*/
bool Apc_target::make_apc_call(THD *caller_thd, Apc_call *call,
diff --git a/sql/mysql_install_db.cc b/sql/mysql_install_db.cc
index 9b4f45a9971..c39789f7c97 100644
--- a/sql/mysql_install_db.cc
+++ b/sql/mysql_install_db.cc
@@ -119,10 +119,10 @@ static void die(const char *fmt, ...)
if (verbose_errors)
{
fprintf(stderr,
- "http://kb.askmonty.org/v/installation-issues-on-windows contains some help\n"
+ "https://mariadb.com/kb/en/installation-issues-on-windows contains some help\n"
"for solving the most common problems. If this doesn't help you, please\n"
- "leave a comment in the Knowledgebase or file a bug report at\n"
- "http://mariadb.org/jira");
+ "leave a comment in the Knowledge Base or file a bug report at\n"
+ "https://jira.mariadb.org");
}
fflush(stderr);
va_end(args);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 56f882a4252..f53a41ead18 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -363,6 +363,7 @@ static bool volatile select_thread_in_use, signal_thread_in_use;
static volatile bool ready_to_exit;
static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0;
static my_bool opt_short_log_format= 0, opt_silent_startup= 0;
+
uint kill_cached_threads;
static uint wake_thread;
ulong max_used_connections;
@@ -389,6 +390,7 @@ static DYNAMIC_ARRAY all_options;
bool opt_bin_log, opt_bin_log_used=0, opt_ignore_builtin_innodb= 0;
my_bool opt_log, debug_assert_if_crashed_table= 0, opt_help= 0;
+my_bool debug_assert_on_not_freed_memory= 0;
my_bool disable_log_notes;
static my_bool opt_abort;
ulonglong log_output_options;
@@ -2232,13 +2234,12 @@ void clean_up(bool print_message)
if (print_message && my_default_lc_messages && server_start_time)
sql_print_information(ER_DEFAULT(ER_SHUTDOWN_COMPLETE),my_progname);
- cleanup_errmsgs();
MYSQL_CALLBACK(thread_scheduler, end, ());
thread_scheduler= 0;
mysql_library_end();
finish_client_errs();
- (void) my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST); // finish server errs
- DBUG_PRINT("quit", ("Error messages freed"));
+ cleanup_errmsgs();
+ free_error_messages();
/* Tell main we are ready */
logger.cleanup_end();
sys_var_end();
@@ -2993,6 +2994,7 @@ void unlink_thd(THD *thd)
static bool cache_thread()
{
+ struct timespec abstime;
DBUG_ENTER("cache_thread");
mysql_mutex_lock(&LOCK_thread_cache);
@@ -3011,8 +3013,26 @@ static bool cache_thread()
PSI_THREAD_CALL(delete_current_thread)();
#endif
+#ifndef DBUG_OFF
+ while (_db_is_pushed_())
+ _db_pop_();
+#endif
+
+ set_timespec(abstime, THREAD_CACHE_TIMEOUT);
while (!abort_loop && ! wake_thread && ! kill_cached_threads)
- mysql_cond_wait(&COND_thread_cache, &LOCK_thread_cache);
+ {
+ int error= mysql_cond_timedwait(&COND_thread_cache, &LOCK_thread_cache,
+ &abstime);
+ if (error == ETIMEDOUT || error == ETIME)
+ {
+ /*
+ If timeout, end thread.
+ If a new thread is requested (wake_thread is set), we will handle
+ the call, even if we got a timeout (as we are already awake and free)
+ */
+ break;
+ }
+ }
cached_thread_count--;
if (kill_cached_threads)
mysql_cond_signal(&COND_flush_thread_cache);
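
cache_thread() above replaces the unbounded condition wait with a timed one, so an idle cached thread exits after THREAD_CACHE_TIMEOUT instead of lingering forever, while a wake-up that races with the timeout is still served. A minimal standalone sketch of the same pattern with the C++ standard library (a single mutex-protected "work available" flag stands in for wake_thread/abort_loop):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Returns true if work arrived before the timeout; false means the cached
    // thread should simply exit. A wake-up that coincides with the timeout is
    // still honoured because the predicate is re-checked after wait_for().
    bool wait_for_work(std::mutex &m, std::condition_variable &cv,
                       bool &work_available, std::chrono::seconds timeout)
    {
      std::unique_lock<std::mutex> lock(m);
      return cv.wait_for(lock, timeout, [&] { return work_available; });
    }
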
@@ -3884,6 +3904,7 @@ SHOW_VAR com_status_vars[]= {
{"kill", STMT_STATUS(SQLCOM_KILL)},
{"load", STMT_STATUS(SQLCOM_LOAD)},
{"lock_tables", STMT_STATUS(SQLCOM_LOCK_TABLES)},
+ {"multi", COM_STATUS(com_multi)},
{"optimize", STMT_STATUS(SQLCOM_OPTIMIZE)},
{"preload_keys", STMT_STATUS(SQLCOM_PRELOAD_KEYS)},
{"prepare_sql", STMT_STATUS(SQLCOM_PREPARE)},
@@ -4084,7 +4105,8 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific)
(longlong) thd->status_var.local_memory_used,
size));
thd->status_var.local_memory_used+= size;
- DBUG_ASSERT((longlong) thd->status_var.local_memory_used >= 0);
+ DBUG_ASSERT((longlong) thd->status_var.local_memory_used >= 0 ||
+ !debug_assert_on_not_freed_memory);
}
}
}
@@ -4316,7 +4338,7 @@ static int init_common_variables()
of SQLCOM_ constants.
*/
compile_time_assert(sizeof(com_status_vars)/sizeof(com_status_vars[0]) - 1 ==
- SQLCOM_END + 10);
+ SQLCOM_END + 11);
#endif
if (get_options(&remaining_argc, &remaining_argv))
@@ -5989,6 +6011,8 @@ int mysqld_main(int argc, char **argv)
mysqld_port,
MYSQL_COMPILATION_COMMENT);
+ sql_print_information(ER_DEFAULT(ER_EXTRA_TEST)); // QQ
+
// try to keep fd=0 busy
if (!freopen(IF_WIN("NUL","/dev/null"), "r", stdin))
{
@@ -6049,7 +6073,7 @@ int mysqld_main(int argc, char **argv)
CloseHandle(hEventShutdown);
}
#endif
-#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
+#if (defined(HAVE_OPENSSL) && !defined(HAVE_YASSL)) && !defined(EMBEDDED_LIBRARY)
ERR_remove_state(0);
#endif
mysqld_exit(0);
@@ -7297,6 +7321,13 @@ struct my_option my_long_options[]=
&opt_sporadic_binlog_dump_fail, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
0},
#endif /* HAVE_REPLICATION */
+#ifndef DBUG_OFF
+ {"debug-assert-on-not-freed-memory", 0,
+ "Assert if we found problems with memory allocation",
+ &debug_assert_on_not_freed_memory,
+ &debug_assert_on_not_freed_memory, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0,
+ 0},
+#endif /* DBUG_OFF */
/* default-storage-engine should have "MyISAM" as def_value. Instead
of initializing it here it is done in init_common_variables() due
to a compiler bug in Sun Studio compiler. */
@@ -7393,14 +7424,6 @@ struct my_option my_long_options[]=
"Don't log extra information to update and slow-query logs.",
&opt_short_log_format, &opt_short_log_format,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"log-slow-admin-statements", 0,
- "Log slow OPTIMIZE, ANALYZE, ALTER and other administrative statements to "
- "the slow log if it is open.", &opt_log_slow_admin_statements,
- &opt_log_slow_admin_statements, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"log-slow-slave-statements", 0,
- "Log slow statements executed by slave thread to the slow log if it is open.",
- &opt_log_slow_slave_statements, &opt_log_slow_slave_statements,
- 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"log-tc", 0,
"Path to transaction coordinator log (used for transactions that affect "
"more than one storage engine, when binary log is disabled).",
diff --git a/sql/mysqld.h b/sql/mysqld.h
index ed9b711a4d1..ef4a0d6a47a 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -119,6 +119,7 @@ extern bool opt_disable_networking, opt_skip_show_db;
extern bool opt_skip_name_resolve;
extern bool opt_ignore_builtin_innodb;
extern my_bool opt_character_set_client_handshake;
+extern my_bool debug_assert_on_not_freed_memory;
extern bool volatile abort_loop;
extern bool volatile in_bootstrap;
extern uint connection_count;
@@ -666,15 +667,16 @@ enum enum_query_type
/// If NULLIF(a,b) should print itself as
/// CASE WHEN a_for_comparison=b THEN NULL ELSE a_for_return_value END
/// when "a" was replaced to two different items
- /// (e.g. by equal fields propagation in optimize_cond()).
- /// The default behaviour is to print as NULLIF(a_for_return, b)
- /// which should be Ok for SHOW CREATE {VIEW|PROCEDURE|FUNCTION}
- /// as they are not affected by WHERE optimization.
- QT_ITEM_FUNC_NULLIF_TO_CASE= (1 <<6),
+ /// (e.g. by equal fields propagation in optimize_cond())
+ /// or always as NULLIF(a, b).
+ /// The default behaviour is to use CASE syntax when
+ /// a_for_return_value is not the same as a_for_comparison.
+ /// SHOW CREATE {VIEW|PROCEDURE|FUNCTION} and other cases where the
+ /// original representation is required, should set this flag.
+ QT_ITEM_ORIGINAL_FUNC_NULLIF= (1 <<6),
/// This value means focus on readability, not on ability to parse back, etc.
QT_EXPLAIN= QT_TO_SYSTEM_CHARSET |
- QT_ITEM_FUNC_NULLIF_TO_CASE |
QT_ITEM_IDENT_SKIP_CURRENT_DATABASE |
QT_ITEM_CACHE_WRAPPER_SKIP_DETAILS |
QT_ITEM_SUBSELECT_ID_ONLY,
@@ -683,7 +685,7 @@ enum enum_query_type
/// Be more detailed than QT_EXPLAIN.
/// Perhaps we should eventually include QT_ITEM_IDENT_SKIP_CURRENT_DATABASE
/// here, as it would give better readable results
- QT_EXPLAIN_EXTENDED= QT_TO_SYSTEM_CHARSET | QT_ITEM_FUNC_NULLIF_TO_CASE
+ QT_EXPLAIN_EXTENDED= QT_TO_SYSTEM_CHARSET
};
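
With the flag renamed and its meaning inverted, the default is now the CASE form whenever the two copies of the first NULLIF argument have diverged, and printing code that must reproduce the original NULLIF(a,b) text opts in explicitly. The stored-procedure printers later in this patch use exactly this pattern; a one-line sketch, where item and str stand for whatever Item* and String* are in scope:

/* Print an expression, keeping NULLIF(a,b) in its original form */
item->print(str, enum_query_type(QT_ORDINARY | QT_ITEM_ORIGINAL_FUNC_NULLIF));
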
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index d81c89fe534..f0284462206 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -121,6 +121,8 @@ extern my_bool thd_net_is_killed();
static my_bool net_write_buff(NET *, const uchar *, ulong);
+my_bool net_allocate_new_packet(NET *net, void *thd, uint my_flags);
+
/** Init with packet info. */
my_bool my_net_init(NET *net, Vio *vio, void *thd, uint my_flags)
@@ -129,14 +131,12 @@ my_bool my_net_init(NET *net, Vio *vio, void *thd, uint my_flags)
DBUG_PRINT("enter", ("my_flags: %u", my_flags));
net->vio = vio;
my_net_local_init(net); /* Set some limits */
- if (!(net->buff=(uchar*) my_malloc((size_t) net->max_packet+
- NET_HEADER_SIZE + COMP_HEADER_SIZE +1,
- MYF(MY_WME | my_flags))))
+
+ if (net_allocate_new_packet(net, thd, my_flags))
DBUG_RETURN(1);
- net->buff_end=net->buff+net->max_packet;
+
net->error=0; net->return_status=0;
net->pkt_nr=net->compress_pkt_nr=0;
- net->write_pos=net->read_pos = net->buff;
net->last_error[0]=0;
net->compress=0; net->reading_or_writing=0;
net->where_b = net->remain_in_buf=0;
@@ -165,6 +165,18 @@ my_bool my_net_init(NET *net, Vio *vio, void *thd, uint my_flags)
DBUG_RETURN(0);
}
+my_bool net_allocate_new_packet(NET *net, void *thd, uint my_flags)
+{
+ DBUG_ENTER("net_allocate_new_packet");
+ if (!(net->buff=(uchar*) my_malloc((size_t) net->max_packet+
+ NET_HEADER_SIZE + COMP_HEADER_SIZE +1,
+ MYF(MY_WME | my_flags))))
+ DBUG_RETURN(1);
+ net->buff_end=net->buff+net->max_packet;
+ net->write_pos=net->read_pos = net->buff;
+ DBUG_RETURN(0);
+}
+
void net_end(NET *net)
{
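
my_net_init() above now delegates buffer setup to net_allocate_new_packet(), so other code can give an already-initialized NET a fresh packet buffer, for instance after handing the previous buffer off elsewhere. A hedged usage sketch; the calling context is hypothetical, only the function itself is part of this patch:

/* Returns 1 and leaves net->buff set to NULL if the allocation fails. */
if (net_allocate_new_packet(net, thd, my_flags))
  return 1;
/* net->buff, net->buff_end, net->read_pos and net->write_pos now refer
   to the newly allocated buffer. */
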
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index ed7e9a56ae5..7169a3eda81 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -114,12 +114,11 @@
#include "sql_parse.h" // check_stack_overrun
#include "sql_partition.h" // get_part_id_func, PARTITION_ITERATOR,
// struct partition_info, NOT_A_PARTITION_ID
-#include "sql_base.h" // free_io_cache
#include "records.h" // init_read_record, end_read_record
#include <m_ctype.h>
#include "sql_select.h"
#include "sql_statistics.h"
-#include "filesort.h" // filesort_free_buffers
+#include "uniques.h"
#ifndef EXTRA_DEBUG
#define test_rb_tree(A,B) {}
@@ -1154,6 +1153,7 @@ int imerge_list_and_tree(RANGE_OPT_PARAM *param,
SQL_SELECT *make_select(TABLE *head, table_map const_tables,
table_map read_tables, COND *conds,
+ SORT_INFO *filesort,
bool allow_null_cond,
int *error)
{
@@ -1174,13 +1174,16 @@ SQL_SELECT *make_select(TABLE *head, table_map const_tables,
select->head=head;
select->cond= conds;
- if (head->sort.io_cache)
+ if (filesort && my_b_inited(&filesort->io_cache))
{
- select->file= *head->sort.io_cache;
+ /*
+ Hijack the filesort io_cache for make_select
+ SQL_SELECT will be responsible for ensuring that it's properly freed.
+ */
+ select->file= filesort->io_cache;
select->records=(ha_rows) (select->file.end_of_file/
head->file->ref_length);
- my_free(head->sort.io_cache);
- head->sort.io_cache=0;
+ my_b_clear(&filesort->io_cache);
}
DBUG_RETURN(select);
}
@@ -1393,7 +1396,6 @@ QUICK_INDEX_SORT_SELECT::~QUICK_INDEX_SORT_SELECT()
delete pk_quick_select;
/* It's ok to call the next two even if they are already deinitialized */
end_read_record(&read_record);
- free_io_cache(head);
free_root(&alloc,MYF(0));
DBUG_VOID_RETURN;
}
@@ -10674,7 +10676,6 @@ int read_keys_and_merge_scans(THD *thd,
else
{
unique->reset();
- filesort_free_buffers(head, false);
}
DBUG_ASSERT(file->ref_length == unique->get_size());
@@ -10727,7 +10728,7 @@ int read_keys_and_merge_scans(THD *thd,
/*
Ok all rowids are in the Unique now. The next call will initialize
- head->sort structure so it can be used to iterate through the rowids
+ the unique structure so it can be used to iterate through the rowids
sequence.
*/
result= unique->get(head);
@@ -10736,7 +10737,8 @@ int read_keys_and_merge_scans(THD *thd,
*/
if (enabled_keyread)
head->disable_keyread();
- if (init_read_record(read_record, thd, head, (SQL_SELECT*) 0, 1 , 1, TRUE))
+ if (init_read_record(read_record, thd, head, (SQL_SELECT*) 0,
+ &unique->sort, 1 , 1, TRUE))
result= 1;
DBUG_RETURN(result);
@@ -10779,7 +10781,8 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
{
result= HA_ERR_END_OF_FILE;
end_read_record(&read_record);
- free_io_cache(head);
+ // Free things used by sort early. Shouldn't be strictly necessary
+ unique->sort.reset();
/* All rows from Unique have been retrieved, do a clustered PK scan */
if (pk_quick_select)
{
@@ -10814,7 +10817,7 @@ int QUICK_INDEX_INTERSECT_SELECT::get_next()
{
result= HA_ERR_END_OF_FILE;
end_read_record(&read_record);
- free_io_cache(head);
+ unique->sort.reset(); // Free things early
}
DBUG_RETURN(result);
@@ -14618,6 +14621,4 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose)
}
}
-
#endif /* !DBUG_OFF */
-
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 80f4064a529..6970b87f6d8 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -26,6 +26,8 @@
#include "records.h" /* READ_RECORD */
#include "queues.h" /* QUEUE */
+#include "filesort.h" /* SORT_INFO */
+
/*
It is necessary to include set_var.h instead of item.h because there
are dependencies on include order for set_var.h and item.h. This
@@ -1658,6 +1660,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
ha_rows records);
SQL_SELECT *make_select(TABLE *head, table_map const_tables,
table_map read_tables, COND *conds,
+ SORT_INFO* filesort,
bool allow_null_cond, int *error);
bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond);
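
make_select() no longer digs the sort IO_CACHE out of TABLE::sort; the caller passes the filesort result explicitly (or NULL when there is none) and the SQL_SELECT takes over the cache, as the opt_range.cc hunk above shows. A hedged call sketch, assuming head, conds and a SORT_INFO *sort from filesort() are in scope:

int error= 0;
SQL_SELECT *select= make_select(head, const_tables, read_tables, conds,
                                sort /* SORT_INFO* or NULL */,
                                FALSE /* allow_null_cond */, &error);
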
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index c1052869c8f..79b28a5994e 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -4175,7 +4175,6 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
field->null_ptr,
field->null_bit)))
goto err;
- key_part_info->key_part_flag|= HA_END_SPACE_ARE_EQUAL; //todo need this?
}
keyinfo->key_length+= key_part_info->length;
}
@@ -4739,8 +4738,6 @@ int clear_sj_tmp_tables(JOIN *join)
{
if ((res= table->file->ha_delete_all_rows()))
return res; /* purecov: inspected */
- free_io_cache(table);
- filesort_free_buffers(table,0);
}
SJ_MATERIALIZATION_INFO *sjm;
diff --git a/sql/records.cc b/sql/records.cc
index ebda0ed35b0..3995bea6569 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -29,10 +29,10 @@
#include "records.h"
#include "sql_priv.h"
#include "records.h"
-#include "filesort.h" // filesort_free_buffers
#include "opt_range.h" // SQL_SELECT
#include "sql_class.h" // THD
#include "sql_base.h"
+#include "sql_sort.h" // SORT_ADDON_FIELD
static int rr_quick(READ_RECORD *info);
int rr_sequential(READ_RECORD *info);
@@ -182,26 +182,30 @@ bool init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
SQL_SELECT *select,
+ SORT_INFO *filesort,
int use_record_cache, bool print_error,
bool disable_rr_cache)
{
IO_CACHE *tempfile;
+ SORT_ADDON_FIELD *addon_field= filesort ? filesort->addon_field : 0;
DBUG_ENTER("init_read_record");
bzero((char*) info,sizeof(*info));
info->thd=thd;
info->table=table;
info->forms= &info->table; /* Only one table */
+ info->addon_field= addon_field;
if ((table->s->tmp_table == INTERNAL_TMP_TABLE ||
table->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE) &&
- !table->sort.addon_field)
+ !addon_field)
(void) table->file->extra(HA_EXTRA_MMAP);
- if (table->sort.addon_field)
+ if (addon_field)
{
- info->rec_buf= table->sort.addon_buf;
- info->ref_length= table->sort.addon_length;
+ info->rec_buf= (uchar*) filesort->addon_buf.str;
+ info->ref_length= filesort->addon_buf.length;
+ info->unpack= filesort->unpack;
}
else
{
@@ -213,19 +217,20 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
info->print_error=print_error;
info->unlock_row= rr_unlock_row;
info->ignore_not_found_rows= 0;
- table->status=0; /* And it's always found */
+ table->status= 0; /* Rows are always found */
+ tempfile= 0;
if (select && my_b_inited(&select->file))
tempfile= &select->file;
- else
- tempfile= table->sort.io_cache;
- if (tempfile && my_b_inited(tempfile) &&
- !(select && select->quick))
+ else if (filesort && my_b_inited(&filesort->io_cache))
+ tempfile= &filesort->io_cache;
+
+ if (tempfile && !(select && select->quick))
{
DBUG_PRINT("info",("using rr_from_tempfile"));
- info->read_record= (table->sort.addon_field ?
+ info->read_record= (addon_field ?
rr_unpack_from_tempfile : rr_from_tempfile);
- info->io_cache=tempfile;
+ info->io_cache= tempfile;
reinit_io_cache(info->io_cache,READ_CACHE,0L,0,0);
info->ref_pos=table->file->ref;
if (!table->file->inited)
@@ -233,12 +238,12 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
DBUG_RETURN(1);
/*
- table->sort.addon_field is checked because if we use addon fields,
+ addon_field is checked because if we use addon fields,
it doesn't make sense to use cache - we don't read from the table
- and table->sort.io_cache is read sequentially
+ and filesort->io_cache is read sequentially
*/
if (!disable_rr_cache &&
- !table->sort.addon_field &&
+ !addon_field &&
thd->variables.read_rnd_buff_size &&
!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
(table->db_stat & HA_READ_ONLY ||
@@ -263,15 +268,15 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
DBUG_PRINT("info",("using rr_quick"));
info->read_record=rr_quick;
}
- else if (table->sort.record_pointers)
+ else if (filesort && filesort->record_pointers)
{
DBUG_PRINT("info",("using record_pointers"));
if (table->file->ha_rnd_init_with_error(0))
DBUG_RETURN(1);
- info->cache_pos=table->sort.record_pointers;
- info->cache_end=info->cache_pos+
- table->sort.found_records*info->ref_length;
- info->read_record= (table->sort.addon_field ?
+ info->cache_pos= filesort->record_pointers;
+ info->cache_end= (info->cache_pos+
+ filesort->return_rows * info->ref_length);
+ info->read_record= (addon_field ?
rr_unpack_from_buffer : rr_from_pointers);
}
else
@@ -288,7 +293,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
(use_record_cache < 0 &&
!(table->file->ha_table_flags() & HA_NOT_DELETE_WITH_CACHE))))
(void) table->file->extra_opt(HA_EXTRA_CACHE,
- thd->variables.read_buff_size);
+ thd->variables.read_buff_size);
}
/* Condition pushdown to storage engine */
if ((table->file->ha_table_flags() & HA_CAN_TABLE_CONDITION_PUSHDOWN) &&
@@ -311,7 +316,6 @@ void end_read_record(READ_RECORD *info)
}
if (info->table)
{
- filesort_free_buffers(info->table,0);
if (info->table->created)
(void) info->table->file->extra(HA_EXTRA_NO_CACHE);
if (info->read_record != rr_quick) // otherwise quick_range does it
@@ -525,9 +529,8 @@ static int rr_unpack_from_tempfile(READ_RECORD *info)
{
if (my_b_read(info->io_cache, info->rec_buf, info->ref_length))
return -1;
- TABLE *table= info->table;
- (*table->sort.unpack)(table->sort.addon_field, info->rec_buf,
- info->rec_buf + info->ref_length);
+ (*info->unpack)(info->addon_field, info->rec_buf,
+ info->rec_buf + info->ref_length);
return 0;
}
@@ -577,11 +580,9 @@ static int rr_unpack_from_buffer(READ_RECORD *info)
{
if (info->cache_pos == info->cache_end)
return -1; /* End of buffer */
- TABLE *table= info->table;
- (*table->sort.unpack)(table->sort.addon_field, info->cache_pos,
- info->cache_end);
+ (*info->unpack)(info->addon_field, info->cache_pos,
+ info->cache_end);
info->cache_pos+= info->ref_length;
-
return 0;
}
/* cacheing of records from a database */
diff --git a/sql/records.h b/sql/records.h
index a3f0b5eb084..1928acfd4f4 100644
--- a/sql/records.h
+++ b/sql/records.h
@@ -25,6 +25,7 @@ struct TABLE;
class THD;
class SQL_SELECT;
class Copy_field;
+class SORT_INFO;
/**
A context for reading through a single table using a chosen access method:
@@ -60,8 +61,10 @@ struct READ_RECORD
uchar *record;
uchar *rec_buf; /* to read field values after filesort */
uchar *cache,*cache_pos,*cache_end,*read_positions;
+ struct st_sort_addon_field *addon_field; /* Pointer to the fields info */
struct st_io_cache *io_cache;
bool print_error, ignore_not_found_rows;
+ void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *);
/*
SJ-Materialization runtime may need to read fields from the materialized
@@ -74,7 +77,8 @@ public:
};
bool init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form,
- SQL_SELECT *select, int use_record_cache,
+ SQL_SELECT *select, SORT_INFO *sort,
+ int use_record_cache,
bool print_errors, bool disable_rr_cache);
bool init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
bool print_error, uint idx, bool reverse);
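
Every caller of init_read_record() now states explicitly which filesort result, if any, the scan should use; the ACL loading code further down simply passes NULL. A hedged sketch of the usual read loop with the new signature, where table, select and sort are whatever the caller has at hand:

READ_RECORD info;
if (init_read_record(&info, thd, table, select,
                     sort /* SORT_INFO*, or NULL when nothing was sorted */,
                     1, 1 /* print_error */, FALSE /* disable_rr_cache */))
  return true;
while (!info.read_record(&info))
{
  /* process table->record[0] */
}
end_read_record(&info);
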
diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc
index 286162efcb8..df036d0e23f 100644
--- a/sql/rpl_parallel.cc
+++ b/sql/rpl_parallel.cc
@@ -705,7 +705,7 @@ do_retry:
thd->clear_error();
/*
- If we retry due to a deadlock kill that occured during the commit step, we
+ If we retry due to a deadlock kill that occurred during the commit step, we
might have already updated (but not committed) an update of table
mysql.gtid_slave_pos, and cleared the gtid_pending flag. Now we have
rolled back any such update, so we must set the gtid_pending flag back to
@@ -987,7 +987,6 @@ handle_rpl_parallel_thread(void *arg)
thd->client_capabilities = CLIENT_LOCAL_FILES;
thd->net.reading_or_writing= 0;
thd_proc_info(thd, "Waiting for work from main SQL threads");
- thd->set_time();
thd->variables.lock_wait_timeout= LONG_TIMEOUT;
thd->system_thread_info.rpl_sql_info= &sql_info;
/*
@@ -1126,7 +1125,7 @@ handle_rpl_parallel_thread(void *arg)
/*
Register ourself to wait for the previous commit, if we need to do
such registration _and_ that previous commit has not already
- occured.
+ occurred.
*/
register_wait_for_prior_event_group_commit(rgi, entry);
@@ -1187,7 +1186,7 @@ handle_rpl_parallel_thread(void *arg)
{
/*
Do an extra check for (deadlock) kill here. This helps prevent a
- lingering deadlock kill that occured during normal DML processing to
+ lingering deadlock kill that occurred during normal DML processing to
propagate past the mark_start_commit(). If we detect a deadlock only
after mark_start_commit(), we have to unmark, which has at least a
theoretical possibility of leaving a window where it looks like all
diff --git a/sql/rpl_parallel.h b/sql/rpl_parallel.h
index 9bb37f77dbd..c6f77b0144c 100644
--- a/sql/rpl_parallel.h
+++ b/sql/rpl_parallel.h
@@ -319,7 +319,7 @@ struct rpl_parallel_entry {
group here. Then later event groups (with higher sub_id) can know not to
try to start (event groups that already started will be rolled back when
wait_for_prior_commit() returns error).
- The value is ULONGLONG_MAX when no error occured.
+ The value is ULONGLONG_MAX when no error occurred.
*/
uint64 stop_on_error_sub_id;
/*
diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc
index 208b2b61704..f82c5a3982a 100644
--- a/sql/rpl_record.cc
+++ b/sql/rpl_record.cc
@@ -185,7 +185,7 @@ pack_row(TABLE *table, MY_BITMAP const* cols,
@retval HA_ERR_GENERIC
A generic, internal, error caused the unpacking to fail.
- @retval ER_SLAVE_CORRUPT_EVENT
+ @retval HA_ERR_CORRUPT_EVENT
Found error when trying to unpack fields.
*/
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
@@ -349,7 +349,7 @@ unpack_row(rpl_group_info *rgi,
"Could not read field '%s' of table '%s.%s'",
f->field_name, table->s->db.str,
table->s->table_name.str);
- DBUG_RETURN(ER_SLAVE_CORRUPT_EVENT);
+ DBUG_RETURN(HA_ERR_CORRUPT_EVENT);
}
}
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 18f6cbc41fd..b5430c56865 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -739,7 +739,7 @@ int set_var::check(THD *thd)
if ((!value->fixed &&
value->fix_fields(thd, &value)) || value->check_cols(1))
return -1;
- if (var->check_update_type(value->result_type()))
+ if (var->check_update_type(value))
{
my_error(ER_WRONG_TYPE_FOR_VAR, MYF(0), var->name.str);
return -1;
diff --git a/sql/set_var.h b/sql/set_var.h
index b8192e67ca9..cf86ecf18fa 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -137,8 +137,9 @@ public:
bool is_set_stmt_ok() const { return !(flags & NO_SET_STATEMENT); }
bool is_written_to_binlog(enum_var_type type)
{ return type != OPT_GLOBAL && binlog_status == SESSION_VARIABLE_IN_BINLOG; }
- bool check_update_type(Item_result type)
+ bool check_update_type(const Item *item)
{
+ Item_result type= item->result_type();
switch (option.var_type & GET_TYPE_MASK) {
case GET_INT:
case GET_UINT:
@@ -146,7 +147,8 @@ public:
case GET_ULONG:
case GET_LL:
case GET_ULL:
- return type != INT_RESULT;
+ return type != INT_RESULT &&
+ (type != DECIMAL_RESULT || item->decimals != 0);
case GET_STR:
case GET_STR_ALLOC:
return type != STRING_RESULT;
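
Passing the whole Item instead of only its Item_result lets the check inspect the number of decimals, so an integer option now also accepts a DECIMAL value as long as it has no fractional part. A standalone model of the relaxed rule; the names are illustrative, not the server's:

enum result_kind { INT_KIND, DECIMAL_KIND, STRING_KIND, REAL_KIND };

/* true means "wrong type" for an integer-typed option */
static bool wrong_type_for_int_option(result_kind type, unsigned decimals)
{
  return type != INT_KIND &&
         (type != DECIMAL_KIND || decimals != 0);
}
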
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 2ef64a86f45..bcad75de25d 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -6034,7 +6034,7 @@ ER_EVENT_CANNOT_ALTER_IN_THE_PAST
eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was not changed. Specify a time in the future."
ger "Execution Zeitpunkt des Ereignisses in der Vergangenheit liegt, und es war NACH ABSCHLUSS Set nicht erhalten. Die Veranstaltung wurde nicht verändert. Geben Sie einen Zeitpunkt in der Zukunft."
ER_SLAVE_INCIDENT
- eng "The incident %s occured on the master. Message: %-.64s"
+ eng "The incident %s occurred on the master. Message: %-.64s"
ger "Der Vorfall %s passierte auf dem Master. Meldung: %-.64s"
ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT
eng "Table has no partition for some existing values"
@@ -7139,3 +7139,35 @@ ER_KILL_QUERY_DENIED_ERROR
ER_NO_EIS_FOR_FIELD
eng "Engine-independent statistics are not collected for column '%s'"
ukr "Незалежна від типу таблиці статистика не збирається для стовбця '%s'"
+ER_COMMULTI_BADCONTEXT 0A000
+ eng "COM_MULTI can't return a result set in the given context"
+ ger "COM_MULTI kann im gegebenen Kontext keine Ergebnismenge zurückgeben"
+ ukr "COM_MULTI не може повернути результати у цьому контексті"
+ER_BAD_COMMAND_IN_MULTI
+ eng "Command '%s' is not allowed for COM_MULTI"
+ ukr "Команда '%s' не дозволена для COM_MULTI"
+ER_WITH_COL_WRONG_LIST
+ eng "WITH column list and SELECT field list have different column counts"
+ER_DUP_QUERY_NAME
+ eng "Duplicate query name in WITH clause"
+ER_WRONG_ORDER_IN_WITH_CLAUSE
+ eng "The definition of the table '%s' refers to the table '%s' defined later in a non-recursive WITH clause"
+ER_RECURSIVE_QUERY_IN_WITH_CLAUSE
+ eng "Recursive queries in WITH clause are not supported yet"
+
+#
+# Internal errors, not used
+#
+skip-to-error-number 2000
+
+# MySQL 5.7 error numbers starts here
+skip-to-error-number 3000
+
+ER_MYSQL_57_TEST
+ eng "5.7 test"
+
+# MariaDB extra error numbers starts from 4000
+skip-to-error-number 4000
+
+ER_EXTRA_TEST
+ eng "10.2 test"
diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc
index 7151d489c1a..bbe714fc5b4 100644
--- a/sql/signal_handler.cc
+++ b/sql/signal_handler.cc
@@ -100,7 +100,7 @@ extern "C" sig_handler handle_fatal_signal(int sig)
"or misconfigured. This error can also be caused by malfunctioning hardware.\n\n");
my_safe_printf_stderr("%s",
- "To report this bug, see http://kb.askmonty.org/en/reporting-bugs\n\n");
+ "To report this bug, see https://mariadb.com/kb/en/reporting-bugs\n\n");
my_safe_printf_stderr("%s",
"We will try our best to scrape up some info that will hopefully help\n"
@@ -226,7 +226,7 @@ extern "C" sig_handler handle_fatal_signal(int sig)
if (calling_initgroups)
{
my_safe_printf_stderr("%s", "\n"
- "This crash occured while the server was calling initgroups(). This is\n"
+ "This crash occurred while the server was calling initgroups(). This is\n"
"often due to the use of a mysqld that is statically linked against \n"
"glibc and configured to use LDAP in /etc/nsswitch.conf.\n"
"You will need to either upgrade to a version of glibc that does not\n"
diff --git a/sql/slave.cc b/sql/slave.cc
index 12eac9c1082..93506bc2ccd 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -4945,6 +4945,7 @@ err_during_init:
*/
mysql_mutex_lock(&LOCK_active_mi);
if (opt_slave_parallel_threads > 0 &&
+ master_info_index &&// master_info_index is set to NULL on server shutdown
!master_info_index->any_slave_sql_running())
rpl_parallel_inactivate_pool(&global_rpl_thread_pool);
mysql_mutex_unlock(&LOCK_active_mi);
diff --git a/sql/sp.cc b/sql/sp.cc
index 6ec59143720..a518b520786 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -34,6 +34,10 @@
#include <my_user.h>
+/* Used in error handling only */
+#define SP_TYPE_STRING(type) \
+ (type == TYPE_ENUM_FUNCTION ? "FUNCTION" : "PROCEDURE")
+
static int
db_load_routine(THD *thd, stored_procedure_type type, sp_name *name,
sp_head **sphp,
@@ -1007,15 +1011,16 @@ sp_drop_routine_internal(THD *thd, stored_procedure_type type,
followed by an implicit grant (sp_grant_privileges())
and this subsequent call opens and closes mysql.procs_priv.
- @return Error code. SP_OK is returned on success. Other
- SP_ constants are used to indicate about errors.
+ @return Error status.
+ @retval FALSE on success
+ @retval TRUE on error
*/
-int
+bool
sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
{
LEX *lex= thd->lex;
- int ret;
+ bool ret= TRUE;
TABLE *table;
char definer_buf[USER_HOST_BUFF_SIZE];
LEX_STRING definer;
@@ -1040,7 +1045,22 @@ sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
/* Grab an exclusive MDL lock. */
if (lock_object_name(thd, mdl_type, sp->m_db.str, sp->m_name.str))
- DBUG_RETURN(SP_OPEN_TABLE_FAILED);
+ {
+ my_error(ER_BAD_DB_ERROR, MYF(0), sp->m_db.str);
+ DBUG_RETURN(TRUE);
+ }
+
+ /*
+ Check that a database directory with this name
+ exists. Design note: This won't work on virtual databases
+ like information_schema.
+ */
+ if (check_db_dir_existence(sp->m_db.str))
+ {
+ my_error(ER_BAD_DB_ERROR, MYF(0), sp->m_db.str);
+ DBUG_RETURN(TRUE);
+ }
+
/* Reset sql_mode during data dictionary operations. */
thd->variables.sql_mode= 0;
@@ -1049,7 +1069,10 @@ sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
thd->count_cuted_fields= CHECK_FIELD_WARN;
if (!(table= open_proc_table_for_update(thd)))
- ret= SP_OPEN_TABLE_FAILED;
+ {
+ my_error(ER_SP_STORE_FAILED, MYF(0), SP_TYPE_STRING(type),sp->m_name.str);
+ goto done;
+ }
else
{
/* Checking if the routine already exists */
@@ -1065,11 +1088,10 @@ sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SP_ALREADY_EXISTS,
ER_THD(thd, ER_SP_ALREADY_EXISTS),
- type == TYPE_ENUM_FUNCTION ?
- "FUNCTION" : "PROCEDURE",
+ SP_TYPE_STRING(type),
lex->spname->m_name.str);
- ret= SP_OK;
+ ret= FALSE;
// Setting retstr as it is used for logging.
if (sp->m_type == TYPE_ENUM_FUNCTION)
@@ -1078,7 +1100,8 @@ sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
}
else
{
- ret= SP_WRITE_ROW_FAILED;
+ my_error(ER_SP_ALREADY_EXISTS, MYF(0),
+ SP_TYPE_STRING(type), sp->m_name.str);
goto done;
}
}
@@ -1090,7 +1113,8 @@ sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
if (table->s->fields < MYSQL_PROC_FIELD_COUNT)
{
- ret= SP_GET_FIELD_FAILED;
+ my_error(ER_SP_STORE_FAILED, MYF(0),
+ SP_TYPE_STRING(type), sp->m_name.str);
goto done;
}
@@ -1099,12 +1123,12 @@ sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
sp->m_name.str+sp->m_name.length) >
table->field[MYSQL_PROC_FIELD_NAME]->char_length())
{
- ret= SP_BAD_IDENTIFIER;
+ my_error(ER_TOO_LONG_IDENT, MYF(0), sp->m_name.str);
goto done;
}
if (sp->m_body.length > table->field[MYSQL_PROC_FIELD_BODY]->field_length)
{
- ret= SP_BODY_TOO_LONG;
+ my_error(ER_TOO_LONG_BODY, MYF(0), sp->m_name.str);
goto done;
}
@@ -1193,17 +1217,13 @@ sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
if (access == SP_CONTAINS_SQL ||
access == SP_MODIFIES_SQL_DATA)
{
- my_message(ER_BINLOG_UNSAFE_ROUTINE,
- ER_THD(thd, ER_BINLOG_UNSAFE_ROUTINE), MYF(0));
- ret= SP_INTERNAL_ERROR;
+ my_error(ER_BINLOG_UNSAFE_ROUTINE, MYF(0));
goto done;
}
}
if (!(thd->security_ctx->master_access & SUPER_ACL))
{
- my_message(ER_BINLOG_CREATE_ROUTINE_NEED_SUPER,
- ER_THD(thd, ER_BINLOG_CREATE_ROUTINE_NEED_SUPER), MYF(0));
- ret= SP_INTERNAL_ERROR;
+ my_error(ER_BINLOG_CREATE_ROUTINE_NEED_SUPER,MYF(0));
goto done;
}
}
@@ -1234,22 +1254,24 @@ sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
if (store_failed)
{
- ret= SP_FLD_STORE_FAILED;
+ my_error(ER_CANT_CREATE_SROUTINE, MYF(0), sp->m_name.str);
goto done;
}
- ret= SP_OK;
if (table->file->ha_write_row(table->record[0]))
- ret= SP_WRITE_ROW_FAILED;
+ {
+ my_error(ER_SP_ALREADY_EXISTS, MYF(0),
+ SP_TYPE_STRING(type), sp->m_name.str);
+ goto done;
+ }
/* Make change permanent and avoid 'table is marked as crashed' errors */
table->file->extra(HA_EXTRA_FLUSH);
- if (ret == SP_OK)
- sp_cache_invalidate();
+ sp_cache_invalidate();
}
log:
- if (ret == SP_OK && mysql_bin_log.is_open())
+ if (mysql_bin_log.is_open())
{
thd->clear_error();
@@ -1268,7 +1290,7 @@ log:
&(thd->lex->definer->host),
saved_mode))
{
- ret= SP_INTERNAL_ERROR;
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
goto done;
}
/* restore sql_mode when binloging */
@@ -1277,9 +1299,13 @@ log:
if (thd->binlog_query(THD::STMT_QUERY_TYPE,
log_query.ptr(), log_query.length(),
FALSE, FALSE, FALSE, 0))
- ret= SP_INTERNAL_ERROR;
+ {
+ my_error(ER_ERROR_ON_WRITE, MYF(MY_WME), "binary log", -1);
+ goto done;
+ }
thd->variables.sql_mode= 0;
}
+ ret= FALSE;
done:
thd->count_cuted_fields= saved_count_cuted_fields;
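
sp_create_routine() now reports every failure itself through my_error() and returns a plain bool, so the caller (in sql_parse.cc, outside this section) no longer translates SP_ codes into user-visible messages. A hedged sketch of the calling convention; the caller shape is hypothetical:

if (sp_create_routine(thd, type, sp))
  return true;        /* my_error() has already been called inside */
my_ok(thd);           /* success path */
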
diff --git a/sql/sp.h b/sql/sp.h
index 4bfb0577fcc..df60482f8fd 100644
--- a/sql/sp.h
+++ b/sql/sp.h
@@ -126,7 +126,7 @@ sp_exist_routines(THD *thd, TABLE_LIST *procs, bool is_proc);
bool
sp_show_create_routine(THD *thd, stored_procedure_type type, sp_name *name);
-int
+bool
sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp);
int
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index c8ae0028239..d58b51afc5e 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -3276,7 +3276,8 @@ sp_instr_set::print(String *str)
}
str->qs_append(m_offset);
str->qs_append(' ');
- m_value->print(str, QT_ORDINARY);
+ m_value->print(str, enum_query_type(QT_ORDINARY |
+ QT_ITEM_ORIGINAL_FUNC_NULLIF));
}
@@ -3308,9 +3309,11 @@ void
sp_instr_set_trigger_field::print(String *str)
{
str->append(STRING_WITH_LEN("set_trigger_field "));
- trigger_field->print(str, QT_ORDINARY);
+ trigger_field->print(str, enum_query_type(QT_ORDINARY |
+ QT_ITEM_ORIGINAL_FUNC_NULLIF));
str->append(STRING_WITH_LEN(":="));
- value->print(str, QT_ORDINARY);
+ value->print(str, enum_query_type(QT_ORDINARY |
+ QT_ITEM_ORIGINAL_FUNC_NULLIF));
}
/*
@@ -3436,7 +3439,8 @@ sp_instr_jump_if_not::print(String *str)
str->qs_append('(');
str->qs_append(m_cont_dest);
str->qs_append(STRING_WITH_LEN(") "));
- m_expr->print(str, QT_ORDINARY);
+ m_expr->print(str, enum_query_type(QT_ORDINARY |
+ QT_ITEM_ORIGINAL_FUNC_NULLIF));
}
@@ -3532,7 +3536,8 @@ sp_instr_freturn::print(String *str)
str->qs_append(STRING_WITH_LEN("freturn "));
str->qs_append((uint)m_type);
str->qs_append(' ');
- m_value->print(str, QT_ORDINARY);
+ m_value->print(str, enum_query_type(QT_ORDINARY |
+ QT_ITEM_ORIGINAL_FUNC_NULLIF));
}
/*
@@ -4004,7 +4009,8 @@ sp_instr_set_case_expr::print(String *str)
str->qs_append(STRING_WITH_LEN(") "));
str->qs_append(m_case_expr_id);
str->qs_append(' ');
- m_case_expr->print(str, QT_ORDINARY);
+ m_case_expr->print(str, enum_query_type(QT_ORDINARY |
+ QT_ITEM_ORIGINAL_FUNC_NULLIF));
}
uint
diff --git a/sql/sp_head.h b/sql/sp_head.h
index 9022f954d11..65aad1dd5a1 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -607,7 +607,7 @@ public:
instruction for CONTINUE error handlers.
@retval 0 on success,
- @retval other if some error occured
+ @retval other if some error occurred
*/
virtual int execute(THD *thd, uint *nextp) = 0;
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 5b725b5b1fb..90a39a10df9 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -1206,7 +1206,8 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
(void) my_init_dynamic_array(&acl_hosts,sizeof(ACL_HOST), 20, 50, MYF(0));
if ((table= tables[HOST_TABLE].table)) // "host" table may not exist (e.g. in MySQL 5.6.7+)
{
- if (init_read_record(&read_record_info, thd, table, NULL, 1, 1, FALSE))
+ if (init_read_record(&read_record_info, thd, table, NULL, NULL,
+ 1, 1, FALSE))
goto end;
table->use_all_columns();
while (!(read_record_info.read_record(&read_record_info)))
@@ -1261,7 +1262,7 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
freeze_size(&acl_hosts);
if (init_read_record(&read_record_info, thd, table=tables[USER_TABLE].table,
- NULL, 1, 1, FALSE))
+ NULL, NULL, 1, 1, FALSE))
goto end;
table->use_all_columns();
(void) my_init_dynamic_array(&acl_users,sizeof(ACL_USER), 50, 100, MYF(0));
@@ -1523,7 +1524,7 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
freeze_size(&acl_users);
if (init_read_record(&read_record_info, thd, table=tables[DB_TABLE].table,
- NULL, 1, 1, FALSE))
+ NULL, NULL, 1, 1, FALSE))
goto end;
table->use_all_columns();
(void) my_init_dynamic_array(&acl_dbs,sizeof(ACL_DB), 50, 100, MYF(0));
@@ -1593,7 +1594,7 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
if ((table= tables[PROXIES_PRIV_TABLE].table))
{
if (init_read_record(&read_record_info, thd, table,
- NULL, 1, 1, FALSE))
+ NULL, NULL, 1, 1, FALSE))
goto end;
table->use_all_columns();
while (!(read_record_info.read_record(&read_record_info)))
@@ -1622,7 +1623,8 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
if ((table= tables[ROLES_MAPPING_TABLE].table))
{
- if (init_read_record(&read_record_info, thd, table, NULL, 1, 1, FALSE))
+ if (init_read_record(&read_record_info, thd, table, NULL, NULL, 1, 1,
+ FALSE))
goto end;
table->use_all_columns();
/* account for every role mapping */
@@ -10266,7 +10268,7 @@ bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name,
@return
@retval FALSE Success
- @retval TRUE An error occured. Error message not yet sent.
+ @retval TRUE An error occurred. Error message not yet sent.
*/
bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name,
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index 7da2e4c032a..205621c6f9f 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -369,6 +369,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
char* db = table->db;
bool fatal_error=0;
bool open_error;
+ bool collect_eis= FALSE;
DBUG_PRINT("admin", ("table: '%s'.'%s'", table->db, table->table_name));
strxmov(table_name, db, ".", table->table_name, NullS);
@@ -697,53 +698,64 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
{
compl_result_code= result_code= HA_ADMIN_INVALID;
}
+ collect_eis=
+ (table->table->s->table_category == TABLE_CATEGORY_USER &&
+ (get_use_stat_tables_mode(thd) > NEVER ||
+ lex->with_persistent_for_clause));
- if (!lex->column_list)
+ if (collect_eis)
{
- bitmap_clear_all(tab->read_set);
- for (uint fields= 0; *field_ptr; field_ptr++, fields++)
+ if (!lex->column_list)
{
- enum enum_field_types type= (*field_ptr)->type();
- if (type < MYSQL_TYPE_MEDIUM_BLOB ||
- type > MYSQL_TYPE_BLOB)
- bitmap_set_bit(tab->read_set, fields);
- else
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_NO_EIS_FOR_FIELD,
- ER_THD(thd, ER_NO_EIS_FOR_FIELD),
- (*field_ptr)->field_name);
+ bitmap_clear_all(tab->read_set);
+ for (uint fields= 0; *field_ptr; field_ptr++, fields++)
+ {
+ enum enum_field_types type= (*field_ptr)->type();
+ if (type < MYSQL_TYPE_MEDIUM_BLOB ||
+ type > MYSQL_TYPE_BLOB)
+ bitmap_set_bit(tab->read_set, fields);
+ else if (collect_eis)
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_NO_EIS_FOR_FIELD,
+ ER_THD(thd, ER_NO_EIS_FOR_FIELD),
+ (*field_ptr)->field_name);
+ }
}
- }
- else
- {
- int pos;
- LEX_STRING *column_name;
- List_iterator_fast<LEX_STRING> it(*lex->column_list);
+ else
+ {
+ int pos;
+ LEX_STRING *column_name;
+ List_iterator_fast<LEX_STRING> it(*lex->column_list);
- bitmap_clear_all(tab->read_set);
- while ((column_name= it++))
- {
- if (tab->s->fieldnames.type_names == 0 ||
- (pos= find_type(&tab->s->fieldnames, column_name->str,
- column_name->length, 1)) <= 0)
+ bitmap_clear_all(tab->read_set);
+ while ((column_name= it++))
{
- compl_result_code= result_code= HA_ADMIN_INVALID;
- break;
+ if (tab->s->fieldnames.type_names == 0 ||
+ (pos= find_type(&tab->s->fieldnames, column_name->str,
+ column_name->length, 1)) <= 0)
+ {
+ compl_result_code= result_code= HA_ADMIN_INVALID;
+ break;
+ }
+ pos--;
+ enum enum_field_types type= tab->field[pos]->type();
+ if (type < MYSQL_TYPE_MEDIUM_BLOB ||
+ type > MYSQL_TYPE_BLOB)
+ bitmap_set_bit(tab->read_set, pos);
+ else if (collect_eis)
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_NO_EIS_FOR_FIELD,
+ ER_THD(thd, ER_NO_EIS_FOR_FIELD),
+ column_name->str);
}
- pos--;
- enum enum_field_types type= tab->field[pos]->type();
- if (type < MYSQL_TYPE_MEDIUM_BLOB ||
- type > MYSQL_TYPE_BLOB)
- bitmap_set_bit(tab->read_set, pos);
- else
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_NO_EIS_FOR_FIELD,
- ER_THD(thd, ER_NO_EIS_FOR_FIELD),
- column_name->str);
+ tab->file->column_bitmaps_signal();
}
- tab->file->column_bitmaps_signal();
}
-
+ else
+ {
+ DBUG_ASSERT(!lex->column_list);
+ }
+
if (!lex->index_list)
{
tab->keys_in_use_for_query.init(tab->s->keys);
@@ -778,11 +790,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
DBUG_PRINT("admin", ("operator_func returned: %d", result_code));
}
- if (compl_result_code == HA_ADMIN_OK &&
- operator_func == &handler::ha_analyze &&
- table->table->s->table_category == TABLE_CATEGORY_USER &&
- (get_use_stat_tables_mode(thd) > NEVER ||
- lex->with_persistent_for_clause))
+ if (compl_result_code == HA_ADMIN_OK && collect_eis)
{
if (!(compl_result_code=
alloc_statistics_for_table(thd, table->table)) &&
diff --git a/sql/sql_audit.h b/sql/sql_audit.h
index 894108b9713..d6f670538cd 100644
--- a/sql/sql_audit.h
+++ b/sql/sql_audit.h
@@ -108,7 +108,7 @@ void mysql_audit_general_log(THD *thd, time_t time,
if (thd)
{
- event.general_thread_id= thd->thread_id;
+ event.general_thread_id= (unsigned long)thd->thread_id;
event.general_charset= thd->variables.character_set_client;
event.database= thd->db;
event.database_length= thd->db_length;
@@ -158,7 +158,7 @@ void mysql_audit_general(THD *thd, uint event_subtype,
{
event.general_user= user_buff;
event.general_user_length= make_user_name(thd, user_buff);
- event.general_thread_id= thd->thread_id;
+ event.general_thread_id= (unsigned long)thd->thread_id;
event.general_query= thd->query_string.str();
event.general_query_length= thd->query_string.length();
event.general_charset= thd->query_string.charset();
@@ -194,7 +194,7 @@ void mysql_audit_notify_connection_connect(THD *thd)
event.event_subclass= MYSQL_AUDIT_CONNECTION_CONNECT;
event.status= thd->get_stmt_da()->is_error() ?
thd->get_stmt_da()->sql_errno() : 0;
- event.thread_id= thd->thread_id;
+ event.thread_id= (unsigned long)thd->thread_id;
event.user= sctx->user;
event.user_length= safe_strlen(sctx->user);
event.priv_user= sctx->priv_user;
@@ -224,7 +224,7 @@ void mysql_audit_notify_connection_disconnect(THD *thd, int errcode)
event.event_subclass= MYSQL_AUDIT_CONNECTION_DISCONNECT;
event.status= errcode;
- event.thread_id= thd->thread_id;
+ event.thread_id= (unsigned long)thd->thread_id;
event.user= sctx->user;
event.user_length= safe_strlen(sctx->user);
event.priv_user= sctx->priv_user;
@@ -255,7 +255,7 @@ void mysql_audit_notify_connection_change_user(THD *thd)
event.event_subclass= MYSQL_AUDIT_CONNECTION_CHANGE_USER;
event.status= thd->get_stmt_da()->is_error() ?
thd->get_stmt_da()->sql_errno() : 0;
- event.thread_id= thd->thread_id;
+ event.thread_id= (unsigned long)thd->thread_id;
event.user= sctx->user;
event.user_length= safe_strlen(sctx->user);
event.priv_user= sctx->priv_user;
@@ -285,7 +285,7 @@ void mysql_audit_external_lock(THD *thd, TABLE_SHARE *share, int lock)
event.event_subclass= MYSQL_AUDIT_TABLE_LOCK;
event.read_only= lock == F_RDLCK;
- event.thread_id= thd->thread_id;
+ event.thread_id= (unsigned long)thd->thread_id;
event.user= sctx->user;
event.priv_user= sctx->priv_user;
event.priv_host= sctx->priv_host;
@@ -319,7 +319,7 @@ void mysql_audit_create_table(TABLE *table)
event.event_subclass= MYSQL_AUDIT_TABLE_CREATE;
event.read_only= 0;
- event.thread_id= thd->thread_id;
+ event.thread_id= (unsigned long)thd->thread_id;
event.user= sctx->user;
event.priv_user= sctx->priv_user;
event.priv_host= sctx->priv_host;
@@ -351,7 +351,7 @@ void mysql_audit_drop_table(THD *thd, TABLE_LIST *table)
event.event_subclass= MYSQL_AUDIT_TABLE_DROP;
event.read_only= 0;
- event.thread_id= thd->thread_id;
+ event.thread_id= (unsigned long)thd->thread_id;
event.user= sctx->user;
event.priv_user= sctx->priv_user;
event.priv_host= sctx->priv_host;
@@ -384,7 +384,7 @@ void mysql_audit_rename_table(THD *thd, const char *old_db, const char *old_tb,
event.event_subclass= MYSQL_AUDIT_TABLE_RENAME;
event.read_only= 0;
- event.thread_id= thd->thread_id;
+ event.thread_id= (unsigned long)thd->thread_id;
event.user= sctx->user;
event.priv_user= sctx->priv_user;
event.priv_host= sctx->priv_host;
@@ -416,7 +416,7 @@ void mysql_audit_alter_table(THD *thd, TABLE_LIST *table)
event.event_subclass= MYSQL_AUDIT_TABLE_ALTER;
event.read_only= 0;
- event.thread_id= thd->thread_id;
+ event.thread_id= (unsigned long)thd->thread_id;
event.user= sctx->user;
event.priv_user= sctx->priv_user;
event.priv_host= sctx->priv_host;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 6962e759023..08509a0e2bf 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -49,6 +49,7 @@
#include "transaction.h"
#include "sql_prepare.h"
#include "sql_statistics.h"
+#include "sql_cte.h"
#include <m_ctype.h>
#include <my_dir.h>
#include <hash.h>
@@ -348,7 +349,6 @@ void intern_close_table(TABLE *table)
table->s ? table->s->table_name.str : "?",
(long) table));
- free_io_cache(table);
delete table->triggers;
if (table->file) // Not true if placeholder
(void) closefrm(table, 1); // close file
@@ -358,21 +358,6 @@ void intern_close_table(TABLE *table)
}
-/* Free resources allocated by filesort() and read_record() */
-
-void free_io_cache(TABLE *table)
-{
- DBUG_ENTER("free_io_cache");
- if (table->sort.io_cache)
- {
- close_cached_file(table->sort.io_cache);
- my_free(table->sort.io_cache);
- table->sort.io_cache=0;
- }
- DBUG_VOID_RETURN;
-}
-
-
/**
Auxiliary function which allows to kill delayed threads for
particular table identified by its share.
@@ -1811,7 +1796,6 @@ void close_temporary(TABLE *table, bool free_share, bool delete_table)
DBUG_PRINT("tmptable", ("closing table: '%s'.'%s'",
table->s->db.str, table->s->table_name.str));
- free_io_cache(table);
closefrm(table, 0);
if (delete_table)
rm_temporary_table(table_type, table->s->path.str);
@@ -3920,6 +3904,26 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
tables->table_name= tables->view_name.str;
tables->table_name_length= tables->view_name.length;
}
+ else if (tables->select_lex)
+ {
+ /*
+ Check whether 'tables' refers to a table defined in a with clause.
+ If so set the reference to the definition in tables->with.
+ */
+ if (!tables->with)
+ tables->with= tables->select_lex->find_table_def_in_with_clauses(tables);
+ /*
+ If 'tables' is defined in a with clause set the pointer to the
+ specification from its definition in tables->derived.
+ */
+ if (tables->with)
+ {
+ if (tables->set_as_with_table(thd, tables->with))
+ DBUG_RETURN(1);
+ else
+ goto end;
+ }
+ }
/*
If this TABLE_LIST object is a placeholder for an information_schema
table, create a temporary table to represent the information_schema
@@ -8354,7 +8358,7 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
temporary table. Thus in this case we can be sure that 'item' is an
Item_field.
*/
- if (any_privileges)
+ if (any_privileges && !tables->is_with_table() && !tables->is_derived())
{
DBUG_ASSERT((tables->field_translation == NULL && table) ||
tables->is_natural_join);
@@ -8553,7 +8557,7 @@ bool setup_on_expr(THD *thd, TABLE_LIST *table, bool is_update)
TODO
RETURN
- TRUE if some error occured (e.g. out of memory)
+ TRUE if some error occurred (e.g. out of memory)
FALSE if all is OK
*/
@@ -8663,7 +8667,7 @@ err_no_arena:
function.
@return Status
- @retval true An error occured.
+ @retval true An error occurred.
@retval false OK.
*/
@@ -8825,7 +8829,7 @@ static bool not_null_fields_have_null_values(TABLE *table)
record[1] buffers correspond to new and old versions of row respectively.
@return Status
- @retval true An error occured.
+ @retval true An error occurred.
@retval false OK.
*/
@@ -8885,7 +8889,7 @@ fill_record_n_invoke_before_triggers(THD *thd, TABLE *table, List<Item> &fields,
function.
@return Status
- @retval true An error occured.
+ @retval true An error occurred.
@retval false OK.
*/
@@ -8980,7 +8984,7 @@ err:
record[1] buffers correspond to new and old versions of row respectively.
@return Status
- @retval true An error occured.
+ @retval true An error occurred.
@retval false OK.
*/
diff --git a/sql/sql_base.h b/sql/sql_base.h
index ef249b3ab05..b6e135b6feb 100644
--- a/sql/sql_base.h
+++ b/sql/sql_base.h
@@ -267,7 +267,6 @@ bool open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables, uint flags,
uint dt_phases);
bool lock_tables(THD *thd, TABLE_LIST *tables, uint counter, uint flags);
int decide_logging_format(THD *thd, TABLE_LIST *tables);
-void free_io_cache(TABLE *entry);
void intern_close_table(TABLE *entry);
void kill_delayed_threads_for_table(TDC_element *element);
void close_thread_table(THD *thd, TABLE **table_ptr);
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 38289b188b2..e3b70566597 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1426,6 +1426,7 @@ void THD::init(void)
bzero((char *) &org_status_var, sizeof(org_status_var));
start_bytes_received= 0;
last_commit_gtid.seq_no= 0;
+ last_stmt= NULL;
status_in_global= 0;
#ifdef WITH_WSREP
wsrep_exec_mode= wsrep_applier ? REPL_RECV : LOCAL_STATE;
@@ -1705,8 +1706,9 @@ THD::~THD()
if (status_var.local_memory_used != 0)
{
DBUG_PRINT("error", ("memory_used: %lld", status_var.local_memory_used));
- SAFEMALLOC_REPORT_MEMORY(my_thread_dbug_id());
- DBUG_ASSERT(status_var.local_memory_used == 0);
+ SAFEMALLOC_REPORT_MEMORY(thread_id);
+ DBUG_ASSERT(status_var.local_memory_used == 0 ||
+ !debug_assert_on_not_freed_memory);
}
set_current_thd(orig_thd == this ? 0 : orig_thd);
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 4ab94b08970..7eb00d948a2 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -658,7 +658,7 @@ typedef struct system_variables
/* Error messages */
MY_LOCALE *lc_messages;
- const char **errmsgs; /* lc_messages->errmsg->errmsgs */
+ const char ***errmsgs; /* lc_messages->errmsg->errmsgs */
/* Locale Support */
MY_LOCALE *lc_time_names;
@@ -695,6 +695,7 @@ typedef struct system_status_var
ulong com_create_tmp_table;
ulong com_drop_tmp_table;
ulong com_other;
+ ulong com_multi;
ulong com_stmt_prepare;
ulong com_stmt_reprepare;
@@ -1964,6 +1965,13 @@ public:
/* all prepared statements and cursors of this connection */
Statement_map stmt_map;
+
+ /* Last created prepared statement */
+ Statement *last_stmt;
+ inline void set_last_stmt(Statement *stmt)
+ { last_stmt= (is_error() ? NULL : stmt); }
+ inline void clear_last_stmt() { last_stmt= NULL; }
+
/*
A pointer to the stack frame of handle_one_connection(),
which is called first in the thread for handling a client
@@ -2179,7 +2187,7 @@ public:
int is_current_stmt_binlog_format_row() const {
DBUG_ASSERT(current_stmt_binlog_format == BINLOG_FORMAT_STMT ||
current_stmt_binlog_format == BINLOG_FORMAT_ROW);
- return WSREP_FORMAT(current_stmt_binlog_format) == BINLOG_FORMAT_ROW;
+ return current_stmt_binlog_format == BINLOG_FORMAT_ROW;
}
enum binlog_filter_state
@@ -4564,8 +4572,6 @@ public:
uint group_parts,group_length,group_null_parts;
uint quick_group;
bool using_indirect_summary_function;
- /* If >0 convert all blob fields to varchar(convert_blob_length) */
- uint convert_blob_length;
CHARSET_INFO *table_charset;
bool schema_table;
/* TRUE if the temp table is created for subquery materialization. */
@@ -4594,7 +4600,7 @@ public:
TMP_TABLE_PARAM()
:copy_field(0), group_parts(0),
- group_length(0), group_null_parts(0), convert_blob_length(0),
+ group_length(0), group_null_parts(0),
schema_table(0), materialized_subquery(0), force_not_null_cols(0),
precomputed_group_by(0),
force_copy_fields(0), bit_fields_as_long(0), skip_create_table(0)
@@ -5018,85 +5024,7 @@ class user_var_entry
user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
bool create_if_not_exists);
-/*
- Unique -- class for unique (removing of duplicates).
- Puts all values to the TREE. If the tree becomes too big,
- it's dumped to the file. User can request sorted values, or
- just iterate through them. In the last case tree merging is performed in
- memory simultaneously with iteration, so it should be ~2-3x faster.
- */
-
-class Unique :public Sql_alloc
-{
- DYNAMIC_ARRAY file_ptrs;
- ulong max_elements;
- ulonglong max_in_memory_size;
- IO_CACHE file;
- TREE tree;
- uchar *record_pointers;
- ulong filtered_out_elems;
- bool flush();
- uint size;
- uint full_size;
- uint min_dupl_count; /* always 0 for unions, > 0 for intersections */
- bool with_counters;
-
- bool merge(TABLE *table, uchar *buff, bool without_last_merge);
-
-public:
- ulong elements;
- Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg,
- uint size_arg, ulonglong max_in_memory_size_arg,
- uint min_dupl_count_arg= 0);
- ~Unique();
- ulong elements_in_tree() { return tree.elements_in_tree; }
- inline bool unique_add(void *ptr)
- {
- DBUG_ENTER("unique_add");
- DBUG_PRINT("info", ("tree %u - %lu", tree.elements_in_tree, max_elements));
- if (!(tree.flag & TREE_ONLY_DUPS) &&
- tree.elements_in_tree >= max_elements && flush())
- DBUG_RETURN(1);
- DBUG_RETURN(!tree_insert(&tree, ptr, 0, tree.custom_arg));
- }
-
- bool is_in_memory() { return (my_b_tell(&file) == 0); }
- void close_for_expansion() { tree.flag= TREE_ONLY_DUPS; }
-
- bool get(TABLE *table);
-
- /* Cost of searching for an element in the tree */
- inline static double get_search_cost(ulonglong tree_elems, uint compare_factor)
- {
- return log((double) tree_elems) / (compare_factor * M_LN2);
- }
-
- static double get_use_cost(uint *buffer, size_t nkeys, uint key_size,
- ulonglong max_in_memory_size, uint compare_factor,
- bool intersect_fl, bool *in_memory);
- inline static int get_cost_calc_buff_size(size_t nkeys, uint key_size,
- ulonglong max_in_memory_size)
- {
- register ulonglong max_elems_in_tree=
- max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size);
- return (int) (sizeof(uint)*(1 + nkeys/max_elems_in_tree));
- }
-
- void reset();
- bool walk(TABLE *table, tree_walk_action action, void *walk_action_arg);
-
- uint get_size() const { return size; }
- ulonglong get_max_in_memory_size() const { return max_in_memory_size; }
-
- friend int unique_write_to_file(uchar* key, element_count count, Unique *unique);
- friend int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique);
-
- friend int unique_write_to_file_with_count(uchar* key, element_count count,
- Unique *unique);
- friend int unique_intersect_write_to_ptrs(uchar* key, element_count count,
- Unique *unique);
-};
-
+class SORT_INFO;
class multi_delete :public select_result_interceptor
{
@@ -5124,7 +5052,7 @@ public:
int send_data(List<Item> &items);
bool initialize_tables (JOIN *join);
int do_deletes();
- int do_table_deletes(TABLE *table, bool ignore);
+ int do_table_deletes(TABLE *table, SORT_INFO *sort_info, bool ignore);
bool send_eof();
inline ha_rows num_deleted()
{
@@ -5364,6 +5292,10 @@ public:
Do not check that wsrep snapshot is ready before allowing this command
*/
#define CF_SKIP_WSREP_CHECK (1U << 2)
+/**
+ Do not allow it for COM_MULTI batch
+*/
+#define CF_NO_COM_MULTI (1U << 3)
/* Inline functions */
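
The Unique class no longer lives in sql_class.h; as the opt_range.cc hunk earlier in this patch suggests, code that needs it now includes uniques.h, and this header only forward-declares SORT_INFO for the interfaces that mention it. A hedged sketch of the includes such a user needs after this change:

#include "uniques.h"          /* class Unique, moved out of sql_class.h */
#include "filesort.h"         /* SORT_INFO, used above as unique->sort */
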
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index ea114bf40a5..66564bd5e94 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -1192,7 +1192,6 @@ void prepare_new_connection_state(THD* thd)
*/
thd->proc_info= 0;
thd->set_command(COM_SLEEP);
- thd->set_time();
thd->init_for_queries();
if (opt_init_connect.length && !(sctx->master_access & SUPER_ACL))
@@ -1234,7 +1233,6 @@ void prepare_new_connection_state(THD* thd)
}
thd->proc_info=0;
- thd->set_time();
thd->init_for_queries();
}
}
diff --git a/sql/sql_const.h b/sql/sql_const.h
index 76e47bd278b..31ee4603dc9 100644
--- a/sql/sql_const.h
+++ b/sql/sql_const.h
@@ -235,6 +235,8 @@
that does not respond to "initial server greeting" timely
*/
#define CONNECT_TIMEOUT 10
+ /* Wait 5 minutes before removing thread from thread cache */
+#define THREAD_CACHE_TIMEOUT 5*60
/* The following can also be changed from the command line */
#define DEFAULT_CONCURRENCY 10
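
The new sql/sql_cte.cc below resolves WITH-clause dependencies by recording, for each element, a bitmap of the sibling elements its specification refers to, then building the transitive closure of those bitmaps; an element that ends up depending on itself is recursive and is still rejected at this stage with ER_RECURSIVE_QUERY_IN_WITH_CLAUSE. A standalone model of the closure step, using plain uint64_t bitmaps instead of the server's table_map (illustrative only):

#include <cstdint>
#include <vector>

/* dep[i] has bit j set when element i directly refers to element j.
   After the closure dep[i] also covers indirect references, so a set
   self-bit (bit i of dep[i]) signals a recursive definition. */
static void transitive_closure(std::vector<uint64_t> &dep)
{
  for (size_t k= 0; k < dep.size(); k++)
  {
    uint64_t k_bit= 1ULL << k;
    for (size_t i= 0; i < dep.size(); i++)
      if (dep[i] & k_bit)
        dep[i]|= dep[k];
  }
}

In With_clause::check_dependencies() the same two nested loops run over With_element objects and their table_map dependency_map fields.
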
diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc
new file mode 100644
index 00000000000..1203a4ce0c8
--- /dev/null
+++ b/sql/sql_cte.cc
@@ -0,0 +1,601 @@
+#include "sql_class.h"
+#include "sql_lex.h"
+#include "sql_cte.h"
+#include "sql_view.h" // for make_valid_column_names
+#include "sql_parse.h"
+
+
+/**
+ @brief
+ Check dependencies between tables defined in a list of with clauses
+
+ @param
+ with_clauses_list Pointer to the first clause in the list
+
+ @details
+ The procedure just calls the method With_clause::check_dependencies
+ for each member of the given list.
+
+ @retval
+ false on success
+ true on failure
+*/
+
+bool check_dependencies_in_with_clauses(With_clause *with_clauses_list)
+{
+ for (With_clause *with_clause= with_clauses_list;
+ with_clause;
+ with_clause= with_clause->next_with_clause)
+ {
+ if (with_clause->check_dependencies())
+ return true;
+ }
+ return false;
+}
+
+
+/**
+ @brief
+ Check dependencies between tables defined in this with clause
+
+ @details
+ The method performs the following actions for this with clause:
+
+ 1. Test for definitions of the tables with the same name.
+ 2. For each table T defined in this with clause look for tables
+ from the same with clause that are used in the query that
+ specifies T and set the dependencies of T on these tables
+ in dependency_map.
+ 3. Build the transitive closure of the above direct dependencies
+ to find out all recursive definitions.
+ 4. If this with clause is not specified as recursive then
+ for each with table T defined in this with clause check whether
+ it is used in any definition that follows the definition of T.
+
+ @retval
+ true if an error is reported
+ false otherwise
+*/
+
+bool With_clause::check_dependencies()
+{
+ if (dependencies_are_checked)
+ return false;
+ /*
+    Look for definitions with the same query name.
+    When one is found, report an error and return true immediately.
+    For each table T defined in this with clause look for all other tables
+    from the same with clause that are used in the specification of T.
+    For each such table set the dependency bit in the dependency map of
+    the with element for T.
+ */
+ for (With_element *with_elem= first_elem;
+ with_elem != NULL;
+ with_elem= with_elem->next_elem)
+ {
+ for (With_element *elem= first_elem;
+ elem != with_elem;
+ elem= elem->next_elem)
+ {
+ if (my_strcasecmp(system_charset_info, with_elem->query_name->str,
+ elem->query_name->str) == 0)
+ {
+ my_error(ER_DUP_QUERY_NAME, MYF(0), with_elem->query_name->str);
+ return true;
+ }
+ }
+ with_elem->check_dependencies_in_unit(with_elem->spec);
+ }
+ /* Build the transitive closure of the direct dependencies found above */
+ for (With_element *with_elem= first_elem;
+ with_elem != NULL;
+ with_elem= with_elem->next_elem)
+ {
+ table_map with_elem_map= with_elem->get_elem_map();
+ for (With_element *elem= first_elem; elem != NULL; elem= elem->next_elem)
+ {
+ if (elem->dependency_map & with_elem_map)
+ elem->dependency_map |= with_elem->dependency_map;
+ }
+ }
+
+ /*
+ Mark those elements where tables are defined with direct or indirect recursion.
+ Report an error when recursion (direct or indirect) is used to define a table.
+ */
+ for (With_element *with_elem= first_elem;
+ with_elem != NULL;
+ with_elem= with_elem->next_elem)
+ {
+ if (with_elem->dependency_map & with_elem->get_elem_map())
+ with_elem->is_recursive= true;
+ }
+ for (With_element *with_elem= first_elem;
+ with_elem != NULL;
+ with_elem= with_elem->next_elem)
+ {
+ if (with_elem->is_recursive)
+ {
+ my_error(ER_RECURSIVE_QUERY_IN_WITH_CLAUSE, MYF(0),
+ with_elem->query_name->str);
+ return true;
+ }
+ }
+
+ if (!with_recursive)
+ {
+ /*
+ For each with table T defined in this with clause check whether
+ it is used in any definition that follows the definition of T.
+ */
+ for (With_element *with_elem= first_elem;
+ with_elem != NULL;
+ with_elem= with_elem->next_elem)
+ {
+ With_element *checked_elem= with_elem->next_elem;
+ for (uint i = with_elem->number+1;
+ i < elements;
+ i++, checked_elem= checked_elem->next_elem)
+ {
+ if (with_elem->check_dependency_on(checked_elem))
+ {
+ my_error(ER_WRONG_ORDER_IN_WITH_CLAUSE, MYF(0),
+ with_elem->query_name->str, checked_elem->query_name->str);
+ return true;
+ }
+ }
+ }
+ }
+
+ dependencies_are_checked= true;
+ return false;
+}
+
+
+/**
+ @brief
+ Check dependencies on the sibling with tables used in the given unit
+
+ @param unit The unit where the siblings are to be searched for
+
+ @details
+    The method recursively looks through all FROM lists encountered in
+    the given unit. If it finds a reference to a table that is
+    defined in the same with clause to which this element belongs,
+    the method sets the bit of dependency on this table in the
+    dependency_map of this element.
+*/
+
+void With_element::check_dependencies_in_unit(st_select_lex_unit *unit)
+{
+ st_select_lex *sl= unit->first_select();
+ for (; sl; sl= sl->next_select())
+ {
+ for (TABLE_LIST *tbl= sl->table_list.first; tbl; tbl= tbl->next_local)
+ {
+ if (!tbl->with)
+ tbl->with= owner->find_table_def(tbl);
+ if (!tbl->with && tbl->select_lex)
+ tbl->with= tbl->select_lex->find_table_def_in_with_clauses(tbl);
+ if (tbl->with && tbl->with->owner== this->owner)
+ set_dependency_on(tbl->with);
+ }
+ st_select_lex_unit *inner_unit= sl->first_inner_unit();
+ for (; inner_unit; inner_unit= inner_unit->next_unit())
+ check_dependencies_in_unit(inner_unit);
+ }
+}
+
+
+/**
+ @brief
+ Search for the definition of a table among the elements of this with clause
+
+ @param table The reference to the table that is looked for
+
+ @details
+ The function looks through the elements of this with clause trying to find
+ the definition of the given table. When it encounters the element with
+ the same query name as the table's name it returns this element. If no
+ such definitions are found the function returns NULL.
+
+ @retval
+ found with element if the search succeeded
+ NULL - otherwise
+*/
+
+With_element *With_clause::find_table_def(TABLE_LIST *table)
+{
+ for (With_element *with_elem= first_elem;
+ with_elem != NULL;
+ with_elem= with_elem->next_elem)
+ {
+ if (my_strcasecmp(system_charset_info, with_elem->query_name->str, table->table_name) == 0)
+ {
+ return with_elem;
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ @brief
+ Perform context analysis for all unreferenced tables defined in with clause
+
+ @param thd The context of the statement containing this with clause
+
+ @details
+ For each unreferenced table T defined in this with clause the method
+ calls the method With_element::prepare_unreferenced that performs
+ context analysis of the element with the definition of T.
+
+ @retval
+ false If context analysis does not report any error
+ true Otherwise
+*/
+
+bool With_clause::prepare_unreferenced_elements(THD *thd)
+{
+ for (With_element *with_elem= first_elem;
+ with_elem != NULL;
+ with_elem= with_elem->next_elem)
+ {
+ if (!with_elem->is_referenced() && with_elem->prepare_unreferenced(thd))
+ return true;
+ }
+
+ return false;
+}
+
+
+/**
+ @brief
+ Save the specification of the given with table as a string
+
+ @param thd The context of the statement containing this with element
+ @param spec_start The beginning of the specification in the input string
+ @param spec_end The end of the specification in the input string
+
+ @details
+ The method creates a string copy of the specification used in this element.
+ The method is called when the element is parsed. The copy may be used to
+ create clones of the specification whenever they are needed.
+
+ @retval
+ false on success
+ true on failure
+*/
+
+bool With_element::set_unparsed_spec(THD *thd, char *spec_start, char *spec_end)
+{
+ unparsed_spec.length= spec_end - spec_start;
+ unparsed_spec.str= (char*) thd->memdup(spec_start, unparsed_spec.length+1);
+
+ if (!unparsed_spec.str)
+ {
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR),
+ static_cast<int>(unparsed_spec.length));
+ return true;
+ }
+ unparsed_spec.str[unparsed_spec.length]= '\0';
+ return false;
+}
+
+
+/**
+ @brief
+ Create a clone of the specification for the given with table
+
+ @param thd The context of the statement containing this with element
+ @param with_table The reference to the table defined in this element for which
+ the clone is created.
+
+ @details
+ The method creates a clone of the specification used in this element.
+ The clone is created for the given reference to the table defined by
+ this element.
+ The clone is created by feeding the string with the specification saved in
+ unparsed_spec into the parser as an input string. While parsing
+ this string a unit object representing the specification is built.
+ A chain of all table references occurring in the specification is also
+ formed.
+ The method includes the new unit and its sub-units into the hierarchy of
+ the units of the main query. It also inserts the constructed chain of the
+ table references into the chain of all table references of the main query.
+
+ @note
+ Clones are created only for the non-first references to tables defined in
+ the with clause. They are necessary for merged specifications because
+ the optimizer handles any such specification as independent of the others.
+ When a table defined in the with clause is materialized in a temporary table
+ one could do without specification clones. However in this case they
+ are created as well, because currently different table references to
+ the same temporary table cannot share the same definition structure.
+
+ @retval
+ pointer to the built clone if succeeds
+ NULL - otherwise
+*/
+
+st_select_lex_unit *With_element::clone_parsed_spec(THD *thd,
+ TABLE_LIST *with_table)
+{
+ LEX *lex;
+ st_select_lex_unit *res= NULL;
+ Query_arena backup;
+ Query_arena *arena= thd->activate_stmt_arena_if_needed(&backup);
+
+ if (!(lex= (LEX*) new(thd->mem_root) st_lex_local))
+ {
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ return res;
+ }
+ LEX *old_lex= thd->lex;
+ thd->lex= lex;
+
+ bool parse_status= false;
+ Parser_state parser_state;
+ TABLE_LIST *spec_tables;
+ TABLE_LIST *spec_tables_tail;
+ st_select_lex *with_select;
+
+ if (parser_state.init(thd, unparsed_spec.str, unparsed_spec.length))
+ goto err;
+ lex_start(thd);
+ with_select= &lex->select_lex;
+ with_select->select_number= ++thd->select_number;
+ parse_status= parse_sql(thd, &parser_state, 0);
+ if (parse_status)
+ goto err;
+ spec_tables= lex->query_tables;
+ spec_tables_tail= 0;
+ for (TABLE_LIST *tbl= spec_tables;
+ tbl;
+ tbl= tbl->next_global)
+ {
+ tbl->grant.privilege= with_table->grant.privilege;
+ spec_tables_tail= tbl;
+ }
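+ /*
+ Splice the tables used in the parsed clone into the global table list
+ of the main query right after the reference with_table.
+ */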
+ if (spec_tables)
+ {
+ if (with_table->next_global)
+ {
+ spec_tables_tail->next_global= with_table->next_global;
+ with_table->next_global->prev_global= &spec_tables_tail->next_global;
+ }
+ else
+ {
+ old_lex->query_tables_last= &spec_tables_tail->next_global;
+ }
+ spec_tables->prev_global= &with_table->next_global;
+ with_table->next_global= spec_tables;
+ }
+ res= &lex->unit;
+
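+ /*
+ Attach the unit of the clone under the select that uses with_table and
+ add the selects of the clone to the list of all selects of the query.
+ */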
+ lex->unit.include_down(with_table->select_lex);
+ lex->unit.set_slave(with_select);
+ old_lex->all_selects_list=
+ (st_select_lex*) (lex->all_selects_list->
+ insert_chain_before(
+ (st_select_lex_node **) &(old_lex->all_selects_list),
+ with_select));
+ lex_end(lex);
+err:
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ thd->lex= old_lex;
+ return res;
+}
+
+
+/**
+ @brief
+ Rename columns of the unit derived from the spec of this with element
+ @param thd The context of the statement containing the with element
+ @param unit The specification of the with element or its clone
+
+ @details
+ The method assumes that the parameter unit is either the specification itself
+ of this with element or a clone of this specification. It looks through
+ the column list in this with element. It reports an error if the cardinality
+ of this list differs from the cardinality of the select lists in 'unit'.
+ Otherwise it renames the columns of the first select list and sets the flag
+ unit->columns_are_renamed to true, preventing the columns from being renamed
+ a second time.
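+ For example, for "WITH t(a,b) AS (SELECT c1, c2 FROM t1) ..." the items
+ of the first select list of the specification are renamed to 'a' and 'b'.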
+
+ @retval
+ true if an error was reported
+ false otherwise
+*/
+
+bool
+With_element::rename_columns_of_derived_unit(THD *thd,
+ st_select_lex_unit *unit)
+{
+ if (unit->columns_are_renamed)
+ return false;
+
+ st_select_lex *select= unit->first_select();
+
+ if (column_list.elements) // The column list is optional
+ {
+ List_iterator_fast<Item> it(select->item_list);
+ List_iterator_fast<LEX_STRING> nm(column_list);
+ Item *item;
+ LEX_STRING *name;
+
+ if (column_list.elements != select->item_list.elements)
+ {
+ my_error(ER_WITH_COL_WRONG_LIST, MYF(0));
+ return true;
+ }
+ /* Rename the columns of the first select in the unit */
+ while ((item= it++, name= nm++))
+ {
+ item->set_name(thd, name->str, (uint) name->length, system_charset_info);
+ item->is_autogenerated_name= false;
+ }
+ }
+
+ make_valid_column_names(thd, select->item_list);
+
+ unit->columns_are_renamed= true;
+
+ return false;
+}
+
+
+/**
+ @brief
+ Perform context analysis of the definition of an unreferenced table
+
+ @param thd The context of the statement containing this with element
+
+ @details
+ The method assumes that this with element contains the definition
+ of a table that is not used anywhere. In this case one has to check
+ that context conditions are met.
+
+ @retval
+ true if an error was reported
+ false otherwise
+*/
+
+bool With_element::prepare_unreferenced(THD *thd)
+{
+ bool rc= false;
+ st_select_lex *first_sl= spec->first_select();
+
+ /* Prevent name resolution for field references out of with elements */
+ for (st_select_lex *sl= first_sl;
+ sl;
+ sl= sl->next_select())
+ sl->context.outer_context= 0;
+
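+ /* Prepare the specification in context-analysis-only mode, as for a derived table */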
+ thd->lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_DERIVED;
+ if (!spec->prepared &&
+ (spec->prepare(thd, 0, 0) ||
+ rename_columns_of_derived_unit(thd, spec) ||
+ check_duplicate_names(thd, first_sl->item_list, 1)))
+ rc= true;
+
+ thd->lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_DERIVED;
+ return rc;
+}
+
+
+/**
+ @brief
+ Search for the definition of the given table referred in this select node
+
+ @param table reference to the table whose definition is searched for
+
+ @details
+ The method looks for the definition of the table whose reference occurs
+ in the FROM list of this select node. First it searches for it in the
+ with clause attached to the unit this select node belongs to. If such a
+ definition is not found there, the embedding units are looked through.
+
+ @retval
+ pointer to the found definition if the search has been successful
+ NULL - otherwise
+*/
+
+With_element *st_select_lex::find_table_def_in_with_clauses(TABLE_LIST *table)
+{
+ With_element *found= NULL;
+ for (st_select_lex *sl= this;
+ sl;
+ sl= sl->master_unit()->outer_select())
+ {
+ With_clause *with_clause=sl->get_with_clause();
+ if (with_clause && (found= with_clause->find_table_def(table)))
+ return found;
+ }
+ return found;
+}
+
+
+/**
+ @brief
+ Set the specifying unit in this reference to a with table
+
+ @details
+ The method assumes that the given element with_elem defines the table T
+ this table reference refers to.
+ If this is the first reference to T the method just sets its specification
+ in the field 'derived' as the unit that yields T. Otherwise the method
+ first creates a clone of the specification and sets this clone in the field instead.
+
+ @retval
+ false on success
+ true on failure
+*/
+
+bool TABLE_LIST::set_as_with_table(THD *thd, With_element *with_elem)
+{
+ with= with_elem;
+ if (!with_elem->is_referenced())
+ derived= with_elem->spec;
+ else
+ {
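+ /* A subsequent reference gets its own clone of the specification */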
+ if(!(derived= with_elem->clone_parsed_spec(thd, this)))
+ return true;
+ derived->with_element= with_elem;
+ }
+ with_elem->inc_references();
+ return false;
+}
+
+
+/**
+ @brief
+ Print this with clause
+
+ @param str Where to print to
+ @param query_type The mode of printing
+
+ @details
+ The method prints a string representation of this clause in the
+ string str. The parameter query_type specifies the mode of printing.
+*/
+
+void With_clause::print(String *str, enum_query_type query_type)
+{
+ str->append(STRING_WITH_LEN("WITH "));
+ if (with_recursive)
+ str->append(STRING_WITH_LEN("RECURSIVE "));
+ for (With_element *with_elem= first_elem;
+ with_elem != NULL;
+ with_elem= with_elem->next_elem)
+ {
+ if (with_elem != first_elem)
+ str->append(", ");
+ with_elem->print(str, query_type);
+ }
+}
+
+
+/**
+ @brief
+ Print this with element
+
+ @param str Where to print to
+ @param query_type The mode of printing
+
+ @details
+ The method prints a string representation of this with element in the
+ string str. The parameter query_type specifies the mode of printing.
+*/
+
+void With_element::print(String *str, enum_query_type query_type)
+{
+ str->append(query_name);
+ str->append(STRING_WITH_LEN(" AS "));
+ str->append('(');
+ spec->print(str, query_type);
+ str->append(')');
+}
+
diff --git a/sql/sql_cte.h b/sql/sql_cte.h
new file mode 100644
index 00000000000..0cbc9247af9
--- /dev/null
+++ b/sql/sql_cte.h
@@ -0,0 +1,178 @@
+#ifndef SQL_CTE_INCLUDED
+#define SQL_CTE_INCLUDED
+#include "sql_list.h"
+#include "sql_lex.h"
+
+class With_clause;
+
+/**
+ @class With_element
+ @brief Definition of a CTE table
+
+ It contains a reference to the name of the table introduced by this with element,
+ and a reference to the unit that specifies this table. Also it contains
+ a reference to the with clause to which this element belongs.
+*/
+
+class With_element : public Sql_alloc
+{
+private:
+ With_clause *owner; // with clause this object belongs to
+ With_element *next_elem; // next element in the with clause
+ uint number; // number of the element in the with clause (starting from 0)
+ table_map elem_map; // The map with only one bit set: the bit in position this->number
+ /*
+ The map dependency_map has 1 in the i-th position if the query that
+ specifies this element contains a reference to the element number i
+ in the query FROM list.
+ */
+ table_map dependency_map;
+ /*
+ Total number of references to this element in the FROM lists of
+ the queries that are in the scope of the element (including
+ subqueries and specifications of other with elements).
+ */
+ uint references;
+ /*
+ Unparsed specification of the query that specifies this element.
+ It is used to build clones of the specification if they are needed.
+ */
+ LEX_STRING unparsed_spec;
+
+ /* Return the map where 1 is set only in the position for this element */
+ table_map get_elem_map() { return (table_map) 1 << number; }
+
+public:
+ /*
+ The name of the table introduced by this with element. The name
+ can be used in FROM lists of the queries in the scope of the element.
+ */
+ LEX_STRING *query_name;
+ /*
+ Optional list of column names to name the columns of the table introduced
+ by this with element. It is used in the case when the names are not
+ inherited from the query that specified the table. Otherwise the list is
+ always empty.
+ */
+ List <LEX_STRING> column_list;
+ /* The query that specifies the table introduced by this with element */
+ st_select_lex_unit *spec;
+ /*
+ Set to true if recursion is used (directly or indirectly)
+ for the definition of this element
+ */
+ bool is_recursive;
+
+ With_element(LEX_STRING *name,
+ List <LEX_STRING> list,
+ st_select_lex_unit *unit)
+ : next_elem(NULL), dependency_map(0), references(0),
+ query_name(name), column_list(list), spec(unit),
+ is_recursive(false) {}
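+ /* 'number' and 'owner' are initialized in With_clause::add_with_element() */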
+
+ void check_dependencies_in_unit(st_select_lex_unit *unit);
+
+ void set_dependency_on(With_element *with_elem)
+ { dependency_map|= with_elem->get_elem_map(); }
+
+ bool check_dependency_on(With_element *with_elem)
+ { return dependency_map & with_elem->get_elem_map(); }
+
+ bool set_unparsed_spec(THD *thd, char *spec_start, char *spec_end);
+
+ st_select_lex_unit *clone_parsed_spec(THD *thd, TABLE_LIST *with_table);
+
+ bool is_referenced() { return references != 0; }
+
+ void inc_references() { references++; }
+
+ bool rename_columns_of_derived_unit(THD *thd, st_select_lex_unit *unit);
+
+ bool prepare_unreferenced(THD *thd);
+
+ void print(String *str, enum_query_type query_type);
+
+ friend class With_clause;
+};
+
+
+/**
+ @class With_clause
+ @brief Set of with_elements
+
+ It has a reference to the first with element from this with clause.
+ This reference allows to navigate through all the elements of the with clause.
+ It contains a reference to the unit to which this with clause is attached.
+ It also contains a flag saying whether this with clause was specified as recursive.
+*/
+
+class With_clause : public Sql_alloc
+{
+private:
+ st_select_lex_unit *owner; // the unit this with clause is attached to
+ With_element *first_elem; // the first definition in this with clause
+ With_element **last_next; // the address where the link to the next added element is set
+ uint elements; // number of elements/definitions in this with clause
+ /*
+ The with clause immediately containing this with clause if there is any,
+ otherwise NULL. Used only during parsing.
+ */
+ With_clause *embedding_with_clause;
+ /*
+ The next with clause in the chain of with clauses encountered
+ in the current statement
+ */
+ With_clause *next_with_clause;
+ /* Set to true if dependencies between with elements have been checked */
+ bool dependencies_are_checked;
+
+public:
+ /* If true the specifier RECURSIVE is present in the with clause */
+ bool with_recursive;
+
+ With_clause(bool recursive_fl, With_clause *emb_with_clause)
+ : owner(NULL), first_elem(NULL), elements(0),
+ embedding_with_clause(emb_with_clause), next_with_clause(NULL),
+ dependencies_are_checked(false),
+ with_recursive(recursive_fl)
+ { last_next= &first_elem; }
+
+ /* Add a new element to the current with clause */
+ bool add_with_element(With_element *elem)
+ {
+ elem->owner= this;
+ elem->number= elements;
+ owner= elem->spec;
+ owner->with_element= elem;
+ *last_next= elem;
+ last_next= &elem->next_elem;
+ elements++;
+ return false;
+ }
+
+ /* Add this with clause to the list of with clauses used in the statement */
+ void add_to_list(With_clause ** &last_next)
+ {
+ *last_next= this;
+ last_next= &this->next_with_clause;
+ }
+
+ With_clause *pop() { return embedding_with_clause; }
+
+ bool check_dependencies();
+
+ With_element *find_table_def(TABLE_LIST *table);
+
+ With_element *find_table_def_in_with_clauses(TABLE_LIST *table);
+
+ bool prepare_unreferenced_elements(THD *thd);
+
+ void print(String *str, enum_query_type query_type);
+
+ friend
+ bool check_dependencies_in_with_clauses(With_clause *with_clauses_list);
+
+};
+
+
+#endif /* SQL_CTE_INCLUDED */
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index f49a053918b..42e7f6c3569 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -40,6 +40,8 @@
#include "sql_statistics.h"
#include "transaction.h"
#include "records.h" // init_read_record,
+#include "filesort.h"
+#include "uniques.h"
#include "sql_derived.h" // mysql_handle_list_of_derived
// end_read_record
#include "sql_partition.h" // make_used_partitions_str
@@ -227,10 +229,12 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
int error, loc_error;
TABLE *table;
SQL_SELECT *select=0;
+ SORT_INFO *file_sort= 0;
READ_RECORD info;
bool using_limit=limit != HA_POS_ERROR;
bool transactional_table, safe_update, const_cond;
bool const_cond_result;
+ bool return_error= 0;
ha_rows deleted= 0;
bool reverse= FALSE;
ORDER *order= (ORDER *) ((order_list && order_list->elements) ?
@@ -405,7 +409,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
table->covering_keys.clear_all();
table->quick_keys.clear_all(); // Can't use 'only index'
- select=make_select(table, 0, 0, conds, 0, &error);
+ select=make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error);
if (error)
DBUG_RETURN(TRUE);
if ((select && select->check_quick(thd, safe_update, limit)) || !limit)
@@ -486,32 +490,21 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
if (query_plan.using_filesort)
{
- ha_rows examined_rows;
- ha_rows found_rows;
uint length= 0;
SORT_FIELD *sortorder;
{
DBUG_ASSERT(query_plan.index == MAX_KEY);
- table->sort.io_cache= (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
- MYF(MY_FAE | MY_ZEROFILL |
- MY_THREAD_SPECIFIC));
Filesort_tracker *fs_tracker=
thd->lex->explain->get_upd_del_plan()->filesort_tracker;
if (!(sortorder= make_unireg_sortorder(thd, order, &length, NULL)) ||
- (table->sort.found_records= filesort(thd, table, sortorder, length,
- select, HA_POS_ERROR,
- true,
- &examined_rows, &found_rows,
- fs_tracker))
- == HA_POS_ERROR)
- {
- delete select;
- free_underlaid_joins(thd, &thd->lex->select_lex);
- DBUG_RETURN(TRUE);
- }
- thd->inc_examined_row_count(examined_rows);
+ !(file_sort= filesort(thd, table, sortorder, length,
+ select, HA_POS_ERROR,
+ true,
+ fs_tracker)))
+ goto got_error;
+ thd->inc_examined_row_count(file_sort->examined_rows);
/*
Filesort has already found and selected the rows we want to delete,
so we don't need the where clause
@@ -524,24 +517,16 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
/* If quick select is used, initialize it before retrieving rows. */
if (select && select->quick && select->quick->reset())
- {
- delete select;
- free_underlaid_joins(thd, select_lex);
- DBUG_RETURN(TRUE);
- }
+ goto got_error;
if (query_plan.index == MAX_KEY || (select && select->quick))
- error= init_read_record(&info, thd, table, select, 1, 1, FALSE);
+ error= init_read_record(&info, thd, table, select, file_sort, 1, 1, FALSE);
else
error= init_read_record_idx(&info, thd, table, 1, query_plan.index,
reverse);
if (error)
- {
- delete select;
- free_underlaid_joins(thd, select_lex);
- DBUG_RETURN(TRUE);
- }
-
+ goto got_error;
+
init_ftfuncs(thd, select_lex, 1);
THD_STAGE_INFO(thd, stage_updating);
@@ -697,8 +682,6 @@ cleanup:
}
DBUG_ASSERT(transactional_table || !deleted || thd->transaction.stmt.modified_non_trans_table);
-
- free_underlaid_joins(thd, select_lex);
if (error < 0 ||
(thd->lex->ignore && !thd->is_error() && !thd->is_fatal_error))
{
@@ -711,6 +694,8 @@ cleanup:
my_ok(thd, deleted);
DBUG_PRINT("info",("%ld records deleted",(long) deleted));
}
+ delete file_sort;
+ free_underlaid_joins(thd, select_lex);
DBUG_RETURN(error >= 0 || thd->is_error());
/* Special exits */
@@ -729,9 +714,16 @@ send_nothing_and_leave:
*/
delete select;
+ delete file_sort;
free_underlaid_joins(thd, select_lex);
//table->set_keyread(false);
- DBUG_RETURN((thd->is_error() || thd->killed) ? 1 : 0);
+
+ DBUG_ASSERT(!return_error || thd->is_error() || thd->killed);
+ DBUG_RETURN((return_error || thd->is_error() || thd->killed) ? 1 : 0);
+
+got_error:
+ return_error= 1;
+ goto send_nothing_and_leave;
}
@@ -1183,7 +1175,8 @@ int multi_delete::do_deletes()
if (tempfiles[counter]->get(table))
DBUG_RETURN(1);
- local_error= do_table_deletes(table, thd->lex->ignore);
+ local_error= do_table_deletes(table, &tempfiles[counter]->sort,
+ thd->lex->ignore);
if (thd->killed && !local_error)
DBUG_RETURN(1);
@@ -1213,14 +1206,15 @@ int multi_delete::do_deletes()
@retval 1 Triggers or handler reported error.
@retval -1 End of file from handler.
*/
-int multi_delete::do_table_deletes(TABLE *table, bool ignore)
+int multi_delete::do_table_deletes(TABLE *table, SORT_INFO *sort_info,
+ bool ignore)
{
int local_error= 0;
READ_RECORD info;
ha_rows last_deleted= deleted;
DBUG_ENTER("do_deletes_for_table");
- if (init_read_record(&info, thd, table, NULL, 0, 1, FALSE))
+ if (init_read_record(&info, thd, table, NULL, sort_info, 0, 1, FALSE))
DBUG_RETURN(1);
/*
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index baba4a876b3..1ef83b3bf1f 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -30,6 +30,7 @@
#include "sql_base.h"
#include "sql_view.h" // check_duplicate_names
#include "sql_acl.h" // SELECT_ACL
+#include "sql_cte.h"
typedef bool (*dt_processor)(THD *thd, LEX *lex, TABLE_LIST *derived);
@@ -670,6 +671,9 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
// st_select_lex_unit::prepare correctly work for single select
if ((res= unit->prepare(thd, derived->derived_result, 0)))
goto exit;
+ if (derived->with &&
+ (res= derived->with->rename_columns_of_derived_unit(thd, unit)))
+ goto exit;
lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_DERIVED;
if ((res= check_duplicate_names(thd, unit->types, 0)))
goto exit;
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index a0e836da203..a50b90fc111 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -194,7 +194,8 @@ int search_topics(THD *thd, TABLE *topics, struct st_find_field *find_fields,
DBUG_ENTER("search_topics");
/* Should never happen. As this is part of help, we can ignore this */
- if (init_read_record(&read_record_info, thd, topics, select, 1, 0, FALSE))
+ if (init_read_record(&read_record_info, thd, topics, select, NULL, 1, 0,
+ FALSE))
DBUG_RETURN(0);
while (!read_record_info.read_record(&read_record_info))
@@ -229,14 +230,16 @@ int search_topics(THD *thd, TABLE *topics, struct st_find_field *find_fields,
2 found more then one topic matching the mask
*/
-int search_keyword(THD *thd, TABLE *keywords, struct st_find_field *find_fields,
+int search_keyword(THD *thd, TABLE *keywords,
+ struct st_find_field *find_fields,
SQL_SELECT *select, int *key_id)
{
int count= 0;
READ_RECORD read_record_info;
DBUG_ENTER("search_keyword");
/* Should never happen. As this is part of help, we can ignore this */
- if (init_read_record(&read_record_info, thd, keywords, select, 1, 0, FALSE))
+ if (init_read_record(&read_record_info, thd, keywords, select, NULL, 1, 0,
+ FALSE))
DBUG_RETURN(0);
while (!read_record_info.read_record(&read_record_info) && count<2)
@@ -370,7 +373,8 @@ int search_categories(THD *thd, TABLE *categories,
DBUG_ENTER("search_categories");
/* Should never happen. As this is part of help, we can ignore this */
- if (init_read_record(&read_record_info, thd, categories, select,1,0,FALSE))
+ if (init_read_record(&read_record_info, thd, categories, select, NULL,
+ 1, 0, FALSE))
DBUG_RETURN(0);
while (!read_record_info.read_record(&read_record_info))
{
@@ -406,7 +410,8 @@ void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname,
DBUG_ENTER("get_all_items_for_category");
/* Should never happen. As this is part of help, we can ignore this */
- if (init_read_record(&read_record_info, thd, items, select,1,0,FALSE))
+ if (init_read_record(&read_record_info, thd, items, select, NULL, 1, 0,
+ FALSE))
DBUG_VOID_RETURN;
while (!read_record_info.read_record(&read_record_info))
@@ -608,7 +613,7 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond,
/* Assume that no indexes cover all required fields */
table->covering_keys.clear_all();
- SQL_SELECT *res= make_select(table, 0, 0, cond, 0, error);
+ SQL_SELECT *res= make_select(table, 0, 0, cond, 0, 0, error);
if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR)) ||
(res && res->quick && res->quick->reset()))
{
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index f77d8cb1984..9817b882bdd 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -3897,7 +3897,7 @@ Field *Item::create_field_for_create_select(THD *thd, TABLE *table)
{
Field *def_field, *tmp_field;
return ::create_tmp_field(thd, table, this, type(),
- (Item ***) 0, &tmp_field, &def_field, 0, 0, 0, 0, 0);
+ (Item ***) 0, &tmp_field, &def_field, 0, 0, 0, 0);
}
diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc
index f84440afa90..818598110ca 100644
--- a/sql/sql_join_cache.cc
+++ b/sql/sql_join_cache.cc
@@ -2206,7 +2206,7 @@ finish:
for a match for any record from join_tab. To iterate over the candidates
for a match the virtual function get_next_candidate_for_match is used,
while the virtual function prepare_look_for_matches is called to prepare
- for such iteration proccess.
+ for such iteration process.
NOTES
The function produces all matching extensions for the records in the
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index e93ba7fc10f..65257c9b2ce 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -29,6 +29,7 @@
#include "sp_head.h"
#include "sp.h"
#include "sql_select.h"
+#include "sql_cte.h"
static int lex_one_token(YYSTYPE *yylval, THD *thd);
@@ -668,11 +669,15 @@ void lex_start(THD *thd)
/* 'parent_lex' is used in init_query() so it must be before it. */
lex->select_lex.parent_lex= lex;
lex->select_lex.init_query();
+ lex->curr_with_clause= 0;
+ lex->with_clauses_list= 0;
+ lex->with_clauses_list_last_next= &lex->with_clauses_list;
lex->value_list.empty();
lex->update_list.empty();
lex->set_var_list.empty();
lex->param_list.empty();
lex->view_list.empty();
+ lex->with_column_list.empty();
lex->with_persistent_for_clause= FALSE;
lex->column_list= NULL;
lex->index_list= NULL;
@@ -1406,28 +1411,22 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd)
if (use_mb(cs))
{
result_state= IDENT_QUOTED;
- if (my_mbcharlen(cs, lip->yyGetLast()) > 1)
+ int char_length= my_charlen(cs, lip->get_ptr() - 1,
+ lip->get_end_of_query());
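+ /* A non-positive length means a broken or incomplete multi-byte sequence */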
+ if (char_length <= 0)
{
- int l = my_ismbchar(cs,
- lip->get_ptr() -1,
- lip->get_end_of_query());
- if (l == 0) {
- state = MY_LEX_CHAR;
- continue;
- }
- lip->skip_binary(l - 1);
+ state= MY_LEX_CHAR;
+ continue;
}
+ lip->skip_binary(char_length - 1);
+
while (ident_map[c=lip->yyGet()])
{
- if (my_mbcharlen(cs, c) > 1)
- {
- int l;
- if ((l = my_ismbchar(cs,
- lip->get_ptr() -1,
- lip->get_end_of_query())) == 0)
- break;
- lip->skip_binary(l-1);
- }
+ char_length= my_charlen(cs, lip->get_ptr() - 1,
+ lip->get_end_of_query());
+ if (char_length <= 0)
+ break;
+ lip->skip_binary(char_length - 1);
}
}
else
@@ -1568,15 +1567,11 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd)
result_state= IDENT_QUOTED;
while (ident_map[c=lip->yyGet()])
{
- if (my_mbcharlen(cs, c) > 1)
- {
- int l;
- if ((l = my_ismbchar(cs,
- lip->get_ptr() -1,
- lip->get_end_of_query())) == 0)
- break;
- lip->skip_binary(l-1);
- }
+ int char_length= my_charlen(cs, lip->get_ptr() - 1,
+ lip->get_end_of_query());
+ if (char_length <= 0)
+ break;
+ lip->skip_binary(char_length - 1);
}
}
else
@@ -1604,8 +1599,9 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd)
char quote_char= c; // Used char
while ((c=lip->yyGet()))
{
- int var_length;
- if ((var_length= my_mbcharlen(cs, c)) == 1)
+ int var_length= my_charlen(cs, lip->get_ptr() - 1,
+ lip->get_end_of_query());
+ if (var_length == 1)
{
if (c == quote_char)
{
@@ -1617,11 +1613,9 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd)
}
}
#ifdef USE_MB
- else if (use_mb(cs))
+ else if (var_length > 1)
{
- if ((var_length= my_ismbchar(cs, lip->get_ptr() - 1,
- lip->get_end_of_query())))
- lip->skip_binary(var_length-1);
+ lip->skip_binary(var_length - 1);
}
#endif
}
@@ -2076,6 +2070,9 @@ void st_select_lex_unit::init_query()
found_rows_for_union= 0;
insert_table_with_stored_vcol= 0;
derived= 0;
+ with_clause= 0;
+ with_element= 0;
+ columns_are_renamed= false;
}
void st_select_lex::init_query()
@@ -2259,6 +2256,37 @@ void st_select_lex_node::fast_exclude()
}
+/**
+ @brief
+ Insert a new chain of nodes into another chain before a particular link
+
+ @param in/out
+ ptr_pos_to_insert the address of the chain pointer pointing to the link
+ before which the subchain has to be inserted
+ @param
+ end_chain_node the last link of the subchain to be inserted
+
+ @details
+ The method inserts the chain of nodes starting from this node and ending
+ with the node end_chain_node into another chain of nodes before the node
+ pointed to by *ptr_pos_to_insert.
+ It is assumed that ptr_pos_to_insert belongs to the chain where we insert,
+ so the caller must update *ptr_pos_to_insert with the returned pointer.
+
+ @retval
+ The method returns the pointer to the first link of the inserted chain
+*/
+
+st_select_lex_node *st_select_lex_node:: insert_chain_before(
+ st_select_lex_node **ptr_pos_to_insert,
+ st_select_lex_node *end_chain_node)
+{
+ end_chain_node->link_next= *ptr_pos_to_insert;
+ (*ptr_pos_to_insert)->link_prev= &end_chain_node->link_next;
+ this->link_prev= ptr_pos_to_insert;
+ return this;
+}
+
/*
Exclude a node from the tree lex structure, but leave it in the global
list of nodes.
@@ -2648,6 +2676,8 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num)
void st_select_lex_unit::print(String *str, enum_query_type query_type)
{
bool union_all= !union_distinct;
+ if (with_clause)
+ with_clause->print(str, query_type);
for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select())
{
if (sl != first_select())
@@ -2688,30 +2718,22 @@ void st_select_lex::print_order(String *str,
{
if (order->counter_used)
{
- if (query_type != QT_VIEW_INTERNAL)
+ char buffer[20];
+ size_t length= my_snprintf(buffer, 20, "%d", order->counter);
+ str->append(buffer, (uint) length);
+ }
+ else
+ {
+ /* replace numeric reference with equivalent for ORDER constant */
+ if (order->item[0]->type() == Item::INT_ITEM &&
+ order->item[0]->basic_const_item())
{
- char buffer[20];
- size_t length= my_snprintf(buffer, 20, "%d", order->counter);
- str->append(buffer, (uint) length);
+ /* make it expression instead of integer constant */
+ str->append(STRING_WITH_LEN("''"));
}
else
- {
- /* replace numeric reference with expression */
- if (order->item[0]->type() == Item::INT_ITEM &&
- order->item[0]->basic_const_item())
- {
- char buffer[20];
- size_t length= my_snprintf(buffer, 20, "%d", order->counter);
- str->append(buffer, (uint) length);
- /* make it expression instead of integer constant */
- str->append(STRING_WITH_LEN("+0"));
- }
- else
- (*order->item)->print(str, query_type);
- }
+ (*order->item)->print(str, query_type);
}
- else
- (*order->item)->print(str, query_type);
if (!order->asc)
str->append(STRING_WITH_LEN(" desc"));
if (order->next)
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index bda4ceb7b91..c64ed6b8d5c 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -48,6 +48,8 @@ class Item_func_match;
class File_parser;
class Key_part_spec;
struct sql_digest_state;
+class With_clause;
+
#define ALLOC_ROOT_SET 1024
@@ -178,6 +180,7 @@ const LEX_STRING sp_data_access_name[]=
#define DERIVED_SUBQUERY 1
#define DERIVED_VIEW 2
+#define DERIVED_WITH 4
enum enum_view_create_mode
{
@@ -540,7 +543,9 @@ public:
List<String> *partition_names= 0,
LEX_STRING *option= 0);
virtual void set_lock_for_tables(thr_lock_type lock_type) {}
-
+ void set_slave(st_select_lex_node *slave_arg) { slave= slave_arg; }
+ st_select_lex_node *insert_chain_before(st_select_lex_node **ptr_pos_to_insert,
+ st_select_lex_node *end_chain_node);
friend class st_select_lex_unit;
friend bool mysql_new_select(LEX *lex, bool move_down);
friend bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
@@ -627,7 +632,7 @@ public:
return saved_fake_select_lex;
return first_select();
};
- //node on wich we should return current_select pointer after parsing subquery
+ //node on which we should return current_select pointer after parsing subquery
st_select_lex *return_to;
/* LIMIT clause runtime counters */
ha_rows select_limit_cnt, offset_limit_cnt;
@@ -638,6 +643,10 @@ public:
derived tables/views handling.
*/
TABLE_LIST *derived;
+ /* With clause attached to this unit (if any) */
+ With_clause *with_clause;
+ /* With element where this unit is used as the specification (if any) */
+ With_element *with_element;
/* thread handler */
THD *thd;
/*
@@ -646,7 +655,7 @@ public:
*/
st_select_lex *fake_select_lex;
/**
- SELECT_LEX that stores LIMIT and OFFSET for UNION ALL when no
+ SELECT_LEX that stores LIMIT and OFFSET for UNION ALL when no
fake_select_lex is used.
*/
st_select_lex *saved_fake_select_lex;
@@ -662,12 +671,15 @@ public:
*/
TABLE *insert_table_with_stored_vcol;
+ bool columns_are_renamed;
+
void init_query();
st_select_lex* outer_select();
st_select_lex* first_select()
{
return reinterpret_cast<st_select_lex*>(slave);
}
+ void set_with_clause(With_clause *with_cl) { with_clause= with_cl; }
st_select_lex_unit* next_unit()
{
return reinterpret_cast<st_select_lex_unit*>(next);
@@ -1062,6 +1074,19 @@ public:
void set_non_agg_field_used(bool val) { m_non_agg_field_used= val; }
void set_agg_func_used(bool val) { m_agg_func_used= val; }
+ void set_with_clause(With_clause *with_clause)
+ {
+ master_unit()->with_clause= with_clause;
+ }
+ With_clause *get_with_clause()
+ {
+ return master_unit()->with_clause;
+ }
+ With_element *get_with_element()
+ {
+ return master_unit()->with_element;
+ }
+ With_element *find_table_def_in_with_clauses(TABLE_LIST *table);
private:
bool m_non_agg_field_used;
@@ -2409,7 +2434,16 @@ struct LEX: public Query_tables_list
SELECT_LEX *current_select;
/* list of all SELECT_LEX */
SELECT_LEX *all_selects_list;
-
+ /* current with clause being parsed if any, otherwise 0 */
+ With_clause *curr_with_clause;
+ /* pointer to the first with clause in the current statement */
+ With_clause *with_clauses_list;
+ /*
+ (*with_clauses_list_last_next) contains a pointer to the last
+ with clause in the current statement
+ */
+ With_clause **with_clauses_list_last_next;
+
/* Query Plan Footprint of a currently running select */
Explain_query *explain;
@@ -2475,6 +2509,7 @@ public:
List<Item_func_set_user_var> set_var_list; // in-query assignment list
List<Item_param> param_list;
List<LEX_STRING> view_list; // view list (list of field names in view)
+ List<LEX_STRING> with_column_list; // list of column names in with_list_element
List<LEX_STRING> *column_list; // list of column names (in ANALYZE)
List<LEX_STRING> *index_list; // list of index names (in ANALYZE)
/*
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index d43eb884abd..a1bb84cf328 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -61,24 +61,64 @@ XML_TAG::XML_TAG(int l, String f, String v)
}
+/*
+ Field and line terminators must be interpreted as sequence of unsigned char.
+ Otherwise, non-ascii terminators will be negative on some platforms,
+ and positive on others (depending on the implementation of char).
+*/
+class Term_string
+{
+ const uchar *m_ptr;
+ uint m_length;
+ int m_initial_byte;
+public:
+ Term_string(const String &str) :
+ m_ptr(static_cast<const uchar*>(static_cast<const void*>(str.ptr()))),
+ m_length(str.length()),
+ m_initial_byte((uchar) (str.length() ? str.ptr()[0] : INT_MAX))
+ { }
+ void set(const uchar *str, uint length, int initial_byte)
+ {
+ m_ptr= str;
+ m_length= length;
+ m_initial_byte= initial_byte;
+ }
+ void reset() { set(NULL, 0, INT_MAX); }
+ const uchar *ptr() const { return m_ptr; }
+ uint length() const { return m_length; }
+ int initial_byte() const { return m_initial_byte; }
+ bool eq(const Term_string &other) const
+ {
+ return length() == other.length() && !memcmp(ptr(), other.ptr(), length());
+ }
+};
+
+
#define GET (stack_pos != stack ? *--stack_pos : my_b_get(&cache))
#define PUSH(A) *(stack_pos++)=(A)
class READ_INFO {
File file;
- uchar *buffer, /* Buffer for read text */
- *end_of_buff; /* Data in bufferts ends here */
- uint buff_length, /* Length of buffert */
- max_length; /* Max length of row */
- const uchar *field_term_ptr,*line_term_ptr;
- const char *line_start_ptr,*line_start_end;
- uint field_term_length,line_term_length,enclosed_length;
- int field_term_char,line_term_char,enclosed_char,escape_char;
+ String data; /* Read buffer */
+ uint fixed_length; /* Length of the fixed length record */
+ uint max_length; /* Max length of row */
+ Term_string m_field_term; /* FIELDS TERMINATED BY 'string' */
+ Term_string m_line_term; /* LINES TERMINATED BY 'string' */
+ Term_string m_line_start; /* LINES STARTING BY 'string' */
+ int enclosed_char,escape_char;
int *stack,*stack_pos;
bool found_end_of_line,start_of_line,eof;
NET *io_net;
int level; /* for load xml */
+ bool getbyte(char *to)
+ {
+ int chr= GET;
+ if (chr == my_b_EOF)
+ return (eof= true);
+ *to= chr;
+ return false;
+ }
public:
bool error,line_cuted,found_null,enclosed;
uchar *row_start, /* Found row starts here */
@@ -94,7 +134,11 @@ public:
int read_fixed_length(void);
int next_line(void);
char unescape(char chr);
- int terminator(const uchar *ptr, uint length);
+ bool terminator(const uchar *ptr, uint length);
+ bool terminator(const Term_string &str)
+ { return terminator(str.ptr(), str.length()); }
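+ /*
+ True if chr (the first byte of str, already read from the input) starts
+ str and the rest of str follows; bytes read on a mismatch are pushed back.
+ */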
+ bool terminator(int chr, const Term_string &str)
+ { return str.initial_byte() == chr && terminator(str); }
bool find_start_of_fields();
/* load xml */
List<XML_TAG> taglist;
@@ -1341,63 +1385,40 @@ READ_INFO::READ_INFO(THD *thd, File file_par, uint tot_length, CHARSET_INFO *cs,
String &field_term, String &line_start, String &line_term,
String &enclosed_par, int escape, bool get_it_from_net,
bool is_fifo)
- :file(file_par), buffer(NULL), buff_length(tot_length), escape_char(escape),
- found_end_of_line(false), eof(false),
+ :file(file_par), fixed_length(tot_length),
+ m_field_term(field_term), m_line_term(line_term), m_line_start(line_start),
+ escape_char(escape), found_end_of_line(false), eof(false),
error(false), line_cuted(false), found_null(false), read_charset(cs)
{
+ data.set_thread_specific();
/*
Field and line terminators must be interpreted as sequence of unsigned char.
Otherwise, non-ascii terminators will be negative on some platforms,
and positive on others (depending on the implementation of char).
*/
- field_term_ptr=
- static_cast<const uchar*>(static_cast<const void*>(field_term.ptr()));
- field_term_length= field_term.length();
- line_term_ptr=
- static_cast<const uchar*>(static_cast<const void*>(line_term.ptr()));
- line_term_length= line_term.length();
level= 0; /* for load xml */
- if (line_start.length() == 0)
- {
- line_start_ptr=0;
- start_of_line= 0;
- }
- else
- {
- line_start_ptr= line_start.ptr();
- line_start_end=line_start_ptr+line_start.length();
- start_of_line= 1;
- }
+ start_of_line= line_start.length() != 0;
/* If field_terminator == line_terminator, don't use line_terminator */
- if (field_term_length == line_term_length &&
- !memcmp(field_term_ptr,line_term_ptr,field_term_length))
- {
- line_term_length=0;
- line_term_ptr= NULL;
- }
- enclosed_char= (enclosed_length=enclosed_par.length()) ?
- (uchar) enclosed_par[0] : INT_MAX;
- field_term_char= field_term_length ? field_term_ptr[0] : INT_MAX;
- line_term_char= line_term_length ? line_term_ptr[0] : INT_MAX;
+ if (m_field_term.eq(m_line_term))
+ m_line_term.reset();
+ enclosed_char= enclosed_par.length() ? (uchar) enclosed_par[0] : INT_MAX;
/* Set of a stack for unget if long terminators */
- uint length= MY_MAX(cs->mbmaxlen, MY_MAX(field_term_length, line_term_length)) + 1;
+ uint length= MY_MAX(cs->mbmaxlen, MY_MAX(m_field_term.length(),
+ m_line_term.length())) + 1;
set_if_bigger(length,line_start.length());
stack= stack_pos= (int*) thd->alloc(sizeof(int) * length);
- if (!(buffer=(uchar*) my_malloc(buff_length+1,MYF(MY_THREAD_SPECIFIC))))
+ if (data.reserve(tot_length))
error=1; /* purecov: inspected */
else
{
- end_of_buff=buffer+buff_length;
if (init_io_cache(&cache,(get_it_from_net) ? -1 : file, 0,
(get_it_from_net) ? READ_NET :
(is_fifo ? READ_FIFO : READ_CACHE),0L,1,
MYF(MY_WME | MY_THREAD_SPECIFIC)))
{
- my_free(buffer); /* purecov: inspected */
- buffer= NULL;
error=1;
}
else
@@ -1420,7 +1441,6 @@ READ_INFO::READ_INFO(THD *thd, File file_par, uint tot_length, CHARSET_INFO *cs,
READ_INFO::~READ_INFO()
{
::end_io_cache(&cache);
- my_free(buffer);
List_iterator<XML_TAG> xmlit(taglist);
XML_TAG *t;
while ((t= xmlit++))
@@ -1428,7 +1448,7 @@ READ_INFO::~READ_INFO()
}
-inline int READ_INFO::terminator(const uchar *ptr,uint length)
+inline bool READ_INFO::terminator(const uchar *ptr, uint length)
{
int chr=0; // Keep gcc happy
uint i;
@@ -1440,18 +1460,17 @@ inline int READ_INFO::terminator(const uchar *ptr,uint length)
}
}
if (i == length)
- return 1;
+ return true;
PUSH(chr);
while (i-- > 1)
PUSH(*--ptr);
- return 0;
+ return false;
}
int READ_INFO::read_field()
{
int chr,found_enclosed_char;
- uchar *to,*new_buffer;
found_null=0;
if (found_end_of_line)
@@ -1470,11 +1489,11 @@ int READ_INFO::read_field()
found_end_of_line=eof=1;
return 1;
}
- to=buffer;
+ data.length(0);
if (chr == enclosed_char)
{
found_enclosed_char=enclosed_char;
- *to++=(uchar) chr; // If error
+ data.append(chr); // If error
}
else
{
@@ -1484,7 +1503,8 @@ int READ_INFO::read_field()
for (;;)
{
- while ( to < end_of_buff)
+ // Make sure we have enough space for the longest multi-byte character.
+ while (data.length() + read_charset->mbmaxlen <= data.alloced_length())
{
chr = GET;
if (chr == my_b_EOF)
@@ -1493,7 +1513,7 @@ int READ_INFO::read_field()
{
if ((chr=GET) == my_b_EOF)
{
- *to++= (uchar) escape_char;
+ data.append(escape_char);
goto found_eof;
}
/*
@@ -1505,24 +1525,24 @@ int READ_INFO::read_field()
*/
if (escape_char != enclosed_char || chr == escape_char)
{
- *to++ = (uchar) unescape((char) chr);
+ data.append(unescape((char) chr));
continue;
}
PUSH(chr);
chr= escape_char;
}
#ifdef ALLOW_LINESEPARATOR_IN_STRINGS
- if (chr == line_term_char)
+ if (chr == m_line_term.initial_byte())
#else
- if (chr == line_term_char && found_enclosed_char == INT_MAX)
+ if (chr == m_line_term.initial_byte() && found_enclosed_char == INT_MAX)
#endif
{
- if (terminator(line_term_ptr,line_term_length))
+ if (terminator(m_line_term))
{ // Maybe unexpected linefeed
enclosed=0;
found_end_of_line=1;
- row_start=buffer;
- row_end= to;
+ row_start= (uchar *) data.ptr();
+ row_end= (uchar *) data.end();
return 0;
}
}
@@ -1530,27 +1550,24 @@ int READ_INFO::read_field()
{
if ((chr=GET) == found_enclosed_char)
{ // Remove dupplicated
- *to++ = (uchar) chr;
+ data.append(chr);
continue;
}
// End of enclosed field if followed by field_term or line_term
- if (chr == my_b_EOF ||
- (chr == line_term_char && terminator(line_term_ptr,
- line_term_length)))
+ if (chr == my_b_EOF || terminator(chr, m_line_term))
{
/* Maybe unexpected linefeed */
enclosed=1;
found_end_of_line=1;
- row_start=buffer+1;
- row_end= to;
+ row_start= (uchar *) data.ptr() + 1;
+ row_end= (uchar *) data.end();
return 0;
}
- if (chr == field_term_char &&
- terminator(field_term_ptr,field_term_length))
+ if (terminator(chr, m_field_term))
{
enclosed=1;
- row_start=buffer+1;
- row_end= to;
+ row_start= (uchar *) data.ptr() + 1;
+ row_end= (uchar *) data.end();
return 0;
}
/*
@@ -1561,68 +1578,58 @@ int READ_INFO::read_field()
/* copy the found term character to 'to' */
chr= found_enclosed_char;
}
- else if (chr == field_term_char && found_enclosed_char == INT_MAX)
+ else if (chr == m_field_term.initial_byte() &&
+ found_enclosed_char == INT_MAX)
{
- if (terminator(field_term_ptr,field_term_length))
+ if (terminator(m_field_term))
{
enclosed=0;
- row_start=buffer;
- row_end= to;
+ row_start= (uchar *) data.ptr();
+ row_end= (uchar *) data.end();
return 0;
}
}
-#ifdef USE_MB
- if (my_mbcharlen(read_charset, chr) > 1 &&
- to + my_mbcharlen(read_charset, chr) <= end_of_buff)
+ data.append(chr);
+ if (use_mb(read_charset))
{
- uchar* p= to;
- int ml, i;
- *to++ = chr;
-
- ml= my_mbcharlen(read_charset, chr);
-
- for (i= 1; i < ml; i++)
+ int chlen;
+ if ((chlen= my_charlen(read_charset, data.end() - 1,
+ data.end())) != 1)
{
- chr= GET;
- if (chr == my_b_EOF)
+ for (uint32 length0= data.length() - 1 ; MY_CS_IS_TOOSMALL(chlen); )
{
- /*
- Need to back up the bytes already ready from illformed
- multi-byte char
- */
- to-= i;
- goto found_eof;
+ chr= GET;
+ if (chr == my_b_EOF)
+ goto found_eof;
+ data.append(chr);
+ chlen= my_charlen(read_charset, data.ptr() + length0, data.end());
+ if (chlen == MY_CS_ILSEQ)
+ {
+ /**
+ It has been an incomplete (but a valid) sequence so far,
+ but the last byte turned it into a bad byte sequence.
+ Unget the very last byte.
+ */
+ data.length(data.length() - 1);
+ PUSH(chr);
+ break;
+ }
}
- *to++ = chr;
}
- if (my_ismbchar(read_charset,
- (const char *)p,
- (const char *)to))
- continue;
- for (i= 0; i < ml; i++)
- PUSH(*--to);
- chr= GET;
}
-#endif
- *to++ = (uchar) chr;
}
/*
** We come here if buffer is too small. Enlarge it and continue
*/
- if (!(new_buffer=(uchar*) my_realloc((char*) buffer,buff_length+1+IO_SIZE,
- MYF(MY_WME | MY_THREAD_SPECIFIC))))
- return (error=1);
- to=new_buffer + (to-buffer);
- buffer=new_buffer;
- buff_length+=IO_SIZE;
- end_of_buff=buffer+buff_length;
+ if (data.reserve(IO_SIZE))
+ return (error= 1);
}
found_eof:
enclosed=0;
found_end_of_line=eof=1;
- row_start=buffer;
- row_end=to;
+ row_start= (uchar *) data.ptr();
+ row_end= (uchar *) data.end();
return 0;
}
@@ -1644,7 +1651,6 @@ found_eof:
int READ_INFO::read_fixed_length()
{
int chr;
- uchar *to;
if (found_end_of_line)
return 1; // One have to call next_line
@@ -1655,8 +1661,7 @@ int READ_INFO::read_fixed_length()
return 1;
}
- to=row_start=buffer;
- while (to < end_of_buff)
+ for (data.length(0); data.length() < fixed_length ; )
{
if ((chr=GET) == my_b_EOF)
goto found_eof;
@@ -1664,105 +1669,129 @@ int READ_INFO::read_fixed_length()
{
if ((chr=GET) == my_b_EOF)
{
- *to++= (uchar) escape_char;
+ data.append(escape_char);
goto found_eof;
}
- *to++ =(uchar) unescape((char) chr);
+ data.append((uchar) unescape((char) chr));
continue;
}
- if (chr == line_term_char)
- {
- if (terminator(line_term_ptr,line_term_length))
- { // Maybe unexpected linefeed
- found_end_of_line=1;
- row_end= to;
- return 0;
- }
+ if (terminator(chr, m_line_term))
+ { // Maybe unexpected linefeed
+ found_end_of_line= true;
+ break;
}
- *to++ = (uchar) chr;
+ data.append(chr);
}
- row_end=to; // Found full line
+ row_start= (uchar *) data.ptr();
+ row_end= (uchar *) data.end(); // Found full line
return 0;
found_eof:
found_end_of_line=eof=1;
- row_start=buffer;
- row_end=to;
- return to == buffer ? 1 : 0;
+ row_start= (uchar *) data.ptr();
+ row_end= (uchar *) data.end();
+ return data.length() == 0 ? 1 : 0;
}
int READ_INFO::next_line()
{
line_cuted=0;
- start_of_line= line_start_ptr != 0;
+ start_of_line= m_line_start.length() != 0;
if (found_end_of_line || eof)
{
found_end_of_line=0;
return eof;
}
found_end_of_line=0;
- if (!line_term_length)
+ if (!m_line_term.length())
return 0; // No lines
for (;;)
{
- int chr = GET;
-#ifdef USE_MB
- if (my_mbcharlen(read_charset, chr) > 1)
- {
- for (uint i=1;
- chr != my_b_EOF && i<my_mbcharlen(read_charset, chr);
- i++)
- chr = GET;
- if (chr == escape_char)
- continue;
- }
-#endif
- if (chr == my_b_EOF)
- {
- eof=1;
- return 1;
+ int chlen;
+ char buf[MY_CS_MBMAXLEN];
+
+ if (getbyte(&buf[0]))
+ return 1; // EOF
+
+ if (use_mb(read_charset) &&
+ (chlen= my_charlen(read_charset, buf, buf + 1)) != 1)
+ {
+ uint i;
+ for (i= 1; MY_CS_IS_TOOSMALL(chlen); )
+ {
+ DBUG_ASSERT(i < sizeof(buf));
+ DBUG_ASSERT(chlen != 1);
+ if (getbyte(&buf[i++]))
+ return 1; // EOF
+ chlen= my_charlen(read_charset, buf, buf + i);
+ }
+
+ /*
+ Either a complete multi-byte sequence,
+ or a broken byte sequence was found.
+ Check if the sequence is a prefix of the "LINES TERMINATED BY" string.
+ */
+ if ((uchar) buf[0] == m_line_term.initial_byte() &&
+ i <= m_line_term.length() &&
+ !memcmp(buf, m_line_term.ptr(), i))
+ {
+ if (m_line_term.length() == i)
+ {
+ /*
+ We found a "LINES TERMINATED BY" string that consists
+ of a single multi-byte character.
+ */
+ return 0;
+ }
+ /*
+ buf[] is a prefix of "LINES TERMINATED BY".
+ Now check the suffix. The length of the suffix of m_line_term
+ that still needs to be checked is (m_line_term.length() - i).
+ Note, READ_INFO::terminator() assumes that the leftmost byte of the
+ argument is already scanned from the file and is checked to
+ be a known prefix (e.g. against m_line_term.initial_byte()).
+ So we need to pass one extra byte.
+ */
+ if (terminator(m_line_term.ptr() + i - 1,
+ m_line_term.length() - i + 1))
+ return 0;
+ }
+ /*
+ Here we have a good multi-byte sequence or a broken byte sequence,
+ and the sequence is not equal to "LINES TERMINATED BY".
+ No need to check for escape_char, because:
+ - multi-byte escape characters in "FIELDS ESCAPED BY" are not
+ supported and are rejected at parse time.
+ - broken single-byte sequences are not recognized as escapes,
+ they are considered to be a part of the data and are converted to
+ question marks.
+ */
+ line_cuted= true;
+ continue;
}
- if (chr == escape_char)
+ if (buf[0] == escape_char)
{
- line_cuted=1;
+ line_cuted= true;
if (GET == my_b_EOF)
- return 1;
+ return 1;
continue;
}
- if (chr == line_term_char && terminator(line_term_ptr,line_term_length))
+ if (terminator(buf[0], m_line_term))
return 0;
- line_cuted=1;
+ line_cuted= true;
}
}
bool READ_INFO::find_start_of_fields()
{
- int chr;
- try_again:
- do
+ for (int chr= GET ; chr != my_b_EOF ; chr= GET)
{
- if ((chr=GET) == my_b_EOF)
- {
- found_end_of_line=eof=1;
- return 1;
- }
- } while ((char) chr != line_start_ptr[0]);
- for (const char *ptr=line_start_ptr+1 ; ptr != line_start_end ; ptr++)
- {
- chr=GET; // Eof will be checked later
- if ((char) chr != *ptr)
- { // Can't be line_start
- PUSH(chr);
- while (--ptr != line_start_ptr)
- { // Restart with next char
- PUSH( *ptr);
- }
- goto try_again;
- }
+ if (terminator(chr, m_line_start))
+ return false;
}
- return 0;
+ return (found_end_of_line= eof= true);
}
@@ -1952,11 +1981,11 @@ int READ_INFO::read_xml(THD *thd)
}
// row tag should be in ROWS IDENTIFIED BY '<row>' - stored in line_term
- if((tag.length() == line_term_length -2) &&
- (memcmp(tag.ptr(), line_term_ptr + 1, tag.length()) == 0))
+ if((tag.length() == m_line_term.length() - 2) &&
+ (memcmp(tag.ptr(), m_line_term.ptr() + 1, tag.length()) == 0))
{
DBUG_PRINT("read_xml", ("start-of-row: %i %s %s",
- level,tag.c_ptr_safe(), line_term_ptr));
+ level,tag.c_ptr_safe(), m_line_term.ptr()));
}
if(chr == ' ' || chr == '>')
@@ -2023,8 +2052,8 @@ int READ_INFO::read_xml(THD *thd)
chr= my_tospace(GET);
}
- if((tag.length() == line_term_length -2) &&
- (memcmp(tag.ptr(), line_term_ptr + 1, tag.length()) == 0))
+ if((tag.length() == m_line_term.length() - 2) &&
+ (memcmp(tag.ptr(), m_line_term.ptr() + 1, tag.length()) == 0))
{
DBUG_PRINT("read_xml", ("found end-of-row %i %s",
level, tag.c_ptr_safe()));
diff --git a/sql/sql_locale.h b/sql/sql_locale.h
index 8357a9ecba4..e231393eec6 100644
--- a/sql/sql_locale.h
+++ b/sql/sql_locale.h
@@ -19,7 +19,7 @@
typedef struct my_locale_errmsgs
{
const char *language;
- const char **errmsgs;
+ const char ***errmsgs;
} MY_LOCALE_ERRMSGS;
#include "my_global.h" /* uint */
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 53295a7ccfd..7c50e4ed680 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -92,6 +92,7 @@
#include "transaction.h"
#include "sql_audit.h"
#include "sql_prepare.h"
+#include "sql_cte.h"
#include "debug_sync.h"
#include "probes_mysql.h"
#include "set_var.h"
@@ -110,7 +111,7 @@
#include "wsrep_thd.h"
static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
- Parser_state *parser_state);
+ Parser_state *parser_state, bool is_next_command);
/**
@defgroup Runtime_Environment Runtime Environment
@@ -118,8 +119,6 @@ static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
*/
/* Used in error handling only */
-#define SP_TYPE_STRING(LP) \
- ((LP)->sphead->m_type == TYPE_ENUM_FUNCTION ? "FUNCTION" : "PROCEDURE")
#define SP_COM_STRING(LP) \
((LP)->sql_command == SQLCOM_CREATE_SPFUNCTION || \
(LP)->sql_command == SQLCOM_ALTER_FUNCTION || \
@@ -136,38 +135,263 @@ static bool check_rename_table(THD *, TABLE_LIST *, TABLE_LIST *);
const char *any_db="*any*"; // Special symbol for check_access
-const LEX_STRING command_name[]={
- { C_STRING_WITH_LEN("Sleep") },
- { C_STRING_WITH_LEN("Quit") },
- { C_STRING_WITH_LEN("Init DB") },
- { C_STRING_WITH_LEN("Query") },
- { C_STRING_WITH_LEN("Field List") },
- { C_STRING_WITH_LEN("Create DB") },
- { C_STRING_WITH_LEN("Drop DB") },
- { C_STRING_WITH_LEN("Refresh") },
- { C_STRING_WITH_LEN("Shutdown") },
- { C_STRING_WITH_LEN("Statistics") },
- { C_STRING_WITH_LEN("Processlist") },
- { C_STRING_WITH_LEN("Connect") },
- { C_STRING_WITH_LEN("Kill") },
- { C_STRING_WITH_LEN("Debug") },
- { C_STRING_WITH_LEN("Ping") },
- { C_STRING_WITH_LEN("Time") },
- { C_STRING_WITH_LEN("Delayed insert") },
- { C_STRING_WITH_LEN("Change user") },
- { C_STRING_WITH_LEN("Binlog Dump") },
- { C_STRING_WITH_LEN("Table Dump") },
- { C_STRING_WITH_LEN("Connect Out") },
- { C_STRING_WITH_LEN("Register Slave") },
- { C_STRING_WITH_LEN("Prepare") },
- { C_STRING_WITH_LEN("Execute") },
- { C_STRING_WITH_LEN("Long Data") },
- { C_STRING_WITH_LEN("Close stmt") },
- { C_STRING_WITH_LEN("Reset stmt") },
- { C_STRING_WITH_LEN("Set option") },
- { C_STRING_WITH_LEN("Fetch") },
- { C_STRING_WITH_LEN("Daemon") },
- { C_STRING_WITH_LEN("Error") } // Last command number
+const LEX_STRING command_name[257]={
+ { C_STRING_WITH_LEN("Sleep") }, //0
+ { C_STRING_WITH_LEN("Quit") }, //1
+ { C_STRING_WITH_LEN("Init DB") }, //2
+ { C_STRING_WITH_LEN("Query") }, //3
+ { C_STRING_WITH_LEN("Field List") }, //4
+ { C_STRING_WITH_LEN("Create DB") }, //5
+ { C_STRING_WITH_LEN("Drop DB") }, //6
+ { C_STRING_WITH_LEN("Refresh") }, //7
+ { C_STRING_WITH_LEN("Shutdown") }, //8
+ { C_STRING_WITH_LEN("Statistics") }, //9
+ { C_STRING_WITH_LEN("Processlist") }, //10
+ { C_STRING_WITH_LEN("Connect") }, //11
+ { C_STRING_WITH_LEN("Kill") }, //12
+ { C_STRING_WITH_LEN("Debug") }, //13
+ { C_STRING_WITH_LEN("Ping") }, //14
+ { C_STRING_WITH_LEN("Time") }, //15
+ { C_STRING_WITH_LEN("Delayed insert") }, //16
+ { C_STRING_WITH_LEN("Change user") }, //17
+ { C_STRING_WITH_LEN("Binlog Dump") }, //18
+ { C_STRING_WITH_LEN("Table Dump") }, //19
+ { C_STRING_WITH_LEN("Connect Out") }, //20
+ { C_STRING_WITH_LEN("Register Slave") }, //21
+ { C_STRING_WITH_LEN("Prepare") }, //22
+ { C_STRING_WITH_LEN("Execute") }, //23
+ { C_STRING_WITH_LEN("Long Data") }, //24
+ { C_STRING_WITH_LEN("Close stmt") }, //25
+ { C_STRING_WITH_LEN("Reset stmt") }, //26
+ { C_STRING_WITH_LEN("Set option") }, //27
+ { C_STRING_WITH_LEN("Fetch") }, //28
+ { C_STRING_WITH_LEN("Daemon") }, //29
+ { 0, 0 }, //30
+ { 0, 0 }, //31
+ { 0, 0 }, //32
+ { 0, 0 }, //33
+ { 0, 0 }, //34
+ { 0, 0 }, //35
+ { 0, 0 }, //36
+ { 0, 0 }, //37
+ { 0, 0 }, //38
+ { 0, 0 }, //39
+ { 0, 0 }, //40
+ { 0, 0 }, //41
+ { 0, 0 }, //42
+ { 0, 0 }, //43
+ { 0, 0 }, //44
+ { 0, 0 }, //45
+ { 0, 0 }, //46
+ { 0, 0 }, //47
+ { 0, 0 }, //48
+ { 0, 0 }, //49
+ { 0, 0 }, //50
+ { 0, 0 }, //51
+ { 0, 0 }, //52
+ { 0, 0 }, //53
+ { 0, 0 }, //54
+ { 0, 0 }, //55
+ { 0, 0 }, //56
+ { 0, 0 }, //57
+ { 0, 0 }, //58
+ { 0, 0 }, //59
+ { 0, 0 }, //60
+ { 0, 0 }, //61
+ { 0, 0 }, //62
+ { 0, 0 }, //63
+ { 0, 0 }, //64
+ { 0, 0 }, //65
+ { 0, 0 }, //66
+ { 0, 0 }, //67
+ { 0, 0 }, //68
+ { 0, 0 }, //69
+ { 0, 0 }, //70
+ { 0, 0 }, //71
+ { 0, 0 }, //72
+ { 0, 0 }, //73
+ { 0, 0 }, //74
+ { 0, 0 }, //75
+ { 0, 0 }, //76
+ { 0, 0 }, //77
+ { 0, 0 }, //78
+ { 0, 0 }, //79
+ { 0, 0 }, //80
+ { 0, 0 }, //81
+ { 0, 0 }, //82
+ { 0, 0 }, //83
+ { 0, 0 }, //84
+ { 0, 0 }, //85
+ { 0, 0 }, //86
+ { 0, 0 }, //87
+ { 0, 0 }, //88
+ { 0, 0 }, //89
+ { 0, 0 }, //90
+ { 0, 0 }, //91
+ { 0, 0 }, //92
+ { 0, 0 }, //93
+ { 0, 0 }, //94
+ { 0, 0 }, //95
+ { 0, 0 }, //96
+ { 0, 0 }, //97
+ { 0, 0 }, //98
+ { 0, 0 }, //99
+ { 0, 0 }, //100
+ { 0, 0 }, //101
+ { 0, 0 }, //102
+ { 0, 0 }, //103
+ { 0, 0 }, //104
+ { 0, 0 }, //105
+ { 0, 0 }, //106
+ { 0, 0 }, //107
+ { 0, 0 }, //108
+ { 0, 0 }, //109
+ { 0, 0 }, //110
+ { 0, 0 }, //111
+ { 0, 0 }, //112
+ { 0, 0 }, //113
+ { 0, 0 }, //114
+ { 0, 0 }, //115
+ { 0, 0 }, //116
+ { 0, 0 }, //117
+ { 0, 0 }, //118
+ { 0, 0 }, //119
+ { 0, 0 }, //120
+ { 0, 0 }, //121
+ { 0, 0 }, //122
+ { 0, 0 }, //123
+ { 0, 0 }, //124
+ { 0, 0 }, //125
+ { 0, 0 }, //126
+ { 0, 0 }, //127
+ { 0, 0 }, //128
+ { 0, 0 }, //129
+ { 0, 0 }, //130
+ { 0, 0 }, //131
+ { 0, 0 }, //132
+ { 0, 0 }, //133
+ { 0, 0 }, //134
+ { 0, 0 }, //135
+ { 0, 0 }, //136
+ { 0, 0 }, //137
+ { 0, 0 }, //138
+ { 0, 0 }, //139
+ { 0, 0 }, //140
+ { 0, 0 }, //141
+ { 0, 0 }, //142
+ { 0, 0 }, //143
+ { 0, 0 }, //144
+ { 0, 0 }, //145
+ { 0, 0 }, //146
+ { 0, 0 }, //147
+ { 0, 0 }, //148
+ { 0, 0 }, //149
+ { 0, 0 }, //150
+ { 0, 0 }, //151
+ { 0, 0 }, //152
+ { 0, 0 }, //153
+ { 0, 0 }, //154
+ { 0, 0 }, //155
+ { 0, 0 }, //156
+ { 0, 0 }, //157
+ { 0, 0 }, //158
+ { 0, 0 }, //159
+ { 0, 0 }, //160
+ { 0, 0 }, //161
+ { 0, 0 }, //162
+ { 0, 0 }, //163
+ { 0, 0 }, //164
+ { 0, 0 }, //165
+ { 0, 0 }, //166
+ { 0, 0 }, //167
+ { 0, 0 }, //168
+ { 0, 0 }, //169
+ { 0, 0 }, //170
+ { 0, 0 }, //171
+ { 0, 0 }, //172
+ { 0, 0 }, //173
+ { 0, 0 }, //174
+ { 0, 0 }, //175
+ { 0, 0 }, //176
+ { 0, 0 }, //177
+ { 0, 0 }, //178
+ { 0, 0 }, //179
+ { 0, 0 }, //180
+ { 0, 0 }, //181
+ { 0, 0 }, //182
+ { 0, 0 }, //183
+ { 0, 0 }, //184
+ { 0, 0 }, //185
+ { 0, 0 }, //186
+ { 0, 0 }, //187
+ { 0, 0 }, //188
+ { 0, 0 }, //189
+ { 0, 0 }, //190
+ { 0, 0 }, //191
+ { 0, 0 }, //192
+ { 0, 0 }, //193
+ { 0, 0 }, //194
+ { 0, 0 }, //195
+ { 0, 0 }, //196
+ { 0, 0 }, //197
+ { 0, 0 }, //198
+ { 0, 0 }, //199
+ { 0, 0 }, //200
+ { 0, 0 }, //201
+ { 0, 0 }, //202
+ { 0, 0 }, //203
+ { 0, 0 }, //204
+ { 0, 0 }, //205
+ { 0, 0 }, //206
+ { 0, 0 }, //207
+ { 0, 0 }, //208
+ { 0, 0 }, //209
+ { 0, 0 }, //210
+ { 0, 0 }, //211
+ { 0, 0 }, //212
+ { 0, 0 }, //213
+ { 0, 0 }, //214
+ { 0, 0 }, //215
+ { 0, 0 }, //216
+ { 0, 0 }, //217
+ { 0, 0 }, //218
+ { 0, 0 }, //219
+ { 0, 0 }, //220
+ { 0, 0 }, //221
+ { 0, 0 }, //222
+ { 0, 0 }, //223
+ { 0, 0 }, //224
+ { 0, 0 }, //225
+ { 0, 0 }, //226
+ { 0, 0 }, //227
+ { 0, 0 }, //228
+ { 0, 0 }, //229
+ { 0, 0 }, //230
+ { 0, 0 }, //231
+ { 0, 0 }, //232
+ { 0, 0 }, //233
+ { 0, 0 }, //234
+ { 0, 0 }, //235
+ { 0, 0 }, //236
+ { 0, 0 }, //237
+ { 0, 0 }, //238
+ { 0, 0 }, //239
+ { 0, 0 }, //240
+ { 0, 0 }, //241
+ { 0, 0 }, //242
+ { 0, 0 }, //243
+ { 0, 0 }, //244
+ { 0, 0 }, //245
+ { 0, 0 }, //246
+ { 0, 0 }, //247
+ { 0, 0 }, //248
+ { 0, 0 }, //249
+ { 0, 0 }, //250
+ { 0, 0 }, //251
+ { 0, 0 }, //252
+ { 0, 0 }, //253
+ { C_STRING_WITH_LEN("Com_multi") }, //254
+ { C_STRING_WITH_LEN("Error") } // Last command number 255
};
const char *xa_state_names[]={
@@ -269,7 +493,7 @@ void init_update_queries(void)
memset(server_command_flags, 0, sizeof(server_command_flags));
server_command_flags[COM_STATISTICS]= CF_SKIP_QUERY_ID | CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK;
- server_command_flags[COM_PING]= CF_SKIP_QUERY_ID | CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK;
+ server_command_flags[COM_PING]= CF_SKIP_QUERY_ID | CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK | CF_NO_COM_MULTI;
server_command_flags[COM_QUIT]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_PROCESS_INFO]= CF_SKIP_WSREP_CHECK;
@@ -278,6 +502,10 @@ void init_update_queries(void)
server_command_flags[COM_SLEEP]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_TIME]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_END]= CF_SKIP_WSREP_CHECK;
+ for (uint i= COM_MDB_GAP_BEG; i <= COM_MDB_GAP_END; i++)
+ {
+ server_command_flags[i]= CF_SKIP_WSREP_CHECK;
+ }
/*
COM_QUERY, COM_SET_OPTION and COM_STMT_XXX are allowed to pass the early
@@ -290,6 +518,7 @@ void init_update_queries(void)
server_command_flags[COM_STMT_RESET]= CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK;
server_command_flags[COM_STMT_EXECUTE]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_STMT_SEND_LONG_DATA]= CF_SKIP_WSREP_CHECK;
+ server_command_flags[COM_MULTI]= CF_SKIP_WSREP_CHECK | CF_NO_COM_MULTI;
/* Initialize the sql command flags array. */
memset(sql_command_flags, 0, sizeof(sql_command_flags));
@@ -673,7 +902,7 @@ void execute_init_command(THD *thd, LEX_STRING *init_command,
*/
save_vio= thd->net.vio;
thd->net.vio= 0;
- dispatch_command(COM_QUERY, thd, buf, len);
+ dispatch_command(COM_QUERY, thd, buf, len, FALSE, FALSE);
thd->client_capabilities= save_client_capabilities;
thd->net.vio= save_vio;
@@ -791,7 +1020,7 @@ static void handle_bootstrap_impl(THD *thd)
break;
}
- mysql_parse(thd, thd->query(), length, &parser_state);
+ mysql_parse(thd, thd->query(), length, &parser_state, FALSE);
bootstrap_error= thd->is_error();
thd->protocol->end_statement();
@@ -889,6 +1118,23 @@ void cleanup_items(Item *item)
DBUG_VOID_RETURN;
}
+static enum enum_server_command fetch_command(THD *thd, char *packet)
+{
+ enum enum_server_command
+ command= (enum enum_server_command) (uchar) packet[0];
+ NET *net= &thd->net;
+ DBUG_ENTER("fetch_command");
+
+ if (command >= COM_END ||
+ (command >= COM_MDB_GAP_BEG && command <= COM_MDB_GAP_END))
+ command= COM_END; // Wrong command
+
+ DBUG_PRINT("info",("Command on %s = %d (%s)",
+ vio_description(net->vio), command,
+ command_name[command].str));
+ DBUG_RETURN(command);
+}
+
#ifndef EMBEDDED_LIBRARY
@@ -1077,14 +1323,8 @@ bool do_command(THD *thd)
/* Do not rely on my_net_read, extra safety against programming errors. */
packet[packet_length]= '\0'; /* safety */
- command= (enum enum_server_command) (uchar) packet[0];
- if (command >= COM_END)
- command= COM_END; // Wrong command
-
- DBUG_PRINT("info",("Command on %s = %d (%s)",
- vio_description(net->vio), command,
- command_name[command].str));
+ command= fetch_command(thd, packet);
#ifdef WITH_WSREP
/*
@@ -1110,7 +1350,8 @@ bool do_command(THD *thd)
DBUG_ASSERT(packet_length);
DBUG_ASSERT(!thd->apc_target.is_enabled());
- return_value= dispatch_command(command, thd, packet+1, (uint) (packet_length-1));
+ return_value= dispatch_command(command, thd, packet+1,
+ (uint) (packet_length-1), FALSE, FALSE);
#ifdef WITH_WSREP
if (WSREP(thd))
{
@@ -1128,7 +1369,7 @@ bool do_command(THD *thd)
my_charset_latin1.csname);
}
return_value= dispatch_command(command, thd, thd->wsrep_retry_query,
- thd->wsrep_retry_query_len);
+ thd->wsrep_retry_query_len, FALSE, FALSE);
thd->variables.character_set_client = current_charset;
}
@@ -1220,6 +1461,44 @@ static my_bool deny_updates_if_read_only_option(THD *thd,
/**
+ check COM_MULTI packet
+
+ @param thd thread handle
+ @param packet pointer to the packet of commands
+ @param packet_length length of this packet
+
+ @retval 0 - Error
+ @retval # - Number of commands in the batch
+*/
+
+uint maria_multi_check(THD *thd, char *packet, uint packet_length)
+{
+ uint counter= 0;
+ DBUG_ENTER("maria_multi_check");
+ while (packet_length)
+ {
+ // length of command + 3 bytes where that length was stored
+ uint subpacket_length= (uint3korr(packet) + 3);
+ DBUG_PRINT("info", ("sub-packet length: %d command: %x",
+ subpacket_length, packet[3]));
+
+ if (subpacket_length == 3 ||
+ subpacket_length > packet_length)
+ {
+ my_message(ER_UNKNOWN_COM_ERROR, ER_THD(thd, ER_UNKNOWN_COM_ERROR),
+ MYF(0));
+ DBUG_RETURN(0);
+ }
+
+ counter++;
+ packet+= subpacket_length;
+ packet_length-= subpacket_length;
+ }
+ DBUG_RETURN(counter);
+}
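
A minimal sketch (not part of this commit) of the payload layout maria_multi_check() above expects: a COM_MULTI body is a sequence of sub-packets, each a 3-byte little-endian length followed by that many bytes, the first of which is the sub-command code. The helper name append_subpacket is hypothetical; int3store() is the usual MariaDB byte-order macro, and the fragment assumes the standard server headers.

  /* Hypothetical client-side packing helper, shown for illustration only. */
  static uchar *append_subpacket(uchar *to, enum enum_server_command cmd,
                                 const uchar *arg, size_t arg_length)
  {
    size_t length= arg_length + 1;    /* command byte plus its argument     */
    int3store(to, length);            /* 3-byte little-endian length prefix */
    to[3]= (uchar) cmd;               /* sub-command code                   */
    memcpy(to + 4, arg, arg_length);  /* sub-command payload                */
    return to + 3 + length;           /* start of the next sub-packet       */
  }
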
+
+
+/**
Perform one connection-level (COM_XXXX) command.
@param command type of command to perform
@@ -1228,6 +1507,8 @@ static my_bool deny_updates_if_read_only_option(THD *thd,
@param packet_length length of packet + 1 (to show that data is
null-terminated) except for COM_SLEEP, where it
can be zero.
+ @param is_com_multi recursive call from COM_MULTI
+ @param is_next_command there will be more commands in the COM_MULTI batch
@todo
set thd->lex->sql_command to SQLCOM_END here.
@@ -1241,15 +1522,24 @@ static my_bool deny_updates_if_read_only_option(THD *thd,
COM_QUIT/COM_SHUTDOWN
*/
bool dispatch_command(enum enum_server_command command, THD *thd,
- char* packet, uint packet_length)
+ char* packet, uint packet_length, bool is_com_multi,
+ bool is_next_command)
{
NET *net= &thd->net;
bool error= 0;
bool do_end_of_statement= true;
DBUG_ENTER("dispatch_command");
- DBUG_PRINT("info", ("command: %d", command));
+ DBUG_PRINT("info", ("command: %d %s", command,
+ (command_name[command].str != 0 ?
+ command_name[command].str :
+ "<?>")));
+ bool drop_more_results= 0;
+
+ if (!is_com_multi)
+ inc_thread_running();
- inc_thread_running();
+ /* keep it within 1 byte */
+ compile_time_assert(COM_END == 255);
#ifdef WITH_WSREP
if (WSREP(thd))
@@ -1339,6 +1629,13 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
beginning of each command.
*/
thd->server_status&= ~SERVER_STATUS_CLEAR_SET;
+ if (is_next_command)
+ {
+ drop_more_results= !MY_TEST(thd->server_status &
+ SERVER_MORE_RESULTS_EXISTS);
+ thd->server_status|= SERVER_MORE_RESULTS_EXISTS;
+ }
+
switch (command) {
case COM_INIT_DB:
{
@@ -1487,9 +1784,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
break;
if (WSREP_ON)
- wsrep_mysql_parse(thd, thd->query(), thd->query_length(), &parser_state);
+ wsrep_mysql_parse(thd, thd->query(), thd->query_length(), &parser_state,
+ is_next_command);
else
- mysql_parse(thd, thd->query(), thd->query_length(), &parser_state);
+ mysql_parse(thd, thd->query(), thd->query_length(), &parser_state,
+ is_next_command);
while (!thd->killed && (parser_state.m_lip.found_semicolon != NULL) &&
! thd->is_error())
@@ -1574,9 +1873,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
/* TODO: set thd->lex->sql_command to SQLCOM_END here */
if (WSREP_ON)
- wsrep_mysql_parse(thd, beginning_of_next_stmt, length, &parser_state);
+ wsrep_mysql_parse(thd, beginning_of_next_stmt, length, &parser_state,
+ is_next_command);
else
- mysql_parse(thd, beginning_of_next_stmt, length, &parser_state);
+ mysql_parse(thd, beginning_of_next_stmt, length, &parser_state,
+ is_next_command);
}
@@ -1628,6 +1929,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
}
packet= arg_end + 1;
thd->reset_for_next_command();
+ // thd->reset_for_next_command resets state => restore it
+ if (is_next_command)
+ thd->server_status|= SERVER_MORE_RESULTS_EXISTS;
lex_start(thd);
/* Must be before we init the table list. */
if (lower_case_table_names)
@@ -1906,6 +2210,66 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
general_log_print(thd, command, NullS);
my_eof(thd);
break;
+ case COM_MULTI:
+ {
+ uint counter;
+ uint current_com= 0;
+ DBUG_ASSERT(!is_com_multi);
+ if (!(thd->client_capabilities & CLIENT_MULTI_RESULTS))
+ {
+ /* The client does not support multiple result sets being sent back */
+ my_error(ER_COMMULTI_BADCONTEXT, MYF(0));
+ break;
+ }
+
+ if (!(counter= maria_multi_check(thd, packet, packet_length)))
+ break;
+
+ {
+ /* We have to store next length because it will be destroyed by '\0' */
+ uint next_subpacket_length= uint3korr(packet);
+ unsigned char *readbuff= net->buff;
+
+ if (net_allocate_new_packet(net, thd, MYF(0)))
+ break;
+
+ while (packet_length)
+ {
+ current_com++;
+ uint subpacket_length= next_subpacket_length + 3;
+ if (subpacket_length < packet_length)
+ next_subpacket_length= uint3korr(packet + subpacket_length);
+ /* safety like in do_command() */
+ packet[subpacket_length]= '\0';
+
+ enum enum_server_command subcommand= fetch_command(thd, (packet + 3));
+
+ if (server_command_flags[subcommand] & CF_NO_COM_MULTI)
+ {
+ my_error(ER_BAD_COMMAND_IN_MULTI, MYF(0), command_name[subcommand]);
+ goto com_multi_end;
+ }
+
+ if (dispatch_command(subcommand, thd, packet + (1 + 3),
+ subpacket_length - (1 + 3), TRUE,
+ (current_com != counter)))
+ {
+ DBUG_ASSERT(thd->is_error());
+ goto com_multi_end;
+ }
+
+ DBUG_ASSERT(subpacket_length <= packet_length);
+ packet+= subpacket_length;
+ packet_length-= subpacket_length;
+ }
+
+com_multi_end:
+ /* release old buffer */
+ DBUG_ASSERT(net->buff == net->write_pos); // nothing to send
+ my_free(readbuff);
+ }
+ break;
+ }
case COM_SLEEP:
case COM_CONNECT: // Impossible here
case COM_TIME: // Impossible from client
@@ -1942,9 +2306,14 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd_proc_info(thd, "updating status");
/* Finalize server status flags after executing a command. */
thd->update_server_status();
- thd->protocol->end_statement();
- query_cache_end_of_result(thd);
+ if (command != COM_MULTI)
+ {
+ thd->protocol->end_statement();
+ query_cache_end_of_result(thd);
+ }
}
+ if (drop_more_results)
+ thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS;
if (!thd->is_error() && !thd->killed_errno())
mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_RESULT, 0, 0);
@@ -1968,8 +2337,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->m_statement_psi= NULL;
thd->m_digest= NULL;
- dec_thread_running();
- thd->packet.shrink(thd->variables.net_buffer_length); // Reclaim some memory
+ if (!is_com_multi)
+ {
+ dec_thread_running();
+ thd->packet.shrink(thd->variables.net_buffer_length); // Reclaim some memory
+ }
free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
#if defined(ENABLED_PROFILING)
@@ -5121,7 +5493,6 @@ end_with_restore_list:
{
uint namelen;
char *name;
- int sp_result= SP_INTERNAL_ERROR;
DBUG_ASSERT(lex->sphead != 0);
DBUG_ASSERT(lex->sphead->m_db.str); /* Must be initialized in the parser */
@@ -5132,23 +5503,12 @@ end_with_restore_list:
if (check_db_name(&lex->sphead->m_db))
{
my_error(ER_WRONG_DB_NAME, MYF(0), lex->sphead->m_db.str);
- goto create_sp_error;
+ goto error;
}
if (check_access(thd, CREATE_PROC_ACL, lex->sphead->m_db.str,
NULL, NULL, 0, 0))
- goto create_sp_error;
-
- /*
- Check that a database directory with this name
- exists. Design note: This won't work on virtual databases
- like information_schema.
- */
- if (check_db_dir_existence(lex->sphead->m_db.str))
- {
- my_error(ER_BAD_DB_ERROR, MYF(0), lex->sphead->m_db.str);
- goto create_sp_error;
- }
+ goto error;
/* Checking the drop permissions if CREATE OR REPLACE is used */
if (lex->create_info.or_replace())
@@ -5156,7 +5516,7 @@ end_with_restore_list:
if (check_routine_access(thd, ALTER_PROC_ACL, lex->spname->m_db.str,
lex->spname->m_name.str,
lex->sql_command == SQLCOM_DROP_PROCEDURE, 0))
- goto create_sp_error;
+ goto error;
}
name= lex->sphead->name(&namelen);
@@ -5168,18 +5528,17 @@ end_with_restore_list:
if (udf)
{
my_error(ER_UDF_EXISTS, MYF(0), name);
- goto create_sp_error;
+ goto error;
}
}
#endif
if (sp_process_definer(thd))
- goto create_sp_error;
+ goto error;
WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL)
- res= (sp_result= sp_create_routine(thd, lex->sphead->m_type, lex->sphead));
- switch (sp_result) {
- case SP_OK: {
+ if (!sp_create_routine(thd, lex->sphead->m_type, lex->sphead))
+ {
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/* only add privileges if really neccessary */
@@ -5244,31 +5603,8 @@ end_with_restore_list:
}
#endif
- break;
}
- case SP_WRITE_ROW_FAILED:
- my_error(ER_SP_ALREADY_EXISTS, MYF(0), SP_TYPE_STRING(lex), name);
- break;
- case SP_BAD_IDENTIFIER:
- my_error(ER_TOO_LONG_IDENT, MYF(0), name);
- break;
- case SP_BODY_TOO_LONG:
- my_error(ER_TOO_LONG_BODY, MYF(0), name);
- break;
- case SP_FLD_STORE_FAILED:
- my_error(ER_CANT_CREATE_SROUTINE, MYF(0), name);
- break;
- default:
- my_error(ER_SP_STORE_FAILED, MYF(0), SP_TYPE_STRING(lex), name);
- break;
- } /* end switch */
-
- /*
- Capture all errors within this CASE and
- clean up the environment.
- */
-create_sp_error:
- if (sp_result != SP_OK )
+ else
goto error;
my_ok(thd);
break; /* break super switch */
@@ -5668,6 +6004,8 @@ create_sp_error:
if (check_global_access(thd, SUPER_ACL))
break;
+ WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL)
+
res= create_server(thd, &lex->server_options);
break;
}
@@ -5679,6 +6017,8 @@ create_sp_error:
if (check_global_access(thd, SUPER_ACL))
break;
+ WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL)
+
if ((error= alter_server(thd, &lex->server_options)))
{
DBUG_PRINT("info", ("problem altering server <%s>",
@@ -5697,6 +6037,8 @@ create_sp_error:
if (check_global_access(thd, SUPER_ACL))
break;
+ WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL)
+
if ((err_code= drop_server(thd, &lex->server_options)))
{
if (! lex->if_exists() && err_code == ER_FOREIGN_SERVER_DOESNT_EXIST)
@@ -5873,6 +6215,9 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
new (thd->mem_root) Item_int(thd,
(ulonglong) thd->variables.select_limit);
}
+ if (check_dependencies_in_with_clauses(lex->with_clauses_list))
+ return 1;
+
if (!(res= open_and_lock_tables(thd, all_tables, TRUE, 0)))
{
if (lex->describe)
@@ -7155,7 +7500,7 @@ void mysql_init_multi_delete(LEX *lex)
}
static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
- Parser_state *parser_state)
+ Parser_state *parser_state, bool is_next_command)
{
#ifdef WITH_WSREP
bool is_autocommit=
@@ -7174,7 +7519,7 @@ static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
MYSQL_SET_STATEMENT_TEXT(thd->m_statement_psi, thd->query(),
thd->query_length());
}
- mysql_parse(thd, rawbuf, length, parser_state);
+ mysql_parse(thd, rawbuf, length, parser_state, is_next_command);
if (WSREP(thd)) {
/* wsrep BF abort in query exec phase */
@@ -7272,10 +7617,11 @@ static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
@param length Length of the query text
@param[out] found_semicolon For multi queries, position of the character of
the next query in the query text.
+ @param is_next_command there will be more commands in the COM_MULTI batch
*/
void mysql_parse(THD *thd, char *rawbuf, uint length,
- Parser_state *parser_state)
+ Parser_state *parser_state, bool is_next_command)
{
int error __attribute__((unused));
DBUG_ENTER("mysql_parse");
@@ -7299,6 +7645,8 @@ void mysql_parse(THD *thd, char *rawbuf, uint length,
*/
lex_start(thd);
thd->reset_for_next_command();
+ if (is_next_command)
+ thd->server_status|= SERVER_MORE_RESULTS_EXISTS;
if (query_cache_send_result_to_client(thd, rawbuf, length) <= 0)
{
@@ -7989,7 +8337,7 @@ bool st_select_lex_unit::add_fake_select_lex(THD *thd_arg)
@retval
FALSE if all is OK
@retval
- TRUE if a memory allocation error occured
+ TRUE if a memory allocation error occurred
*/
bool
diff --git a/sql/sql_parse.h b/sql/sql_parse.h
index 6cb49f267d2..53a9ed3b24c 100644
--- a/sql/sql_parse.h
+++ b/sql/sql_parse.h
@@ -35,6 +35,7 @@ enum enum_mysql_completiontype {
extern "C" int test_if_data_home_dir(const char *dir);
int error_if_data_home_dir(const char *path, const char *what);
+my_bool net_allocate_new_packet(NET *net, void *thd, uint my_flags);
bool multi_update_precheck(THD *thd, TABLE_LIST *tables);
bool multi_delete_precheck(THD *thd, TABLE_LIST *tables);
@@ -87,7 +88,7 @@ bool is_log_table_write_query(enum enum_sql_command command);
bool alloc_query(THD *thd, const char *packet, uint packet_length);
void mysql_init_select(LEX *lex);
void mysql_parse(THD *thd, char *rawbuf, uint length,
- Parser_state *parser_state);
+ Parser_state *parser_state, bool is_com_multi);
bool mysql_new_select(LEX *lex, bool move_down);
void create_select_for_variable(const char *var_name);
void create_table_set_open_action_and_adjust_tables(LEX *lex);
@@ -99,7 +100,8 @@ int mysql_execute_command(THD *thd);
bool do_command(THD *thd);
void do_handle_bootstrap(THD *thd);
bool dispatch_command(enum enum_server_command command, THD *thd,
- char* packet, uint packet_length);
+ char* packet, uint packet_length,
+ bool is_com_multi, bool is_next_command);
void log_slow_statement(THD *thd);
bool append_file_to_dir(THD *thd, const char **filename_ptr,
const char *table_name);
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 9a59764f425..dbe19674cf2 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -1796,7 +1796,8 @@ static void plugin_load(MEM_ROOT *tmp_root)
goto end;
}
- if (init_read_record(&read_record_info, new_thd, table, NULL, 1, 0, FALSE))
+ if (init_read_record(&read_record_info, new_thd, table, NULL, NULL, 1, 0,
+ FALSE))
{
sql_print_error("Could not initialize init_read_record; Plugins not "
"loaded");
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 00b451c8821..8e5ab71288d 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -102,6 +102,7 @@ When one supplies long data for a placeholder:
#include "sql_acl.h" // *_ACL
#include "sql_derived.h" // mysql_derived_prepare,
// mysql_handle_derived
+#include "sql_cte.h"
#include "sql_cursor.h"
#include "sql_show.h"
#include "sql_repl.h"
@@ -326,8 +327,14 @@ find_prepared_statement(THD *thd, ulong id)
To strictly separate namespaces of SQL prepared statements and C API
prepared statements find() will return 0 if there is a named prepared
statement with such id.
+
+ LAST_STMT_ID is a special value that means the last prepared statement ID
+ (it was made for COM_MULTI to allow preparing and executing a statement
+ in the same command, but its usage is not limited to COM_MULTI).
*/
- Statement *stmt= thd->stmt_map.find(id);
+ Statement *stmt= ((id == LAST_STMT_ID) ?
+ thd->last_stmt :
+ thd->stmt_map.find(id));
if (stmt == 0 || stmt->type() != Query_arena::PREPARED_STATEMENT)
return NULL;
@@ -1500,6 +1507,8 @@ static int mysql_test_select(Prepared_statement *stmt,
lex->select_lex.context.resolve_in_select_list= TRUE;
ulong privilege= lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL;
+ if (check_dependencies_in_with_clauses(lex->with_clauses_list))
+ goto error;
if (tables)
{
if (check_table_access(thd, privilege, tables, FALSE, UINT_MAX, FALSE))
@@ -1995,7 +2004,7 @@ static int mysql_test_show_create_routine(Prepared_statement *stmt, int type)
@note This function handles create view commands.
@retval FALSE Operation was a success.
- @retval TRUE An error occured.
+ @retval TRUE An error occurred.
*/
static bool mysql_test_create_view(Prepared_statement *stmt)
@@ -2446,10 +2455,14 @@ static bool check_prepared_statement(Prepared_statement *stmt)
case SQLCOM_CREATE_USER:
case SQLCOM_RENAME_USER:
case SQLCOM_DROP_USER:
+ case SQLCOM_CREATE_ROLE:
+ case SQLCOM_DROP_ROLE:
case SQLCOM_ASSIGN_TO_KEYCACHE:
case SQLCOM_PRELOAD_KEYS:
case SQLCOM_GRANT:
+ case SQLCOM_GRANT_ROLE:
case SQLCOM_REVOKE:
+ case SQLCOM_REVOKE_ROLE:
case SQLCOM_KILL:
case SQLCOM_COMPOUND:
case SQLCOM_SHUTDOWN:
@@ -2569,7 +2582,10 @@ void mysqld_stmt_prepare(THD *thd, const char *packet, uint packet_length)
{
/* Statement map deletes statement on erase */
thd->stmt_map.erase(stmt);
+ thd->clear_last_stmt();
}
+ else
+ thd->set_last_stmt(stmt);
thd->protocol= save_protocol;
@@ -3155,6 +3171,9 @@ void mysqld_stmt_close(THD *thd, char *packet)
stmt->deallocate();
general_log_print(thd, thd->get_command(), NullS);
+ if (thd->last_stmt == stmt)
+ thd->clear_last_stmt();
+
DBUG_VOID_RETURN;
}
@@ -3417,7 +3436,8 @@ end:
Prepared_statement::Prepared_statement(THD *thd_arg)
:Statement(NULL, &main_mem_root,
- STMT_INITIALIZED, ++thd_arg->statement_id_counter),
+ STMT_INITIALIZED,
+ ((++thd_arg->statement_id_counter) & STMT_ID_MASK)),
thd(thd_arg),
result(thd_arg),
param_array(0),
diff --git a/sql/sql_prepare.h b/sql/sql_prepare.h
index b468ac1bf9b..aec4ac40036 100644
--- a/sql/sql_prepare.h
+++ b/sql/sql_prepare.h
@@ -18,6 +18,10 @@
#include "sql_error.h"
+
+#define LAST_STMT_ID 0xFFFFFFFF
+#define STMT_ID_MASK 0x7FFFFFFF
+
class THD;
struct LEX;
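
A small sketch of how the two constants above fit together (editorial assumption: 32-bit statement ids): generated ids are masked to the low 31 bits, so a real id can never collide with the LAST_STMT_ID sentinel that a COM_MULTI client can use to refer to the statement it has just prepared. The helper name next_statement_id is hypothetical.

  static ulong next_statement_id(ulong *counter)
  {
    ulong id= (++*counter) & STMT_ID_MASK;  /* id stays <= 0x7FFFFFFF          */
    /* ... and therefore never equals LAST_STMT_ID (0xFFFFFFFF), which a      */
    /* client may pass inside a COM_MULTI batch to execute the statement      */
    /* it has just prepared.                                                  */
    return id;
  }
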
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index cc22377b117..c9e2b3a586d 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -3281,7 +3281,7 @@ int reset_slave(THD *thd, Master_info* mi)
char fname[FN_REFLEN];
int thread_mask= 0, error= 0;
uint sql_errno=ER_UNKNOWN_ERROR;
- const char* errmsg= "Unknown error occured while reseting slave";
+ const char* errmsg= "Unknown error occurred while reseting slave";
char master_info_file_tmp[FN_REFLEN];
char relay_log_info_file_tmp[FN_REFLEN];
DBUG_ENTER("reset_slave");
@@ -3884,7 +3884,7 @@ bool mysql_show_binlog_events(THD* thd)
DBUG_ASSERT(thd->lex->sql_command == SQLCOM_SHOW_BINLOG_EVENTS ||
thd->lex->sql_command == SQLCOM_SHOW_RELAYLOG_EVENTS);
- /* select wich binary log to use: binlog or relay */
+ /* select which binary log to use: binlog or relay */
if ( thd->lex->sql_command == SQLCOM_SHOW_BINLOG_EVENTS )
{
binary_log= &mysql_bin_log;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 4ce29f1d666..3134f8eb007 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -53,6 +53,7 @@
#include "log_slow.h"
#include "sql_derived.h"
#include "sql_statistics.h"
+#include "sql_cte.h"
#include "debug_sync.h" // DEBUG_SYNC
#include <m_ctype.h>
@@ -448,7 +449,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
this field from inner subqueries.
@return Status
- @retval true An error occured.
+ @retval true An error occurred.
@retval false OK.
*/
@@ -828,6 +829,10 @@ JOIN::prepare(Item ***rref_pointer_array,
DBUG_RETURN(-1); /* purecov: inspected */
thd->lex->allow_sum_func= save_allow_sum_func;
}
+
+ With_clause *with_clause=select_lex->get_with_clause();
+ if (with_clause && with_clause->prepare_unreferenced_elements(thd))
+ DBUG_RETURN(1);
int res= check_and_do_in_subquery_rewrites(this);
@@ -1430,7 +1435,7 @@ JOIN::optimize_inner()
}
select= make_select(*table, const_table_map,
- const_table_map, conds, 1, &error);
+ const_table_map, conds, (SORT_INFO*) 0, 1, &error);
if (error)
{ /* purecov: inspected */
error= -1; /* purecov: inspected */
@@ -2368,15 +2373,11 @@ JOIN::reinit()
{
exec_tmp_table1->file->extra(HA_EXTRA_RESET_STATE);
exec_tmp_table1->file->ha_delete_all_rows();
- free_io_cache(exec_tmp_table1);
- filesort_free_buffers(exec_tmp_table1,0);
}
if (exec_tmp_table2)
{
exec_tmp_table2->file->extra(HA_EXTRA_RESET_STATE);
exec_tmp_table2->file->ha_delete_all_rows();
- free_io_cache(exec_tmp_table2);
- filesort_free_buffers(exec_tmp_table2,0);
}
clear_sj_tmp_tables(this);
if (items0)
@@ -3193,12 +3194,12 @@ void JOIN::exec_inner()
DBUG_VOID_RETURN;
sortorder= curr_join->sortorder;
if (curr_join->const_tables != curr_join->table_count &&
- !curr_join->join_tab[curr_join->const_tables].table->sort.io_cache)
+ !curr_join->join_tab[curr_join->const_tables].filesort)
{
/*
- If no IO cache exists for the first table then we are using an
- INDEX SCAN and no filesort. Thus we should not remove the sorted
- attribute on the INDEX SCAN.
+ If no filesort for the first table then we are using an
+ INDEX SCAN. Thus we should not remove the sorted attribute
+ on the INDEX SCAN.
*/
skip_sort_order= 1;
}
@@ -4095,6 +4096,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
select= make_select(s->table, found_const_table_map,
found_const_table_map,
*s->on_expr_ref ? *s->on_expr_ref : join->conds,
+ (SORT_INFO*) 0,
1, &error);
if (!select)
goto error;
@@ -9043,13 +9045,21 @@ JOIN::make_simple_join(JOIN *parent, TABLE *temp_table)
/*
Reuse TABLE * and JOIN_TAB if already allocated by a previous call
to this function through JOIN::exec (may happen for sub-queries).
+
+ psergey-todo: here, save the pointer for original join_tabs.
*/
- if (!parent->join_tab_reexec &&
- !(parent->join_tab_reexec= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB))))
- DBUG_RETURN(TRUE); /* purecov: inspected */
+ if (!(join_tab= parent->join_tab_reexec))
+ {
+ if (!(join_tab= parent->join_tab_reexec=
+ (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB))))
+ DBUG_RETURN(TRUE); /* purecov: inspected */
+ }
+ else
+ {
+ /* Free memory used by previous allocations */
+ delete join_tab->filesort;
+ }
- // psergey-todo: here, save the pointer for original join_tabs.
- join_tab= parent->join_tab_reexec;
table= &parent->table_reexec[0]; parent->table_reexec[0]= temp_table;
table_count= top_join_tab_count= 1;
@@ -11412,13 +11422,16 @@ bool error_if_full_join(JOIN *join)
void JOIN_TAB::cleanup()
{
DBUG_ENTER("JOIN_TAB::cleanup");
- DBUG_PRINT("enter", ("table %s.%s",
+ DBUG_PRINT("enter", ("tab: %p table %s.%s",
+ this,
(table ? table->s->db.str : "?"),
(table ? table->s->table_name.str : "?")));
delete select;
select= 0;
delete quick;
quick= 0;
+ delete filesort;
+ filesort= 0;
if (cache)
{
cache->free();
@@ -11817,8 +11830,8 @@ void JOIN::cleanup(bool full)
JOIN_TAB *first_tab= first_top_level_tab(this, WITHOUT_CONST_TABLES);
if (first_tab->table)
{
- free_io_cache(first_tab->table);
- filesort_free_buffers(first_tab->table, full);
+ delete first_tab->filesort;
+ first_tab->filesort= 0;
}
}
if (full)
@@ -12853,7 +12866,7 @@ bool Item_func_eq::check_equality(THD *thd, COND_EQUAL *cond_equal,
equality predicates that is equivalent to the conjunction.
Thus, =(a1,a2,a3) can substitute for ((a1=a3) AND (a2=a3) AND (a2=a1)) as
it is equivalent to ((a1=a2) AND (a2=a3)).
- The function always makes a substitution of all equality predicates occured
+ The function always makes a substitution of all equality predicates occurred
in a conjuction for a minimal set of multiple equality predicates.
This set can be considered as a canonical representation of the
sub-conjunction of the equality predicates.
@@ -15628,8 +15641,6 @@ const_expression_in_where(COND *cond, Item *comp_item, Field *comp_field,
the record in the original table.
If item == NULL then fill_record() will update
the temporary table
- @param convert_blob_length If >0 create a varstring(convert_blob_length)
- field instead of blob.
@retval
NULL on error
@@ -15639,23 +15650,12 @@ const_expression_in_where(COND *cond, Item *comp_item, Field *comp_field,
Field *create_tmp_field_from_field(THD *thd, Field *org_field,
const char *name, TABLE *table,
- Item_field *item, uint convert_blob_length)
+ Item_field *item)
{
Field *new_field;
- /*
- Make sure that the blob fits into a Field_varstring which has
- 2-byte lenght.
- */
- if (convert_blob_length && convert_blob_length <= Field_varstring::MAX_SIZE &&
- (org_field->flags & BLOB_FLAG))
- new_field= new Field_varstring(convert_blob_length,
- org_field->maybe_null(),
- org_field->field_name, table->s,
- org_field->charset());
- else
- new_field= org_field->make_new_field(thd->mem_root, table,
- table == org_field->table);
+ new_field= org_field->make_new_field(thd->mem_root, table,
+ table == org_field->table);
if (new_field)
{
new_field->init(table);
@@ -15682,9 +15682,7 @@ Field *create_tmp_field_from_field(THD *thd, Field *org_field,
}
-Field *Item::create_tmp_field(bool group, TABLE *table,
- uint convert_blob_length,
- uint convert_int_length)
+Field *Item::create_tmp_field(bool group, TABLE *table, uint convert_int_length)
{
Field *UNINIT_VAR(new_field);
MEM_ROOT *mem_root= table->in_use->mem_root;
@@ -15718,16 +15716,6 @@ Field *Item::create_tmp_field(bool group, TABLE *table,
*/
if (field_type() == MYSQL_TYPE_GEOMETRY)
new_field= tmp_table_field_from_field_type(table, true, false);
- /*
- Make sure that the blob fits into a Field_varstring which has
- 2-byte lenght.
- */
- else if (max_length / collation.collation->mbmaxlen > 255 &&
- convert_blob_length <= Field_varstring::MAX_SIZE &&
- convert_blob_length)
- new_field= new (mem_root)
- Field_varstring(convert_blob_length, maybe_null,
- name, table->s, collation.collation);
else
new_field= make_string_field(table);
new_field->set_derivation(collation.derivation);
@@ -15773,12 +15761,11 @@ Field *Item::create_tmp_field(bool group, TABLE *table,
*/
static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
- Item ***copy_func, bool modify_item,
- uint convert_blob_length)
+ Item ***copy_func, bool modify_item)
{
Field *UNINIT_VAR(new_field);
DBUG_ASSERT(thd == table->in_use);
- new_field= item->Item::create_tmp_field(false, table, convert_blob_length);
+ new_field= item->Item::create_tmp_field(false, table);
if (copy_func && item->real_item()->is_result_field())
*((*copy_func)++) = item; // Save for copy_funcs
@@ -15841,8 +15828,6 @@ Field *Item::create_field_for_schema(THD *thd, TABLE *table)
the record in the original table.
If modify_item is 0 then fill_record() will update
the temporary table
- @param convert_blob_length If >0 create a varstring(convert_blob_length)
- field instead of blob.
@retval
0 on error
@@ -15855,8 +15840,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
Field **default_field,
bool group, bool modify_item,
bool table_cant_handle_bit_fields,
- bool make_copy_field,
- uint convert_blob_length)
+ bool make_copy_field)
{
Field *result;
Item::Type orig_type= type;
@@ -15873,7 +15857,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
switch (type) {
case Item::SUM_FUNC_ITEM:
{
- result= item->create_tmp_field(group, table, convert_blob_length);
+ result= item->create_tmp_field(group, table);
if (!result)
my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
return result;
@@ -15909,7 +15893,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
item->maybe_null= orig_item->maybe_null;
}
result= create_tmp_field_from_item(thd, item, table, NULL,
- modify_item, convert_blob_length);
+ modify_item);
*from_field= field->field;
if (result && modify_item)
field->result_field= result;
@@ -15921,7 +15905,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
{
*from_field= field->field;
result= create_tmp_field_from_item(thd, item, table, copy_func,
- modify_item, convert_blob_length);
+ modify_item);
if (result && modify_item)
field->result_field= result;
}
@@ -15931,8 +15915,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
item->name,
table,
modify_item ? field :
- NULL,
- convert_blob_length);
+ NULL);
if (orig_type == Item::REF_ITEM && orig_modify)
((Item_ref*)orig_item)->set_result_field(result);
/*
@@ -15966,8 +15949,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
sp_result_field,
item_func_sp->name,
table,
- NULL,
- convert_blob_length);
+ NULL);
if (modify_item)
item->set_result_field(result_field);
@@ -15999,7 +15981,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
}
return create_tmp_field_from_item(thd, item, table,
(make_copy_field ? 0 : copy_func),
- modify_item, convert_blob_length);
+ modify_item);
case Item::TYPE_HOLDER:
result= ((Item_type_holder *)item)->make_field_by_type(table);
result->set_derivation(item->collation.derivation);
@@ -16309,8 +16291,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
create_tmp_field(thd, table, arg, arg->type(), &copy_func,
tmp_from_field, &default_field[fieldnr],
group != 0,not_all_columns,
- distinct, 0,
- param->convert_blob_length);
+ distinct, false);
if (!new_field)
goto err; // Should be OOM
tmp_from_field++;
@@ -16380,8 +16361,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
to be usable in this case too.
*/
item->marker == 4 || param->bit_fields_as_long,
- force_copy_fields,
- param->convert_blob_length);
+ force_copy_fields);
if (!new_field)
{
@@ -16745,8 +16725,6 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
cur_group->buff++; // Pointer to field data
group_buff++; // Skipp null flag
}
- /* In GROUP BY 'a' and 'a ' are equal for VARCHAR fields */
- key_part_info->key_part_flag|= HA_END_SPACE_ARE_EQUAL;
group_buff+= cur_group->field->pack_length();
}
keyinfo->key_length+= key_part_info->length;
@@ -17593,7 +17571,6 @@ free_tmp_table(THD *thd, TABLE *entry)
/* free blobs */
for (Field **ptr=entry->field ; *ptr ; ptr++)
(*ptr)->free();
- free_io_cache(entry);
if (entry->temp_pool_slot != MY_BIT_NONE)
bitmap_lock_clear_bit(&temp_pool, entry->temp_pool_slot);
@@ -19056,7 +19033,7 @@ int join_init_read_record(JOIN_TAB *tab)
if (!tab->preread_init_done && tab->preread_init())
return 1;
if (init_read_record(&tab->read_record, tab->join->thd, tab->table,
- tab->select,1,1, FALSE))
+ tab->select, tab->filesort, 1,1, FALSE))
return 1;
return (*tab->read_record.read_record)(&tab->read_record);
}
@@ -19074,7 +19051,7 @@ join_read_record_no_init(JOIN_TAB *tab)
save_copy_end= tab->read_record.copy_field_end;
init_read_record(&tab->read_record, tab->join->thd, tab->table,
- tab->select,1,1, FALSE);
+ tab->select, tab->filesort, 1, 1, FALSE);
tab->read_record.copy_field= save_copy;
tab->read_record.copy_field_end= save_copy_end;
@@ -19319,11 +19296,9 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
TABLE *table=jt->table;
join->select_options ^= OPTION_FOUND_ROWS;
- if (table->sort.record_pointers ||
- (table->sort.io_cache && my_b_inited(table->sort.io_cache)))
+ if (jt->filesort) // If filesort was used
{
- /* Using filesort */
- join->send_records= table->sort.found_records;
+ join->send_records= jt->filesort->found_rows;
}
else
{
@@ -21053,8 +21028,7 @@ use_filesort:
'join' is modified to use this index.
- If no index, create with filesort() an index file that can be used to
retrieve rows in order (should be done with 'read_record').
- The sorted data is stored in tab->table and will be freed when calling
- free_io_cache(tab->table).
+ The sorted data is stored in tab->filesort
RETURN VALUES
0 ok
@@ -21067,15 +21041,12 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
ha_rows filesort_limit, ha_rows select_limit,
bool is_order_by)
{
- uint length= 0;
- ha_rows examined_rows;
- ha_rows found_rows;
- ha_rows filesort_retval= HA_POS_ERROR;
+ uint length;
TABLE *table;
SQL_SELECT *select;
JOIN_TAB *tab;
- int err= 0;
bool quick_created= FALSE;
+ SORT_INFO *file_sort= 0;
DBUG_ENTER("create_sort_index");
if (join->table_count == join->const_tables)
@@ -21160,15 +21131,19 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
}
tab->update_explain_data(join->const_tables);
+ /*
+ Calculate length of join->order as this may be longer than 'order',
+ which may come from 'group by'. This is needed as join->sortorder is
+ used both for grouping and ordering.
+ */
+ length= 0;
for (ORDER *ord= join->order; ord; ord= ord->next)
length++;
- if (!(join->sortorder=
+
+ if (!(join->sortorder=
make_unireg_sortorder(thd, order, &length, join->sortorder)))
goto err; /* purecov: inspected */
- table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
- MYF(MY_WME | MY_ZEROFILL|
- MY_THREAD_SPECIFIC));
table->status=0; // May be wrong if quick_select
if (!tab->preread_init_done && tab->preread_init())
@@ -21212,12 +21187,18 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
if (table->s->tmp_table)
table->file->info(HA_STATUS_VARIABLE); // Get record count
- filesort_retval= filesort(thd, table, join->sortorder, length,
- select, filesort_limit, 0,
- &examined_rows, &found_rows,
- join->explain->ops_tracker.report_sorting(thd));
- table->sort.found_records= filesort_retval;
- tab->records= join->select_options & OPTION_FOUND_ROWS ? found_rows : filesort_retval;
+ file_sort= filesort(thd, table, join->sortorder, length,
+ select, filesort_limit, 0,
+ join->explain->ops_tracker.report_sorting(thd));
+ DBUG_ASSERT(tab->filesort == 0);
+ tab->filesort= file_sort;
+ tab->records= 0;
+ if (file_sort)
+ {
+ tab->records= join->select_options & OPTION_FOUND_ROWS ?
+ file_sort->found_rows : file_sort->return_rows;
+ tab->join->join_examined_rows+= file_sort->examined_rows;
+ }
if (quick_created)
{
@@ -21240,12 +21221,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
tab->type=JT_ALL; // Read with normal read_record
tab->read_first_record= join_init_read_record;
tab->table->file->ha_index_or_rnd_end();
-
- if (err)
- goto err;
- tab->join->join_examined_rows+= examined_rows;
- DBUG_RETURN(filesort_retval == HA_POS_ERROR);
+ DBUG_RETURN(file_sort == 0);
err:
DBUG_RETURN(-1);
}
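
A condensed sketch of the new filesort() contract as used in the create_sort_index() hunk above: the call now returns a heap-allocated SORT_INFO (NULL on error) instead of a row count, the JOIN_TAB takes ownership of it, and the row statistics move into SORT_INFO members. Variable names below are illustrative, not a verbatim excerpt.

  SORT_INFO *sort= filesort(thd, table, sortorder, length,
                            select, filesort_limit, 0, tracker);
  if (!sort)
    return true;                           /* error was already reported      */
  tab->filesort= sort;                     /* the JOIN_TAB owns the result    */
  tab->records= (join->select_options & OPTION_FOUND_ROWS) ?
                sort->found_rows : sort->return_rows;
  join->join_examined_rows+= sort->examined_rows;
  /* JOIN_TAB::cleanup() later runs "delete filesort" to release it.          */
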
@@ -21368,7 +21345,6 @@ remove_duplicates(JOIN *join, TABLE *table, List<Item> &fields, Item *having)
if (thd->killed == ABORT_QUERY)
thd->reset_killed();
- free_io_cache(table); // Safety
table->file->info(HA_STATUS_VARIABLE);
if (table->s->db_type() == heap_hton ||
(!table->s->blob_fields &&
@@ -21764,7 +21740,11 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
*/
if (order_item->type() == Item::INT_ITEM && order_item->basic_const_item())
{ /* Order by position */
- uint count= (uint) order_item->val_int();
+ uint count;
+ if (order->counter_used)
+ count= order->counter; // counter was once resolved
+ else
+ count= (uint) order_item->val_int();
if (!count || count > fields.elements)
{
my_error(ER_BAD_FIELD_ERROR, MYF(0),
@@ -21781,7 +21761,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
select_item= find_item_in_list(order_item, fields, &counter,
REPORT_EXCEPT_NOT_FOUND, &resolution);
if (!select_item)
- return TRUE; /* The item is not unique, or some other error occured. */
+ return TRUE; /* The item is not unique, or some other error occurred. */
/* Check whether the resolved field is not ambiguos. */
@@ -23131,8 +23111,8 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
}
join_tab->set_select_cond(cond, __LINE__);
}
- else if ((join_tab->select= make_select(join_tab->table, 0, 0, cond, 0,
- &error)))
+ else if ((join_tab->select= make_select(join_tab->table, 0, 0, cond,
+ (SORT_INFO*) 0, 0, &error)))
join_tab->set_select_cond(cond, __LINE__);
DBUG_RETURN(error ? TRUE : FALSE);
@@ -24192,9 +24172,8 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
/* There should be no attempts to save query plans for merged selects */
DBUG_ASSERT(!join->select_lex->master_unit()->derived ||
- join->select_lex->master_unit()->derived->is_materialized_derived());
-
- explain= NULL;
+ join->select_lex->master_unit()->derived->is_materialized_derived() ||
+ join->select_lex->master_unit()->derived->is_with_table());
/* Don't log this into the slow query log */
@@ -24704,11 +24683,19 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
}
else if (derived)
{
- // A derived table
- str->append('(');
- derived->print(str, query_type);
- str->append(')');
- cmp_name= ""; // Force printing of alias
+ if (!derived->derived->is_with_table())
+ {
+ // A derived table
+ str->append('(');
+ derived->print(str, query_type);
+ str->append(')');
+ cmp_name= ""; // Force printing of alias
+ }
+ else
+ {
+ append_identifier(thd, str, table_name, table_name_length);
+ cmp_name= table_name;
+ }
}
else
{
@@ -25104,7 +25091,7 @@ void JOIN::restore_query_plan(Join_plan_state *restore_from)
@retval REOPT_NEW_PLAN there is a new plan.
@retval REOPT_OLD_PLAN no new improved plan was produced, use the old one.
- @retval REOPT_ERROR an irrecovarable error occured during reoptimization.
+ @retval REOPT_ERROR an irrecovarable error occurred during reoptimization.
*/
JOIN::enum_reopt_result
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 95550f56450..87de9316c3a 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -32,6 +32,7 @@
#include "sql_array.h" /* Array */
#include "records.h" /* READ_RECORD */
#include "opt_range.h" /* SQL_SELECT, QUICK_SELECT_I */
+#include "filesort.h"
/* Values in optimize */
#define KEY_OPTIMIZE_EXISTS 1
@@ -236,6 +237,7 @@ typedef struct st_join_table {
For join tabs that are inside an SJM bush: root of the bush
*/
st_join_table *bush_root_tab;
+ SORT_INFO *filesort;
/* TRUE <=> This join_tab is inside an SJM bush and is the last leaf tab here */
bool last_leaf_in_bush;
@@ -938,7 +940,7 @@ protected:
enum enum_reopt_result {
REOPT_NEW_PLAN, /* there is a new reoptimized plan */
REOPT_OLD_PLAN, /* no new improved plan can be found, use the old one */
- REOPT_ERROR, /* an irrecovarable error occured during reoptimization */
+ REOPT_ERROR, /* an irrecovarable error occurred during reoptimization */
REOPT_NONE /* not yet reoptimized */
};
@@ -1590,8 +1592,8 @@ bool copy_funcs(Item **func_ptr, const THD *thd);
uint find_shortest_key(TABLE *table, const key_map *usable_keys);
Field* create_tmp_field_from_field(THD *thd, Field* org_field,
const char *name, TABLE *table,
- Item_field *item, uint convert_blob_length);
-
+ Item_field *item);
+
bool is_indexed_agg_distinct(JOIN *join, List<Item_field> *out_args);
/* functions from opt_sum.cc */
@@ -1849,8 +1851,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
Field **def_field,
bool group, bool modify_item,
bool table_cant_handle_bit_fields,
- bool make_copy_field,
- uint convert_blob_length);
+ bool make_copy_field);
/*
General routine to change field->ptr of a NULL-terminated array of Field
diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc
index 0138c3e5a3b..196c138c04d 100644
--- a/sql/sql_servers.cc
+++ b/sql/sql_servers.cc
@@ -205,8 +205,8 @@ static bool servers_load(THD *thd, TABLE_LIST *tables)
free_root(&mem, MYF(0));
init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0, MYF(0));
- if (init_read_record(&read_record_info,thd,table=tables[0].table,NULL,1,0,
- FALSE))
+ if (init_read_record(&read_record_info,thd,table=tables[0].table, NULL, NULL,
+ 1,0, FALSE))
DBUG_RETURN(1);
while (!(read_record_info.read_record(&read_record_info)))
{
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 533f9fa232e..aa2b47fa4b7 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -39,7 +39,6 @@
#include "tztime.h" // struct Time_zone
#include "sql_acl.h" // TABLE_ACLS, check_grant, DB_ACLS, acl_get,
// check_grant_db
-#include "filesort.h" // filesort_free_buffers
#include "sp.h"
#include "sp_head.h"
#include "sp_pcontext.h"
@@ -1431,14 +1430,13 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild)
static const char *require_quotes(const char *name, uint name_length)
{
- uint length;
bool pure_digit= TRUE;
const char *end= name + name_length;
for (; name < end ; name++)
{
uchar chr= (uchar) *name;
- length= my_mbcharlen(system_charset_info, chr);
+ int length= my_charlen(system_charset_info, name, end);
if (length == 1 && !system_charset_info->ident_map[chr])
return name;
if (length == 1 && (chr < '0' || chr > '9'))
@@ -1496,24 +1494,25 @@ append_identifier(THD *thd, String *packet, const char *name, uint length)
if (packet->append(&quote_char, 1, quote_charset))
return true;
- for (name_end= name+length ; name < name_end ; name+= length)
+ for (name_end= name+length ; name < name_end ; )
{
uchar chr= (uchar) *name;
- length= my_mbcharlen(system_charset_info, chr);
+ int char_length= my_charlen(system_charset_info, name, name_end);
/*
- my_mbcharlen can return 0 on a wrong multibyte
+ charlen can return 0 and negative numbers on a wrong multibyte
sequence. It is possible when upgrading from 4.0,
and identifier contains some accented characters.
The manual says it does not work. So we'll just
- change length to 1 not to hang in the endless loop.
+ change char_length to 1 not to hang in the endless loop.
*/
- if (!length)
- length= 1;
- if (length == 1 && chr == (uchar) quote_char &&
+ if (char_length <= 0)
+ char_length= 1;
+ if (char_length == 1 && chr == (uchar) quote_char &&
packet->append(&quote_char, 1, quote_charset))
return true;
- if (packet->append(name, length, system_charset_info))
+ if (packet->append(name, char_length, system_charset_info))
return true;
+ name+= char_length;
}
return packet->append(&quote_char, 1, quote_charset);
}
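
A brief sketch of the defensive scan used above, since the same my_mbcharlen-to-my_charlen conversion recurs in the sql_table.cc hunk below: my_charlen() is given the end pointer and may return zero or a negative value on a broken multibyte sequence, which the loop clamps to one byte so it always advances. The loop below is a condensed illustration, not the committed code.

  /* Walk a possibly malformed identifier one character at a time. */
  for (const char *p= name, *end= name + name_length; p < end; )
  {
    int char_length= my_charlen(system_charset_info, p, end);
    if (char_length <= 0)      /* broken sequence: consume a single byte */
      char_length= 1;
    /* ... inspect or copy the char_length bytes starting at p ... */
    p+= char_length;
  }
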
@@ -2352,7 +2351,8 @@ static int show_create_view(THD *thd, TABLE_LIST *table, String *buff)
We can't just use table->query, because our SQL_MODE may trigger
a different syntax, like when ANSI_QUOTES is defined.
*/
- table->view->unit.print(buff, QT_ORDINARY);
+ table->view->unit.print(buff, enum_query_type(QT_ORDINARY |
+ QT_ITEM_ORIGINAL_FUNC_NULLIF));
if (table->with_check != VIEW_CHECK_NONE)
{
@@ -3524,7 +3524,7 @@ bool get_lookup_value(THD *thd, Item_func *item_func,
/* Lookup value is database name */
if (!cs->coll->strnncollsp(cs, (uchar *) field_name1, strlen(field_name1),
(uchar *) item_field->field_name,
- strlen(item_field->field_name), 0))
+ strlen(item_field->field_name)))
{
thd->make_lex_string(&lookup_field_vals->db_value,
tmp_str->ptr(), tmp_str->length());
@@ -3533,7 +3533,7 @@ bool get_lookup_value(THD *thd, Item_func *item_func,
else if (!cs->coll->strnncollsp(cs, (uchar *) field_name2,
strlen(field_name2),
(uchar *) item_field->field_name,
- strlen(item_field->field_name), 0))
+ strlen(item_field->field_name)))
{
thd->make_lex_string(&lookup_field_vals->table_value,
tmp_str->ptr(), tmp_str->length());
@@ -3619,10 +3619,10 @@ bool uses_only_table_name_fields(Item *item, TABLE_LIST *table)
if (table->table != item_field->field->table ||
(cs->coll->strnncollsp(cs, (uchar *) field_name1, strlen(field_name1),
(uchar *) item_field->field_name,
- strlen(item_field->field_name), 0) &&
+ strlen(item_field->field_name)) &&
cs->coll->strnncollsp(cs, (uchar *) field_name2, strlen(field_name2),
(uchar *) item_field->field_name,
- strlen(item_field->field_name), 0)))
+ strlen(item_field->field_name))))
return 0;
}
else if (item->type() == Item::REF_ITEM)
@@ -4343,7 +4343,7 @@ uint get_table_open_method(TABLE_LIST *tables,
@retval FALSE No error, if lock was obtained TABLE_LIST::mdl_request::ticket
is set to non-NULL value.
- @retval TRUE Some error occured (probably thread was killed).
+ @retval TRUE Some error occurred (probably thread was killed).
*/
static bool
@@ -4451,7 +4451,7 @@ static int fill_schema_table_from_frm(THD *thd, TABLE_LIST *tables,
if (try_acquire_high_prio_shared_mdl_lock(thd, &table_list, can_deadlock))
{
/*
- Some error occured (most probably we have been killed while
+ Some error occurred (most probably we have been killed while
waiting for conflicting locks to go away), let the caller to
handle the situation.
*/
@@ -5062,7 +5062,10 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
HA_STATUS_TIME |
HA_STATUS_VARIABLE_EXTRA |
HA_STATUS_AUTO)) != 0)
+ {
+ file->print_error(info_error, MYF(0));
goto err;
+ }
enum row_type row_type = file->get_row_type();
switch (row_type) {
@@ -8066,8 +8069,6 @@ bool get_schema_tables_result(JOIN *join,
table_list->table->file->extra(HA_EXTRA_NO_CACHE);
table_list->table->file->extra(HA_EXTRA_RESET_STATE);
table_list->table->file->ha_delete_all_rows();
- free_io_cache(table_list->table);
- filesort_free_buffers(table_list->table,1);
table_list->table->null_row= 0;
}
else
diff --git a/sql/sql_sort.h b/sql/sql_sort.h
index 1622d9df360..6c97ad7e9ab 100644
--- a/sql/sql_sort.h
+++ b/sql/sql_sort.h
@@ -16,8 +16,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-#include "m_string.h" /* memset */
-#include "my_global.h" /* uchar */
#include "my_base.h" /* ha_rows */
#include "my_sys.h" /* qsort2_cmp */
#include "queues.h"
@@ -71,7 +69,6 @@ public:
uint rec_length; // Length of sorted records.
uint sort_length; // Length of sorted columns.
uint ref_length; // Length of record ref.
- uint addon_length; // Length of added packed fields.
uint res_length; // Length of records in final sorted file/buffer.
uint max_keys_per_buffer; // Max keys / buffer.
uint min_dupl_count;
@@ -81,6 +78,8 @@ public:
SORT_FIELD *local_sortorder;
SORT_FIELD *end;
SORT_ADDON_FIELD *addon_field; // Descriptors for companion fields.
+ LEX_STRING addon_buf; // Buffer & length of added packed fields.
+
uchar *unique_buff;
bool not_killable;
char* tmp_buffer;
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index 324741fb55e..f6811b020eb 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -28,6 +28,7 @@
#include "key.h"
#include "sql_statistics.h"
#include "opt_range.h"
+#include "uniques.h"
#include "my_atomic.h"
/*
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index 8cf20e71f55..40339d599af 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -76,9 +76,9 @@ bool String::real_alloc(uint32 length)
@retval false Either the copy operation is complete or, if the size of the
new buffer is smaller than the currently allocated buffer (if one exists),
- no allocation occured.
+ no allocation occurred.
- @retval true An error occured when attempting to allocate memory.
+ @retval true An error occurred when attempting to allocate memory.
*/
bool String::realloc_raw(uint32 alloc_length)
{
@@ -759,7 +759,7 @@ int sortcmp(const String *s,const String *t, CHARSET_INFO *cs)
{
return cs->coll->strnncollsp(cs,
(uchar *) s->ptr(),s->length(),
- (uchar *) t->ptr(),t->length(), 0);
+ (uchar *) t->ptr(),t->length());
}
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 5903f8420b2..dad51139af3 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -90,7 +90,7 @@ static char* add_identifier(THD* thd, char *to_p, const char * end_p,
{
uint res;
uint errors;
- const char *conv_name;
+ const char *conv_name, *conv_name_end;
char tmp_name[FN_REFLEN];
char conv_string[FN_REFLEN];
int quote;
@@ -111,11 +111,13 @@ static char* add_identifier(THD* thd, char *to_p, const char * end_p,
{
DBUG_PRINT("error", ("strconvert of '%s' failed with %u (errors: %u)", conv_name, res, errors));
conv_name= name;
+ conv_name_end= name + name_len;
}
else
{
DBUG_PRINT("info", ("conv '%s' -> '%s'", conv_name, conv_string));
conv_name= conv_string;
+ conv_name_end= conv_string + res;
}
quote = thd ? get_quote_char_for_identifier(thd, conv_name, res - 1) : '"';
@@ -125,8 +127,8 @@ static char* add_identifier(THD* thd, char *to_p, const char * end_p,
*(to_p++)= (char) quote;
while (*conv_name && (end_p - to_p - 1) > 0)
{
- uint length= my_mbcharlen(system_charset_info, *conv_name);
- if (!length)
+ int length= my_charlen(system_charset_info, conv_name, conv_name_end);
+ if (length <= 0)
length= 1;
if (length == 1 && *conv_name == (char) quote)
{
@@ -9349,15 +9351,14 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
int error= 1;
Copy_field *copy= NULL, *copy_end;
ha_rows found_count= 0, delete_count= 0;
- uint length= 0;
SORT_FIELD *sortorder;
+ SORT_INFO *file_sort= 0;
READ_RECORD info;
TABLE_LIST tables;
List<Item> fields;
List<Item> all_fields;
- ha_rows examined_rows;
- ha_rows found_rows;
bool auto_increment_field_copied= 0;
+ bool init_read_record_done= 0;
ulonglong save_sql_mode= thd->variables.sql_mode;
ulonglong prev_insert_id, time_to_report_progress;
Field **dfield_ptr= to->default_field;
@@ -9440,9 +9441,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
}
else
{
- from->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
- MYF(MY_FAE | MY_ZEROFILL |
- MY_THREAD_SPECIFIC));
+ uint length= 0;
bzero((char *) &tables, sizeof(tables));
tables.table= from;
tables.alias= tables.table_name= from->s->table_name.str;
@@ -9454,12 +9453,10 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
setup_order(thd, thd->lex->select_lex.ref_pointer_array,
&tables, fields, all_fields, order) ||
!(sortorder= make_unireg_sortorder(thd, order, &length, NULL)) ||
- (from->sort.found_records= filesort(thd, from, sortorder, length,
- NULL, HA_POS_ERROR,
- true,
- &examined_rows, &found_rows,
- &dummy_tracker)) ==
- HA_POS_ERROR)
+ !(file_sort= filesort(thd, from, sortorder, length,
+ NULL, HA_POS_ERROR,
+ true,
+ &dummy_tracker)))
goto err;
}
thd_progress_next_stage(thd);
@@ -9469,8 +9466,10 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
/* Tell handler that we have values for all columns in the to table */
to->use_all_columns();
to->mark_virtual_columns_for_write(TRUE);
- if (init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1, 1, FALSE))
+ if (init_read_record(&info, thd, from, (SQL_SELECT *) 0, file_sort, 1, 1,
+ FALSE))
goto err;
+ init_read_record_done= 1;
if (ignore && !alter_ctx->fk_error_if_delete_row)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
@@ -9585,9 +9584,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
found_count++;
thd->get_stmt_da()->inc_current_row_for_warning();
}
- end_read_record(&info);
- free_io_cache(from);
- delete [] copy;
THD_STAGE_INFO(thd, stage_enabling_keys);
thd_progress_next_stage(thd);
@@ -9608,6 +9604,12 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
error= 1;
err:
+ /* Free resources */
+ if (init_read_record_done)
+ end_read_record(&info);
+ delete [] copy;
+ delete file_sort;
+
thd->variables.sql_mode= save_sql_mode;
thd->abort_on_warning= 0;
*copied= found_count;
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 272e1445273..63093620805 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -1101,6 +1101,7 @@ bool Table_triggers_list::prepare_record_accessors(TABLE *table)
table == (*fld)->table)))
return 1;
+ f->flags= (*fld)->flags;
f->null_ptr= null_ptr;
f->null_bit= null_bit;
if (null_bit == 128)
@@ -1580,7 +1581,6 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
DBUG_RETURN(0);
err_with_lex_cleanup:
- // QQ: anything else ?
lex_end(&lex);
thd->lex= old_lex;
thd->spcont= save_spcont;
@@ -2305,7 +2305,7 @@ void Table_triggers_list::mark_fields_used(trg_event_type event)
/**
- Signals to the Table_triggers_list that a parse error has occured when
+ Signals to the Table_triggers_list that a parse error has occurred when
reading a trigger from file. This makes the Table_triggers_list enter an
error state flagged by m_has_unparseable_trigger == true. The error message
will be used whenever a statement invoking or manipulating triggers is
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 0b294b5af8c..502bc88c489 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -180,7 +180,8 @@ void udf_init()
}
table= tables.table;
- if (init_read_record(&read_record_info, new_thd, table, NULL,1,0,FALSE))
+ if (init_read_record(&read_record_info, new_thd, table, NULL, NULL, 1, 0,
+ FALSE))
{
sql_print_error("Could not initialize init_read_record; udf's not "
"loaded");
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 069eadc7519..5685c90850a 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -173,7 +173,8 @@ select_union::create_result_table(THD *thd_arg, List<Item> *column_types,
/**
- Reset and empty the temporary table that stores the materialized query result.
+ Reset and empty the temporary table that stores the materialized query
+ result.
@note The cleanup performed here is exactly the same as for the two temp
tables of JOIN - exec_tmp_table_[1 | 2].
@@ -183,8 +184,6 @@ void select_union::cleanup()
{
table->file->extra(HA_EXTRA_RESET_STATE);
table->file->ha_delete_all_rows();
- free_io_cache(table);
- filesort_free_buffers(table,0);
}
@@ -1161,11 +1160,22 @@ List<Item> *st_select_lex_unit::get_unit_column_types()
return &sl->item_list;
}
+
+static void cleanup_order(ORDER *order)
+{
+ for (; order; order= order->next)
+ order->counter_used= 0;
+}
+
+
bool st_select_lex::cleanup()
{
bool error= FALSE;
DBUG_ENTER("st_select_lex::cleanup()");
+ cleanup_order(order_list.first);
+ cleanup_order(group_list.first);
+
if (join)
{
DBUG_ASSERT((st_select_lex*)join->select_lex == this);
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 55e5cf2c526..61c16a905fe 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -270,6 +270,7 @@ int mysql_update(THD *thd,
key_map old_covering_keys;
TABLE *table;
SQL_SELECT *select= NULL;
+ SORT_INFO *file_sort= 0;
READ_RECORD info;
SELECT_LEX *select_lex= &thd->lex->select_lex;
ulonglong id;
@@ -420,7 +421,7 @@ int mysql_update(THD *thd,
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
set_statistics_for_table(thd, table);
- select= make_select(table, 0, 0, conds, 0, &error);
+ select= make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error);
if (error || !limit || thd->is_error() ||
(select && select->check_quick(thd, safe_update, limit)))
{
@@ -558,26 +559,18 @@ int mysql_update(THD *thd,
*/
uint length= 0;
SORT_FIELD *sortorder;
- ha_rows examined_rows;
- ha_rows found_rows;
- table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
- MYF(MY_FAE | MY_ZEROFILL |
- MY_THREAD_SPECIFIC));
Filesort_tracker *fs_tracker=
thd->lex->explain->get_upd_del_plan()->filesort_tracker;
if (!(sortorder=make_unireg_sortorder(thd, order, &length, NULL)) ||
- (table->sort.found_records= filesort(thd, table, sortorder, length,
- select, limit,
- true,
- &examined_rows, &found_rows,
- fs_tracker))
- == HA_POS_ERROR)
- {
+ !(file_sort= filesort(thd, table, sortorder, length,
+ select, limit,
+ true,
+ fs_tracker)))
goto err;
- }
- thd->inc_examined_row_count(examined_rows);
+ thd->inc_examined_row_count(file_sort->examined_rows);
+
/*
Filesort has already found and selected the rows we want to update,
so we don't need the where clause
@@ -618,7 +611,7 @@ int mysql_update(THD *thd,
*/
if (query_plan.index == MAX_KEY || (select && select->quick))
- error= init_read_record(&info, thd, table, select, 0, 1, FALSE);
+ error= init_read_record(&info, thd, table, select, NULL, 0, 1, FALSE);
else
error= init_read_record_idx(&info, thd, table, 1, query_plan.index,
reverse);
@@ -662,8 +655,9 @@ int mysql_update(THD *thd,
else
{
/*
- Don't try unlocking the row if skip_record reported an error since in
- this case the transaction might have been rolled back already.
+ Don't try unlocking the row if skip_record reported an
+ error since in this case the transaction might have been
+ rolled back already.
*/
if (error < 0)
{
@@ -712,7 +706,7 @@ int mysql_update(THD *thd,
if (select && select->quick && select->quick->reset())
goto err;
table->file->try_semi_consistent_read(1);
- if (init_read_record(&info, thd, table, select, 0, 1, FALSE))
+ if (init_read_record(&info, thd, table, select, file_sort, 0, 1, FALSE))
goto err;
updated= found= 0;
@@ -1020,6 +1014,7 @@ int mysql_update(THD *thd,
}
DBUG_ASSERT(transactional_table || !updated || thd->transaction.stmt.modified_non_trans_table);
free_underlaid_joins(thd, select_lex);
+ delete file_sort;
/* If LAST_INSERT_ID(X) was used, report X */
id= thd->arg_of_last_insert_id_function ?
@@ -1053,6 +1048,7 @@ int mysql_update(THD *thd,
err:
delete select;
+ delete file_sort;
free_underlaid_joins(thd, select_lex);
table->disable_keyread();
thd->abort_on_warning= 0;
@@ -2592,7 +2588,7 @@ bool multi_update::send_eof()
if (local_error > 0) // if the above log write did not fail ...
{
/* Safety: If we haven't got an error before (can happen in do_updates) */
- my_message(ER_UNKNOWN_ERROR, "An error occured in multi-table update",
+ my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
MYF(0));
DBUG_RETURN(TRUE);
}
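
mysql_update() adopts the same contract: the examined-row count that filesort() used to return through out-parameters is now read from the returned object (file_sort->examined_rows), and the object has to be deleted on both the success path and the err: path. Purely as an illustration of that ownership rule, and not something the patch itself does, the two delete sites could be expressed with a scope guard; a minimal sketch, assuming a C++11 compiler:

    #include <memory>

    /* Hypothetical rewrite of the ownership handling in mysql_update();
       the patch uses an explicit 'delete file_sort' on both exit paths. */
    std::unique_ptr<SORT_INFO> file_sort(
        filesort(thd, table, sortorder, length, select, limit,
                 true, fs_tracker));
    if (!file_sort)
      return 1;                                  /* freed automatically */
    thd->inc_examined_row_count(file_sort->examined_rows);
    /* ... read rows via init_read_record(&info, thd, table, select,
           file_sort.get(), 0, 1, FALSE) ... */
    /* success path: the SORT_INFO is freed when file_sort leaves scope */
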
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 4dac4f91f74..41fd5b78f04 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -167,7 +167,7 @@ err:
@param item_list List of Items which should be checked
*/
-static void make_valid_column_names(THD *thd, List<Item> &item_list)
+void make_valid_column_names(THD *thd, List<Item> &item_list)
{
Item *item;
uint name_len;
@@ -244,7 +244,7 @@ fill_defined_view_parts (THD *thd, TABLE_LIST *view)
@param mode VIEW_CREATE_NEW, VIEW_ALTER, VIEW_CREATE_OR_REPLACE
@retval FALSE Operation was a success.
- @retval TRUE An error occured.
+ @retval TRUE An error occurred.
*/
bool create_view_precheck(THD *thd, TABLE_LIST *tables, TABLE_LIST *view,
@@ -387,7 +387,7 @@ bool create_view_precheck(THD *thd, TABLE_LIST *tables, TABLE_LIST *view,
@note This function handles both create and alter view commands.
@retval FALSE Operation was a success.
- @retval TRUE An error occured.
+ @retval TRUE An error occurred.
*/
bool mysql_create_view(THD *thd, TABLE_LIST *views,
@@ -901,9 +901,11 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
ulong sql_mode= thd->variables.sql_mode & MODE_ANSI_QUOTES;
thd->variables.sql_mode&= ~MODE_ANSI_QUOTES;
- lex->unit.print(&view_query, QT_VIEW_INTERNAL);
- lex->unit.print(&is_query,
- enum_query_type(QT_TO_SYSTEM_CHARSET | QT_WITHOUT_INTRODUCERS));
+ lex->unit.print(&view_query, enum_query_type(QT_VIEW_INTERNAL |
+ QT_ITEM_ORIGINAL_FUNC_NULLIF));
+ lex->unit.print(&is_query, enum_query_type(QT_TO_SYSTEM_CHARSET |
+ QT_WITHOUT_INTRODUCERS |
+ QT_ITEM_ORIGINAL_FUNC_NULLIF));
thd->variables.sql_mode|= sql_mode;
}
diff --git a/sql/sql_view.h b/sql/sql_view.h
index 9c75643fd48..b9eb92198f8 100644
--- a/sql/sql_view.h
+++ b/sql/sql_view.h
@@ -56,8 +56,12 @@ bool check_duplicate_names(THD *thd, List<Item>& item_list,
bool mysql_rename_view(THD *thd, const char *new_db, const char *new_name,
TABLE_LIST *view);
+void make_valid_column_names(THD *thd, List<Item> &item_list);
+
#define VIEW_ANY_ACL (SELECT_ACL | UPDATE_ACL | INSERT_ACL | DELETE_ACL)
extern const LEX_STRING view_type;
+void make_valid_column_names(List<Item> &item_list);
+
#endif /* SQL_VIEW_INCLUDED */
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 6b896eabde7..1870b3f719f 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -54,6 +54,7 @@
#include "sql_handler.h" // Sql_cmd_handler_*
#include "sql_signal.h"
#include "sql_get_diagnostics.h" // Sql_cmd_get_diagnostics
+#include "sql_cte.h"
#include "event_parse_data.h"
#include "create_options.h"
#include <myisam.h>
@@ -988,6 +989,8 @@ bool LEX::set_bincmp(CHARSET_INFO *cs, bool bin)
class sp_label *splabel;
class sp_name *spname;
class sp_variable *spvar;
+ class With_clause *with_clause;
+
handlerton *db_type;
st_select_lex *select_lex;
struct p_elem_val *p_elem_value;
@@ -1485,6 +1488,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token REAL /* SQL-2003-R */
%token REBUILD_SYM
%token RECOVER_SYM
+%token RECURSIVE_SYM
%token REDOFILE_SYM
%token REDO_BUFFER_SIZE_SYM
%token REDUNDANT_SYM
@@ -1769,6 +1773,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
case_stmt_body opt_bin_mod
opt_if_exists_table_element opt_if_not_exists_table_element
opt_into opt_procedure_clause
+ opt_recursive
%type <object_ddl_options>
create_or_replace
@@ -2011,6 +2016,10 @@ END_OF_INPUT
THEN_SYM WHEN_SYM DIV_SYM MOD_SYM OR2_SYM AND_AND_SYM DELETE_SYM
ROLE_SYM
+%type <with_clause> opt_with_clause with_clause
+
+%type <lex_str_ptr> query_name
+
%%
@@ -8457,10 +8466,11 @@ opt_ignore_leaves:
select:
- select_init
+ opt_with_clause select_init
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SELECT;
+ lex->current_select->set_with_clause($1);
}
;
@@ -10886,20 +10896,20 @@ table_factor:
and our parser. Possibly this rule could be replaced by our
query_expression_body.
*/
- | '(' get_select_lex select_derived_union ')' opt_table_alias
+ | '(' opt_with_clause get_select_lex select_derived_union ')' opt_table_alias
{
- /* Use $2 instead of Lex->current_select as derived table will
+ /* Use $3 instead of Lex->current_select as derived table will
alter value of Lex->current_select. */
- if (!($3 || $5) && $2->embedding &&
- !$2->embedding->nested_join->join_list.elements)
+ if (!($4 || $6) && $3->embedding &&
+ !$3->embedding->nested_join->join_list.elements)
{
- /* we have a derived table ($3 == NULL) but no alias,
+ /* we have a derived table ($4 == NULL) but no alias,
Since we are nested in further parentheses so we
can pass NULL to the outer level parentheses
Permits parsing of "((((select ...))) as xyz)" */
$$= 0;
}
- else if (!$3)
+ else if (!$4)
{
/* Handle case of derived table, alias may be NULL if there
are no outer parentheses, add_table_to_list() will throw
@@ -10907,12 +10917,13 @@ table_factor:
LEX *lex=Lex;
SELECT_LEX *sel= lex->current_select;
SELECT_LEX_UNIT *unit= sel->master_unit();
+ unit->set_with_clause($2);
lex->current_select= sel= unit->outer_select();
Table_ident *ti= new (thd->mem_root) Table_ident(unit);
if (ti == NULL)
MYSQL_YYABORT;
if (!($$= sel->add_table_to_list(lex->thd,
- ti, $5, 0,
+ ti, $6, 0,
TL_READ, MDL_SHARED_READ)))
MYSQL_YYABORT;
@@ -10920,11 +10931,11 @@ table_factor:
lex->pop_context();
lex->nest_level--;
}
- /*else if (($3->select_lex &&
- $3->select_lex->master_unit()->is_union() &&
- ($3->select_lex->master_unit()->first_select() ==
- $3->select_lex || !$3->lifted)) || $5)*/
- else if ($5 != NULL)
+ /*else if (($4->select_lex &&
+ $4->select_lex->master_unit()->is_union() &&
+ ($4->select_lex->master_unit()->first_select() ==
+ $4->select_lex || !$4->lifted)) || $6)*/
+ else if ($6 != NULL)
{
/*
Tables with or without joins within parentheses cannot
@@ -10937,7 +10948,7 @@ table_factor:
{
/* nested join: FROM (t1 JOIN t2 ...),
nest_level is the same as in the outer query */
- $$= $3;
+ $$= $4;
}
/*
Fields in derived table can be used in upper select in
@@ -13736,8 +13747,93 @@ temporal_literal:
;
+opt_with_clause:
+ /*empty */ { $$= 0; }
+ | with_clause
+ {
+ $$= $1;
+ Lex->derived_tables|= DERIVED_WITH;
+ }
+ ;
+
+
+with_clause:
+ WITH opt_recursive
+ {
+ With_clause *with_clause=
+ new With_clause($2, Lex->curr_with_clause);
+ if (with_clause == NULL)
+ MYSQL_YYABORT;
+ Lex->curr_with_clause= with_clause;
+ with_clause->add_to_list(Lex->with_clauses_list_last_next);
+ }
+ with_list
+ {
+ $$= Lex->curr_with_clause;
+ Lex->curr_with_clause= Lex->curr_with_clause->pop();
+ }
+ ;
+
+
+opt_recursive:
+ /*empty*/ { $$= 0; }
+ | RECURSIVE_SYM { $$= 1; }
+ ;
+
+with_list:
+ with_list_element
+ | with_list ',' with_list_element
+ ;
+
+
+with_list_element:
+ query_name
+ opt_with_column_list
+ AS '(' remember_name subselect remember_end ')'
+ {
+ With_element *elem= new With_element($1, Lex->with_column_list, $6->master_unit());
+ if (elem == NULL || Lex->curr_with_clause->add_with_element(elem))
+ MYSQL_YYABORT;
+ Lex->with_column_list.empty();
+ if (elem->set_unparsed_spec(thd, $5+1, $7))
+ MYSQL_YYABORT;
+ }
+ ;
+
+opt_with_column_list:
+ /* empty */
+ {}
+ | '(' with_column_list ')'
+ ;
+
+
+with_column_list:
+ ident
+ {
+ Lex->with_column_list.push_back((LEX_STRING*)
+ thd->memdup(&$1, sizeof(LEX_STRING)));
+ }
+ | with_column_list ',' ident
+ {
+ Lex->with_column_list.push_back((LEX_STRING*)
+ thd->memdup(&$3, sizeof(LEX_STRING)));
+ }
+ ;
+
+
+query_name:
+ ident
+ {
+ $$= (LEX_STRING *) thd->memdup(&$1, sizeof(LEX_STRING));
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
+
+
/**********************************************************************
** Creating different items.
**********************************************************************/
@@ -16020,9 +16116,10 @@ query_expression_body:
/* Corresponds to <query expression> in the SQL:2003 standard. */
subselect:
- subselect_start query_expression_body subselect_end
+ subselect_start opt_with_clause query_expression_body subselect_end
{
- $$= $2;
+ $3->set_with_clause($2);
+ $$= $3;
}
;
@@ -16249,7 +16346,7 @@ view_select:
lex->parsing_options.allows_derived= FALSE;
lex->create_view_select.str= (char *) YYLIP->get_cpp_ptr();
}
- view_select_aux view_check_option
+ opt_with_clause view_select_aux view_check_option
{
LEX *lex= Lex;
uint len= YYLIP->get_cpp_ptr() - lex->create_view_select.str;
@@ -16261,6 +16358,7 @@ view_select:
lex->parsing_options.allows_select_into= TRUE;
lex->parsing_options.allows_select_procedure= TRUE;
lex->parsing_options.allows_derived= TRUE;
+ lex->current_select->set_with_clause($2);
}
;
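
The grammar added above accepts statements of the form WITH [RECURSIVE] name [(column_list)] AS ( subquery ) [, ...] followed by a SELECT, and also allows a with clause in derived tables, subselects and view definitions. The actions keep a stack of clauses in LEX::curr_with_clause so that nested subqueries attach their elements to the innermost clause. A condensed sketch of that push/pop discipline using only the calls visible in the actions above (recursive_flag, name and subselect stand in for the grammar values $2, $1 and $6):

    /* On 'WITH [RECURSIVE]': push a new clause, remembering the enclosing one */
    With_clause *with_clause= new With_clause(recursive_flag,
                                              Lex->curr_with_clause);
    Lex->curr_with_clause= with_clause;
    with_clause->add_to_list(Lex->with_clauses_list_last_next);

    /* Each 'name [(columns)] AS ( subquery )' element joins the current clause */
    With_element *elem= new With_element(name, Lex->with_column_list,
                                         subselect->master_unit());
    Lex->curr_with_clause->add_with_element(elem);
    Lex->with_column_list.empty();

    /* After the last element the enclosing clause becomes current again */
    Lex->curr_with_clause= Lex->curr_with_clause->pop();
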
diff --git a/sql/strfunc.cc b/sql/strfunc.cc
index b8100e05ce5..bf5fe9d6f00 100644
--- a/sql/strfunc.cc
+++ b/sql/strfunc.cc
@@ -337,7 +337,7 @@ int find_string_in_array(LEX_STRING * const haystack, LEX_STRING * const needle,
const LEX_STRING *pos;
for (pos= haystack; pos->str; pos++)
if (!cs->coll->strnncollsp(cs, (uchar *) pos->str, pos->length,
- (uchar *) needle->str, needle->length, 0))
+ (uchar *) needle->str, needle->length))
{
return (pos - haystack);
}
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 9ba955c5bc6..4bf202813f3 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -283,7 +283,7 @@ static Sys_var_long Sys_pfs_events_stages_history_size(
/**
Variable performance_schema_max_statement_classes.
The default number of statement classes is the sum of:
- - COM_END for all regular "statement/com/...",
+ - (COM_END - MariaDB gap) for all regular "statement/com/...",
- 1 for "statement/com/new_packet", for unknown enum_server_command
- 1 for "statement/com/Error", for invalid enum_server_command
- SQLCOM_END for all regular "statement/sql/...",
@@ -295,7 +295,8 @@ static Sys_var_ulong Sys_pfs_max_statement_classes(
"Maximum number of statement instruments.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_statement_class_sizing),
CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 256),
- DEFAULT((ulong) SQLCOM_END + (ulong) COM_END + 4),
+ DEFAULT((ulong) SQLCOM_END +
+ (ulong) (COM_END -(COM_MDB_GAP_END - COM_MDB_GAP_BEG + 1)) + 4),
BLOCK_SIZE(1));
static Sys_var_long Sys_pfs_events_statements_history_long_size(
@@ -1183,6 +1184,19 @@ static Sys_var_mybool Sys_log_queries_not_using_indexes(
GLOBAL_VAR(opt_log_queries_not_using_indexes),
CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+static Sys_var_mybool Sys_log_slow_admin_statements(
+ "log_slow_admin_statements",
+ "Log slow OPTIMIZE, ANALYZE, ALTER and other administrative statements to "
+ "the slow log if it is open.",
+ GLOBAL_VAR(opt_log_slow_admin_statements),
+ CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+
+static Sys_var_mybool Sys_log_slow_slave_statements(
+ "log_slow_slave_statements",
+ "Log slow statements executed by slave thread to the slow log if it is open.",
+ GLOBAL_VAR(opt_log_slow_slave_statements),
+ CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+
static Sys_var_ulong Sys_log_warnings(
"log_warnings",
"Log some not critical warnings to the general log file."
@@ -3193,9 +3207,9 @@ static Sys_var_ulong Sys_table_cache_size(
static Sys_var_ulong Sys_thread_cache_size(
"thread_cache_size",
- "How many threads we should keep in a cache for reuse",
+ "How many threads we should keep in a cache for reuse. These are freed after 5 minutes of idle time",
GLOBAL_VAR(thread_cache_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, 16384), DEFAULT(0), BLOCK_SIZE(1));
+ VALID_RANGE(0, 16384), DEFAULT(256), BLOCK_SIZE(1));
#ifdef HAVE_POOL_OF_THREADS
static bool fix_tp_max_threads(sys_var *, THD *, enum_var_type)
@@ -4615,8 +4629,7 @@ static bool check_locale(sys_var *self, THD *thd, set_var *var)
mysql_mutex_lock(&LOCK_error_messages);
res= (!locale->errmsgs->errmsgs &&
read_texts(ERRMSG_FILE, locale->errmsgs->language,
- &locale->errmsgs->errmsgs,
- ER_ERROR_LAST - ER_ERROR_FIRST + 1));
+ &locale->errmsgs->errmsgs));
mysql_mutex_unlock(&LOCK_error_messages);
if (res)
{
diff --git a/sql/table.cc b/sql/table.cc
index 98421ec2aac..07e2876f5ba 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -7281,7 +7281,9 @@ bool TABLE_LIST::init_derived(THD *thd, bool init_view)
*/
if (is_merged_derived())
{
- if (is_view() || unit->prepared)
+ if (is_view() ||
+ (unit->prepared &&
+ !(thd->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_VIEW)))
create_field_translation(thd);
}
@@ -7423,6 +7425,11 @@ void TABLE_LIST::set_lock_type(THD *thd, enum thr_lock_type lock)
}
}
+bool TABLE_LIST::is_with_table()
+{
+ return derived && derived->with_element;
+}
+
uint TABLE_SHARE::actual_n_key_parts(THD *thd)
{
return use_ext_keys &&
diff --git a/sql/table.h b/sql/table.h
index c45e86b695e..1c461d96097 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -48,6 +48,7 @@ class ACL_internal_schema_access;
class ACL_internal_table_access;
class Field;
class Table_statistics;
+class With_element;
class TDC_element;
/*
@@ -321,55 +322,6 @@ enum enum_vcol_update_mode
VCOL_UPDATE_ALL
};
-class Filesort_info
-{
- /// Buffer for sorting keys.
- Filesort_buffer filesort_buffer;
-
-public:
- IO_CACHE *io_cache; /* If sorted through filesort */
- uchar *buffpek; /* Buffer for buffpek structures */
- uint buffpek_len; /* Max number of buffpeks in the buffer */
- uchar *addon_buf; /* Pointer to a buffer if sorted with fields */
- size_t addon_length; /* Length of the buffer */
- struct st_sort_addon_field *addon_field; /* Pointer to the fields info */
- void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *); /* To unpack back */
- uchar *record_pointers; /* If sorted in memory */
- ha_rows found_records; /* How many records in sort */
-
- /** Sort filesort_buffer */
- void sort_buffer(Sort_param *param, uint count)
- { filesort_buffer.sort_buffer(param, count); }
-
- /**
- Accessors for Filesort_buffer (which @c).
- */
- uchar *get_record_buffer(uint idx)
- { return filesort_buffer.get_record_buffer(idx); }
-
- uchar **get_sort_keys()
- { return filesort_buffer.get_sort_keys(); }
-
- uchar **alloc_sort_buffer(uint num_records, uint record_length)
- { return filesort_buffer.alloc_sort_buffer(num_records, record_length); }
-
- bool check_sort_buffer_properties(uint num_records, uint record_length)
- {
- return filesort_buffer.check_sort_buffer_properties(num_records,
- record_length);
- }
-
- void free_sort_buffer()
- { filesort_buffer.free_sort_buffer(); }
-
- void init_record_pointers()
- { filesort_buffer.init_record_pointers(); }
-
- size_t sort_buffer_size() const
- { return filesort_buffer.sort_buffer_size(); }
-};
-
-
class Field_blob;
class Table_triggers_list;
@@ -490,9 +442,6 @@ TABLE_CATEGORY get_table_category(const LEX_STRING *db,
const LEX_STRING *name);
-struct TABLE_share;
-struct All_share_tables;
-
typedef struct st_table_field_type
{
LEX_STRING name;
@@ -977,6 +926,57 @@ struct TABLE_SHARE
};
+/**
+ Class is used as a BLOB field value storage for
+ intermediate GROUP_CONCAT results. Used only for
+ GROUP_CONCAT with DISTINCT or ORDER BY options.
+ */
+
+class Blob_mem_storage: public Sql_alloc
+{
+private:
+ MEM_ROOT storage;
+ /**
+ Flag set when some values were truncated
+ while being saved into the storage.
+ */
+ bool truncated_value;
+public:
+ Blob_mem_storage() :truncated_value(false)
+ {
+ init_alloc_root(&storage, MAX_FIELD_VARCHARLENGTH, 0, MYF(0));
+ }
+ ~Blob_mem_storage()
+ {
+ free_root(&storage, MYF(0));
+ }
+ void reset()
+ {
+ free_root(&storage, MYF(MY_MARK_BLOCKS_FREE));
+ truncated_value= false;
+ }
+ /**
+ Function creates a duplicate of the 'from'
+ string in the 'storage' MEM_ROOT.
+
+ @param from string to copy
+ @param length string length
+
+ @retval Pointer to the copied string.
+ @retval 0 if an error occurred.
+ */
+ char *store(const char *from, uint length)
+ {
+ return (char*) memdup_root(&storage, from, length);
+ }
+ void set_truncated_value(bool is_truncated_value)
+ {
+ truncated_value= is_truncated_value;
+ }
+ bool is_truncated_value() { return truncated_value; }
+};
+
+
/* Information for one open table */
enum index_hint_type
{
@@ -1008,7 +1008,7 @@ private:
One should use methods of I_P_List template instead.
*/
TABLE *share_all_next, **share_all_prev;
- friend struct All_share_tables;
+ friend class TDC_element;
public:
@@ -1247,8 +1247,13 @@ public:
REGINFO reginfo; /* field connections */
MEM_ROOT mem_root;
+ /**
+ Initialized in Item_func_group_concat::setup for appropriate
+ temporary table if GROUP_CONCAT is used with ORDER BY | DISTINCT
+ and BLOB field count > 0.
+ */
+ Blob_mem_storage *blob_storage;
GRANT_INFO grant;
- Filesort_info sort;
/*
The arena which the items for expressions from the table definition
are associated with.
@@ -1409,19 +1414,6 @@ struct TABLE_share
};
-struct All_share_tables
-{
- static inline TABLE **next_ptr(TABLE *l)
- {
- return &l->share_all_next;
- }
- static inline TABLE ***prev_ptr(TABLE *l)
- {
- return &l->share_all_prev;
- }
-};
-
-
enum enum_schema_table_state
{
NOT_PROCESSED= 0,
@@ -1840,6 +1832,7 @@ struct TABLE_LIST
derived tables. Use TABLE_LIST::is_anonymous_derived_table().
*/
st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */
+ With_element *with; /* With element of with_table */
ST_SCHEMA_TABLE *schema_table; /* Information_schema table */
st_select_lex *schema_select_lex;
/*
@@ -2204,6 +2197,7 @@ struct TABLE_LIST
{
return (derived_type & DTYPE_TABLE);
}
+ bool is_with_table();
inline void set_view()
{
derived_type= DTYPE_VIEW;
@@ -2244,6 +2238,7 @@ struct TABLE_LIST
{
derived_type|= DTYPE_MULTITABLE;
}
+ bool set_as_with_table(THD *thd, With_element *with_elem);
void reset_const_table();
bool handle_derived(LEX *lex, uint phases);
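
The new Blob_mem_storage class gives a temporary table used by GROUP_CONCAT with DISTINCT or ORDER BY a dedicated MEM_ROOT in which BLOB values can be duplicated for the lifetime of one aggregation. A minimal usage sketch built only from the members declared above; the variables from_ptr, copy_len and was_cut are illustrative, not taken from the patch:

    Blob_mem_storage storage;                  /* MEM_ROOT pre-sized to
                                                  MAX_FIELD_VARCHARLENGTH */

    char *copy= storage.store(from_ptr, copy_len);  /* duplicate into the root */
    if (was_cut)                               /* caller had to shorten the value */
      storage.set_truncated_value(true);

    if (storage.is_truncated_value())
      { /* report that some GROUP_CONCAT input was truncated */ }

    storage.reset();                           /* mark blocks free for the next
                                                  evaluation, keep the root */
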
diff --git a/sql/table_cache.cc b/sql/table_cache.cc
index 5e7a69e21ed..b6c1e32b350 100644
--- a/sql/table_cache.cc
+++ b/sql/table_cache.cc
@@ -841,7 +841,7 @@ bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
const char *db, const char *table_name,
bool kill_delayed_threads)
{
- I_P_List <TABLE, TABLE_share> purge_tables;
+ TDC_element::TABLE_list purge_tables;
TABLE *table;
TDC_element *element;
uint my_refs= 1;
diff --git a/sql/table_cache.h b/sql/table_cache.h
index a3e6715d5d0..2efc535c425 100644
--- a/sql/table_cache.h
+++ b/sql/table_cache.h
@@ -31,7 +31,9 @@ public:
TABLE_SHARE *share;
typedef I_P_List <TABLE, TABLE_share> TABLE_list;
- typedef I_P_List <TABLE, All_share_tables> All_share_tables_list;
+ typedef I_P_List <TABLE, I_P_List_adapter<TABLE, &TABLE::share_all_next,
+ &TABLE::share_all_prev> >
+ All_share_tables_list;
/**
Protects ref_count, m_flush_tickets, all_tables, free_tables, flushed,
all_tables_refs.
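
The last hunk above removes the need for the hand-written All_share_tables adapter (deleted from table.h earlier in this diff) by instantiating I_P_List_adapter directly on the TABLE member pointers. A simplified, hypothetical re-implementation of that adapter idea (not MariaDB's actual template) shows why a dedicated struct per list becomes unnecessary:

    /* Intrusive list hooks, shaped like TABLE::share_all_next/share_all_prev */
    struct Node
    {
      Node  *share_all_next;
      Node **share_all_prev;
    };

    /* Generic adapter: the link members are template arguments, so one
       template replaces every per-list "static next_ptr/prev_ptr" struct. */
    template <class T, T *T::*next_field, T **T::*prev_field>
    struct List_adapter
    {
      static T  **next_ptr(T *n) { return &(n->*next_field); }
      static T ***prev_ptr(T *n) { return &(n->*prev_field); }
    };

    typedef List_adapter<Node, &Node::share_all_next,
                         &Node::share_all_prev> All_share_nodes;
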
diff --git a/sql/threadpool_win.cc b/sql/threadpool_win.cc
index 1281756f6bc..2ca815a6062 100644
--- a/sql/threadpool_win.cc
+++ b/sql/threadpool_win.cc
@@ -390,7 +390,7 @@ int start_io(connection_t *connection, PTP_CALLBACK_INSTANCE instance)
return 0;
}
- /* Some error occured */
+ /* Some error occurred */
CancelThreadpoolIo(io);
return -1;
}
@@ -576,7 +576,7 @@ static VOID CALLBACK io_completion_callback(PTP_CALLBACK_INSTANCE instance,
return;
error:
- /* Some error has occured. */
+ /* Some error has occurred. */
destroy_connection(connection, instance);
free(connection);
diff --git a/sql/tztime.cc b/sql/tztime.cc
index f94e10c4877..ce0272d996d 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -309,7 +309,7 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
Note: See description of TIME_to_gmt_sec() function first.
In order to perform MYSQL_TIME -> my_time_t conversion we need to build table
which defines "shifted by tz offset and leap seconds my_time_t" ->
- my_time_t function wich is almost the same (except ranges of ambiguity)
+ my_time_t function which is almost the same (except ranges of ambiguity)
as reverse function to piecewise linear function used for my_time_t ->
"shifted my_time_t" conversion and which is also specified as table in
zoneinfo file or in our db (It is specified as start of time type ranges
@@ -612,7 +612,7 @@ sec_to_TIME(MYSQL_TIME * tmp, my_time_t t, long offset)
/*
- Find time range wich contains given my_time_t value
+ Find time range which contains given my_time_t value
SYNOPSIS
find_time_range()
@@ -708,7 +708,7 @@ find_transition_type(my_time_t t, const TIME_ZONE_INFO *sp)
TODO
We can improve this function by creating joined array of transitions and
leap corrections. This will require adding extra field to TRAN_TYPE_INFO
- for storing number of "extra" seconds to minute occured due to correction
+ for storing number of "extra" seconds to minute occurred due to correction
+ for storing number of "extra" seconds to minute occurred due to correction
(60th and 61st second, look how we calculate them as "hit" in this
function).
Under realistic assumptions about frequency of transitions the same array
@@ -2769,7 +2769,7 @@ main(int argc, char **argv)
#ifdef TESTTIME
/*
- Some simple brute-force test wich allowed to catch a pair of bugs.
+ Some simple brute-force test which allowed to catch a pair of bugs.
+ Some simple brute-force test which allowed to catch a pair of bugs.
Also can provide interesting facts about system's time zone support
implementation.
*/
diff --git a/sql/uniques.cc b/sql/uniques.cc
index 63eb6e0eb90..f2fa0bf7b1a 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -37,7 +37,9 @@
#include "sql_sort.h"
#include "queues.h" // QUEUE
#include "my_tree.h" // element_count
-#include "sql_class.h" // Unique
+#include "uniques.h" // Unique
+#include "sql_sort.h"
+#include "myisamchk.h" // BUFFPEK
int unique_write_to_file(uchar* key, element_count count, Unique *unique)
{
@@ -58,8 +60,8 @@ int unique_write_to_file_with_count(uchar* key, element_count count, Unique *uni
int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique)
{
- memcpy(unique->record_pointers, key, unique->size);
- unique->record_pointers+=unique->size;
+ memcpy(unique->sort.record_pointers, key, unique->size);
+ unique->sort.record_pointers+=unique->size;
return 0;
}
@@ -67,8 +69,8 @@ int unique_intersect_write_to_ptrs(uchar* key, element_count count, Unique *uniq
{
if (count >= unique->min_dupl_count)
{
- memcpy(unique->record_pointers, key, unique->size);
- unique->record_pointers+=unique->size;
+ memcpy(unique->sort.record_pointers, key, unique->size);
+ unique->sort.record_pointers+=unique->size;
}
else
unique->filtered_out_elems++;
@@ -80,16 +82,15 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
uint size_arg, ulonglong max_in_memory_size_arg,
uint min_dupl_count_arg)
:max_in_memory_size(max_in_memory_size_arg),
- record_pointers(NULL),
size(size_arg),
elements(0)
{
+ my_b_clear(&file);
min_dupl_count= min_dupl_count_arg;
full_size= size;
if (min_dupl_count_arg)
full_size+= sizeof(element_count);
with_counters= MY_TEST(min_dupl_count_arg);
- my_b_clear(&file);
init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func,
NULL, comp_func_fixed_arg, MYF(MY_THREAD_SPECIFIC));
/* If the following fail's the next add will also fail */
@@ -408,8 +409,10 @@ Unique::reset()
reset_dynamic(&file_ptrs);
reinit_io_cache(&file, WRITE_CACHE, 0L, 0, 1);
}
+ my_free(sort.record_pointers);
elements= 0;
tree.flag= 0;
+ sort.record_pointers= 0;
}
/*
@@ -636,7 +639,7 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg)
if (elements == 0) /* the whole tree is in memory */
return tree_walk(&tree, action, walk_action_arg, left_root_right);
- table->sort.found_records=elements+tree.elements_in_tree;
+ sort.return_rows= elements+tree.elements_in_tree;
/* flush current tree to the file to have some memory for merge buffer */
if (flush())
return 1;
@@ -663,9 +666,11 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg)
/*
DESCRIPTION
- Perform multi-pass sort merge of the elements accessed through table->sort,
- using the buffer buff as the merge buffer. The last pass is not performed
- if without_last_merge is TRUE.
+
+ Perform multi-pass sort merge of the elements using the buffer buff as
+ the merge buffer. The last pass is not performed if without_last_merge is
+ TRUE.
+
SYNOPSIS
Unique:merge()
All params are 'IN':
@@ -679,23 +684,19 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg)
bool Unique::merge(TABLE *table, uchar *buff, bool without_last_merge)
{
- IO_CACHE *outfile= table->sort.io_cache;
+ IO_CACHE *outfile= &sort.io_cache;
BUFFPEK *file_ptr= (BUFFPEK*) file_ptrs.buffer;
uint maxbuffer= file_ptrs.elements - 1;
my_off_t save_pos;
bool error= 1;
+ Sort_param sort_param;
- /* Open cached file if it isn't open */
- if (!outfile)
- outfile= table->sort.io_cache= (IO_CACHE*) my_malloc(sizeof(IO_CACHE),
- MYF(MY_THREAD_SPECIFIC|MY_ZEROFILL));
- if (!outfile ||
- (! my_b_inited(outfile) &&
- open_cached_file(outfile,mysql_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER,
- MYF(MY_WME))))
+ /* Open cached file for table records if it isn't open */
+ if (! my_b_inited(outfile) &&
+ open_cached_file(outfile,mysql_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER,
+ MYF(MY_WME)))
return 1;
- Sort_param sort_param;
bzero((char*) &sort_param,sizeof(sort_param));
sort_param.max_rows= elements;
sort_param.sort_form= table;
@@ -744,44 +745,49 @@ err:
/*
- Modify the TABLE element so that when one calls init_records()
- the rows will be read in priority order.
+ Allocate memory that can be used with init_records() so that
+ rows will be read in priority order.
*/
bool Unique::get(TABLE *table)
{
bool rc= 1;
uchar *sort_buffer= NULL;
- table->sort.found_records= elements+tree.elements_in_tree;
+ sort.return_rows= elements+tree.elements_in_tree;
+ DBUG_ENTER("Unique::get");
if (my_b_tell(&file) == 0)
{
/* Whole tree is in memory; Don't use disk if you don't need to */
- if ((record_pointers=table->sort.record_pointers= (uchar*)
+ if ((sort.record_pointers= (uchar*)
my_malloc(size * tree.elements_in_tree, MYF(MY_THREAD_SPECIFIC))))
{
+ uchar *save_record_pointers= sort.record_pointers;
tree_walk_action action= min_dupl_count ?
(tree_walk_action) unique_intersect_write_to_ptrs :
(tree_walk_action) unique_write_to_ptrs;
filtered_out_elems= 0;
(void) tree_walk(&tree, action,
this, left_root_right);
- table->sort.found_records-= filtered_out_elems;
- return 0;
+ /* Restore record_pointers, which was changed by 'action' above */
+ sort.record_pointers= save_record_pointers;
+ sort.return_rows-= filtered_out_elems;
+ DBUG_RETURN(0);
}
}
/* Not enough memory; Save the result to file && free memory used by tree */
if (flush())
- return 1;
+ DBUG_RETURN(1);
size_t buff_sz= (max_in_memory_size / full_size + 1) * full_size;
- if (!(sort_buffer= (uchar*) my_malloc(buff_sz, MYF(MY_THREAD_SPECIFIC|MY_WME))))
- return 1;
+ if (!(sort_buffer= (uchar*) my_malloc(buff_sz,
+ MYF(MY_THREAD_SPECIFIC|MY_WME))))
+ DBUG_RETURN(1);
if (merge(table, sort_buffer, FALSE))
- goto err;
+ goto err;
rc= 0;
err:
my_free(sort_buffer);
- return rc;
+ DBUG_RETURN(rc);
}
diff --git a/sql/uniques.h b/sql/uniques.h
new file mode 100644
index 00000000000..0210e879788
--- /dev/null
+++ b/sql/uniques.h
@@ -0,0 +1,100 @@
+/* Copyright (c) 2016 MariaDB corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef UNIQUE_INCLUDED
+#define UNIQUE_INCLUDED
+
+#include "filesort.h"
+
+/*
+ Unique -- class for removing duplicates.
+ Puts all values into a TREE. If the tree becomes too big,
+ it is dumped to a file. The user can either request sorted values or
+ just iterate through them. In the latter case tree merging is performed in
+ memory simultaneously with iteration, so it should be ~2-3x faster.
+ */
+
+class Unique :public Sql_alloc
+{
+ DYNAMIC_ARRAY file_ptrs;
+ ulong max_elements;
+ ulonglong max_in_memory_size;
+ IO_CACHE file;
+ TREE tree;
+ ulong filtered_out_elems;
+ uint size;
+ uint full_size;
+ uint min_dupl_count; /* always 0 for unions, > 0 for intersections */
+ bool with_counters;
+
+ bool merge(TABLE *table, uchar *buff, bool without_last_merge);
+ bool flush();
+
+public:
+ ulong elements;
+ SORT_INFO sort;
+ Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg,
+ uint size_arg, ulonglong max_in_memory_size_arg,
+ uint min_dupl_count_arg= 0);
+ ~Unique();
+ ulong elements_in_tree() { return tree.elements_in_tree; }
+ inline bool unique_add(void *ptr)
+ {
+ DBUG_ENTER("unique_add");
+ DBUG_PRINT("info", ("tree %u - %lu", tree.elements_in_tree, max_elements));
+ if (!(tree.flag & TREE_ONLY_DUPS) &&
+ tree.elements_in_tree >= max_elements && flush())
+ DBUG_RETURN(1);
+ DBUG_RETURN(!tree_insert(&tree, ptr, 0, tree.custom_arg));
+ }
+
+ bool is_in_memory() { return (my_b_tell(&file) == 0); }
+ void close_for_expansion() { tree.flag= TREE_ONLY_DUPS; }
+
+ bool get(TABLE *table);
+
+ /* Cost of searching for an element in the tree */
+ inline static double get_search_cost(ulonglong tree_elems, uint compare_factor)
+ {
+ return log((double) tree_elems) / (compare_factor * M_LN2);
+ }
+
+ static double get_use_cost(uint *buffer, size_t nkeys, uint key_size,
+ ulonglong max_in_memory_size, uint compare_factor,
+ bool intersect_fl, bool *in_memory);
+ inline static int get_cost_calc_buff_size(size_t nkeys, uint key_size,
+ ulonglong max_in_memory_size)
+ {
+ register ulonglong max_elems_in_tree=
+ max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size);
+ return (int) (sizeof(uint)*(1 + nkeys/max_elems_in_tree));
+ }
+
+ void reset();
+ bool walk(TABLE *table, tree_walk_action action, void *walk_action_arg);
+
+ uint get_size() const { return size; }
+ ulonglong get_max_in_memory_size() const { return max_in_memory_size; }
+
+ friend int unique_write_to_file(uchar* key, element_count count, Unique *unique);
+ friend int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique);
+
+ friend int unique_write_to_file_with_count(uchar* key, element_count count,
+ Unique *unique);
+ friend int unique_intersect_write_to_ptrs(uchar* key, element_count count,
+ Unique *unique);
+};
+
+#endif /* UNIQUE_INCLUDED */
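
With the move into its own header, Unique carries its sort state in the SORT_INFO member instead of borrowing TABLE::sort. A hedged usage sketch, assuming fixed-size keys and a plain memcmp comparator (the comparator, the key source, the callback names and the 1 MB memory limit are illustrative, not taken from the patch):

    static int cmp_fixed_keys(void *arg, const void *a, const void *b)
    {
      return memcmp(a, b, *(uint *) arg);      /* qsort_cmp2-style comparator */
    }

    uint key_size= sizeof(ulonglong);
    Unique uniq(cmp_fixed_keys, &key_size, key_size,
                1024 * 1024 /* bytes kept in memory before flushing to disk */);

    while (have_more_keys())                   /* hypothetical key source */
      if (uniq.unique_add(next_key_ptr()))     /* true: insert or flush failed */
        return true;

    /* Walk the distinct keys with a callback, or materialize them with
       uniq.get(table) for a later init_read_record() pass. */
    return uniq.walk(table, my_action, my_action_arg);
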
diff --git a/sql/unireg.h b/sql/unireg.h
index 10751b6ec93..251597c1884 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -43,15 +43,16 @@
#define PLUGINDIR "lib/plugin"
#endif
-#define CURRENT_THD_ERRMSGS current_thd->variables.errmsgs
-#define DEFAULT_ERRMSGS my_default_lc_messages->errmsgs->errmsgs
-
-#define ER(X) CURRENT_THD_ERRMSGS[(X) - ER_ERROR_FIRST]
-#define ER_DEFAULT(X) DEFAULT_ERRMSGS[(X) - ER_ERROR_FIRST]
-#define ER_SAFE(X) (((X) >= ER_ERROR_FIRST && (X) <= ER_ERROR_LAST) ? ER(X) : "Invalid error code")
-#define ER_SAFE_THD(T,X) (((X) >= ER_ERROR_FIRST && (X) <= ER_ERROR_LAST) ? ER_THD(T,X) : "Invalid error code")
-#define ER_THD(thd,X) ((thd)->variables.errmsgs[(X) - ER_ERROR_FIRST])
-#define ER_THD_OR_DEFAULT(thd,X) ((thd) ? ER_THD(thd, X) : ER_DEFAULT(X))
+#define MAX_ERROR_RANGES 4 /* 1000-2000, 2000-3000, 3000-4000, 4000-5000 */
+#define ERRORS_PER_RANGE 1000
+
+#define DEFAULT_ERRMSGS my_default_lc_messages->errmsgs->errmsgs
+#define CURRENT_THD_ERRMSGS (current_thd)->variables.errmsgs
+
+#define ER_DEFAULT(X) DEFAULT_ERRMSGS[((X)-ER_ERROR_FIRST) / ERRORS_PER_RANGE][(X)% ERRORS_PER_RANGE]
+#define ER_THD(thd,X) ((thd)->variables.errmsgs[((X)-ER_ERROR_FIRST) / ERRORS_PER_RANGE][(X) % ERRORS_PER_RANGE])
+#define ER(X) ER_THD(current_thd, (X))
+#define ER_THD_OR_DEFAULT(thd,X) ((thd) ? ER_THD(thd, (X)) : ER_DEFAULT(X))
#define ME_INFO (ME_HOLDTANG+ME_OLDWIN+ME_NOREFRESH)
#define ME_ERROR (ME_BELL+ME_OLDWIN+ME_NOREFRESH)
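
The error-message macros switch from one flat array indexed by X - ER_ERROR_FIRST to a two-level table of MAX_ERROR_RANGES ranges with ERRORS_PER_RANGE slots each, so MariaDB-specific error numbers in higher ranges no longer require one contiguous array. The lookup is plain division and remainder; a small standalone check of the index arithmetic (ER_ERROR_FIRST is assumed to be 1000 here, matching the 1000-wide ranges in the comment, and the sample numbers are arbitrary):

    #include <stdio.h>

    #define ER_ERROR_FIRST   1000   /* assumed value for this example */
    #define ERRORS_PER_RANGE 1000

    int main(void)
    {
      unsigned samples[]= { 1064, 1146, 4001 };
      for (unsigned i= 0; i < 3; i++)
      {
        unsigned X= samples[i];
        unsigned range=  (X - ER_ERROR_FIRST) / ERRORS_PER_RANGE;
        unsigned offset= X % ERRORS_PER_RANGE;
        /* errmsgs[range][offset] is the slot ER_THD()/ER_DEFAULT() read */
        printf("error %u -> errmsgs[%u][%u]\n", X, range, offset);
      }
      return 0;
    }
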
diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc
index c75f2c116ec..6d04527cbcb 100644
--- a/sql/wsrep_sst.cc
+++ b/sql/wsrep_sst.cc
@@ -875,7 +875,7 @@ static int sst_donate_mysqldump (const char* addr,
host, port, mysqld_port, mysqld_unix_port,
wsrep_defaults_file, uuid_str,
(long long)seqno, wsrep_gtid_domain_id,
- bypass ? " "WSREP_SST_OPT_BYPASS : "");
+ bypass ? " " WSREP_SST_OPT_BYPASS : "");
if (ret < 0 || ret >= cmd_len)
{
@@ -896,6 +896,56 @@ static int sst_donate_mysqldump (const char* addr,
wsrep_seqno_t wsrep_locked_seqno= WSREP_SEQNO_UNDEFINED;
+
+/*
+ Create a file under data directory.
+*/
+static int sst_create_file(const char *name, const char *content)
+{
+ int err= 0;
+ char *real_name;
+ char *tmp_name;
+ ssize_t len;
+ FILE *file;
+
+ len= strlen(mysql_real_data_home) + strlen(name) + 2;
+ real_name= (char *) alloca(len);
+
+ snprintf(real_name, (size_t) len, "%s/%s", mysql_real_data_home, name);
+
+ tmp_name= (char *) alloca(len + 4);
+ snprintf(tmp_name, (size_t) len + 4, "%s.tmp", real_name);
+
+ file= fopen(tmp_name, "w+");
+
+ if (0 == file)
+ {
+ err= errno;
+ WSREP_ERROR("Failed to open '%s': %d (%s)", tmp_name, err, strerror(err));
+ }
+ else
+ {
+ // Write the specified content into the file.
+ if (content != NULL)
+ {
+ fprintf(file, "%s\n", content);
+ fsync(fileno(file));
+ }
+
+ fclose(file);
+
+ if (rename(tmp_name, real_name) == -1)
+ {
+ err= errno;
+ WSREP_ERROR("Failed to rename '%s' to '%s': %d (%s)", tmp_name,
+ real_name, err, strerror(err));
+ }
+ }
+
+ return err;
+}
+
+
static int run_sql_command(THD *thd, const char *query)
{
thd->set_query((char *)query, strlen(query));
@@ -907,11 +957,11 @@ static int run_sql_command(THD *thd, const char *query)
return -1;
}
- mysql_parse(thd, thd->query(), thd->query_length(), &ps);
+ mysql_parse(thd, thd->query(), thd->query_length(), &ps, FALSE);
if (thd->is_error())
{
int const err= thd->get_stmt_da()->sql_errno();
- WSREP_WARN ("error executing '%s': %d (%s)%s",
+ WSREP_WARN ("Error executing '%s': %d (%s)%s",
query, err, thd->get_stmt_da()->message(),
err == ER_UNKNOWN_SYSTEM_VARIABLE ?
". Was mysqld built with --with-innodb-disallow-writes ?" : "");
@@ -921,15 +971,21 @@ static int run_sql_command(THD *thd, const char *query)
return 0;
}
+
static int sst_flush_tables(THD* thd)
{
WSREP_INFO("Flushing tables for SST...");
- int err;
+ int err= 0;
int not_used;
- CHARSET_INFO *current_charset;
+ /*
+ Files created to notify the SST script about the outcome of table flush
+ operation.
+ */
+ const char *flush_success= "tables_flushed";
+ const char *flush_error= "sst_error";
- current_charset = thd->variables.character_set_client;
+ CHARSET_INFO *current_charset= thd->variables.character_set_client;
if (!is_supported_parser_charset(current_charset))
{
@@ -942,61 +998,55 @@ static int sst_flush_tables(THD* thd)
if (run_sql_command(thd, "FLUSH TABLES WITH READ LOCK"))
{
- WSREP_ERROR("Failed to flush and lock tables");
- err = -1;
+ err= -1;
}
else
{
- /* make sure logs are flushed after global read lock acquired */
- err= reload_acl_and_cache(thd, REFRESH_ENGINE_LOG | REFRESH_BINARY_LOG,
- (TABLE_LIST*) 0, &not_used);
+ /*
+ Make sure logs are flushed after global read lock acquired. In case
+ reload fails, we must also release the acquired FTWRL.
+ */
+ if (reload_acl_and_cache(thd, REFRESH_ENGINE_LOG | REFRESH_BINARY_LOG,
+ (TABLE_LIST*) 0, &not_used))
+ {
+ thd->global_read_lock.unlock_global_read_lock(thd);
+ err= -1;
+ }
}
thd->variables.character_set_client = current_charset;
-
if (err)
{
- WSREP_ERROR("Failed to flush tables: %d (%s)", err, strerror(err));
+ WSREP_ERROR("Failed to flush and lock tables");
+
+ /*
+ The SST must be aborted as the flush tables failed. Notify this to SST
+ script by creating the error file.
+ */
+ int tmp;
+ if ((tmp= sst_create_file(flush_error, NULL))) {
+ err= tmp;
+ }
}
else
{
WSREP_INFO("Tables flushed.");
- const char base_name[]= "tables_flushed";
-
- ssize_t const full_len= strlen(mysql_real_data_home) + strlen(base_name)+2;
- char *real_name= (char *) alloca(full_len);
- snprintf(real_name, (size_t) full_len, "%s/%s", mysql_real_data_home,
- base_name);
- char *tmp_name= (char *) alloca(full_len + 4);
- snprintf(tmp_name, (size_t) full_len + 4, "%s.tmp", real_name);
- FILE* file= fopen(tmp_name, "w+");
- if (0 == file)
- {
- err= errno;
- WSREP_ERROR("Failed to open '%s': %d (%s)", tmp_name, err,strerror(err));
- }
- else
- {
- // Write cluster state ID and wsrep_gtid_domain_id.
- fprintf(file, "%s:%lld %d\n",
- wsrep_cluster_state_uuid, (long long)wsrep_locked_seqno,
- wsrep_gtid_domain_id);
- fsync(fileno(file));
- fclose(file);
- if (rename(tmp_name, real_name) == -1)
- {
- err= errno;
- WSREP_ERROR("Failed to rename '%s' to '%s': %d (%s)",
- tmp_name, real_name, err,strerror(err));
- }
- }
+ /*
+ Tables have been flushed. Create a file with cluster state ID and
+ wsrep_gtid_domain_id.
+ */
+ char content[100];
+ snprintf(content, sizeof(content), "%s:%lld %d\n", wsrep_cluster_state_uuid,
+ (long long)wsrep_locked_seqno, wsrep_gtid_domain_id);
+ err= sst_create_file(flush_success, content);
}
return err;
}
+
static void sst_disallow_writes (THD* thd, bool yes)
{
char query_str[64] = { 0, };
@@ -1170,7 +1220,7 @@ static int sst_donate_other (const char* method,
wsrep_defaults_file,
binlog_opt, binlog_opt_val,
uuid, (long long) seqno, wsrep_gtid_domain_id,
- bypass ? " "WSREP_SST_OPT_BYPASS : "");
+ bypass ? " " WSREP_SST_OPT_BYPASS : "");
my_free(binlog_opt_val);
if (ret < 0 || ret >= cmd_len)