-rw-r--r--  client/mysql.cc  3
-rw-r--r--  client/mysql_plugin.c  84
-rw-r--r--  client/mysqldump.c  2
-rw-r--r--  client/mysqltest.cc  15
-rw-r--r--  cmake/install_macros.cmake  2
-rw-r--r--  dbug/dbug.c  2
-rw-r--r--  extra/innochecksum.cc  8
-rw-r--r--  extra/mariabackup/backup_copy.cc  49
-rw-r--r--  extra/mariabackup/xbcloud.cc  7
-rw-r--r--  extra/mariabackup/xtrabackup.cc  11
-rw-r--r--  include/m_string.h  38
-rw-r--r--  mysql-test/main/cte_recursive.result  172
-rw-r--r--  mysql-test/main/cte_recursive.test  123
-rw-r--r--  mysql-test/main/derived_cond_pushdown.result  63
-rw-r--r--  mysql-test/main/derived_cond_pushdown.test  49
-rw-r--r--  mysql-test/main/win.result  46
-rw-r--r--  mysql-test/main/win.test  40
-rw-r--r--  mysql-test/main/win_orderby.result  63
-rw-r--r--  mysql-test/main/win_orderby.test  55
-rw-r--r--  mysql-test/suite/binlog/r/binlog_recovery_after_checksum_change.result  8
-rw-r--r--  mysql-test/suite/binlog/t/innodb_rc_insert_before_delete.test  89
-rw-r--r--  mysql-test/suite/encryption/r/tempfiles_encrypted.result  46
-rw-r--r--  mysql-test/suite/mariabackup/incremental_drop_db.result  30
-rw-r--r--  mysql-test/suite/mariabackup/incremental_drop_db.test  68
-rw-r--r--  mysql-test/suite/rpl/include/create_or_drop_sync_func.inc  75
-rw-r--r--  mysql-test/suite/rpl/r/rpl_delayed_parallel_slave_sbm.result  60
-rw-r--r--  mysql-test/suite/rpl/r/rpl_parallel_analyze.result  76
-rw-r--r--  mysql-test/suite/rpl/t/rpl_delayed_parallel_slave_sbm-slave.opt  1
-rw-r--r--  mysql-test/suite/rpl/t/rpl_delayed_parallel_slave_sbm.test  133
-rw-r--r--  mysql-test/suite/rpl/t/rpl_parallel_analyze.test  84
-rw-r--r--  sql/item.cc  2
-rw-r--r--  sql/item_sum.h  9
-rw-r--r--  sql/mysql_install_db.cc  2
-rw-r--r--  sql/mysqld.cc  15
-rw-r--r--  sql/rpl_parallel.cc  3
-rw-r--r--  sql/slave.cc  31
-rw-r--r--  sql/slave.h  12
-rw-r--r--  sql/sql_admin.cc  6
-rw-r--r--  sql/sql_class.h  6
-rw-r--r--  sql/sql_cte.cc  91
-rw-r--r--  sql/sql_cte.h  5
-rw-r--r--  sql/sql_lex.cc  2
-rw-r--r--  sql/sql_lex.h  7
-rw-r--r--  sql/sql_list.h  2
-rw-r--r--  sql/sql_parse.cc  12
-rw-r--r--  sql/sql_select.cc  60
-rw-r--r--  sql/sql_select.h  22
-rw-r--r--  storage/connect/array.cpp  6
-rw-r--r--  storage/connect/bson.cpp  10
-rw-r--r--  storage/connect/bsonudf.cpp  20
-rw-r--r--  storage/connect/filamdbf.cpp  2
-rw-r--r--  storage/connect/filamfix.cpp  17
-rw-r--r--  storage/connect/filamgz.cpp  19
-rw-r--r--  storage/connect/filamtxt.cpp  10
-rw-r--r--  storage/connect/filamvct.cpp  39
-rw-r--r--  storage/connect/filamzip.cpp  10
-rw-r--r--  storage/connect/javaconn.cpp  27
-rw-r--r--  storage/connect/json.cpp  8
-rw-r--r--  storage/connect/jsonudf.cpp  21
-rw-r--r--  storage/connect/myconn.cpp  12
-rw-r--r--  storage/mroonga/vendor/groonga/lib/alloc.c  2
61 files changed, 1699 insertions, 293 deletions
diff --git a/client/mysql.cc b/client/mysql.cc
index 6c4245fcd90..02ffacc722f 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -3596,7 +3596,10 @@ print_table_data(MYSQL_RES *result)
{
print_field_types(result);
if (!mysql_num_rows(result))
+ {
+ my_afree((uchar*) num_flag);
return;
+ }
mysql_field_seek(result,0);
}
separator.copy("+",1,charset_info);
diff --git a/client/mysql_plugin.c b/client/mysql_plugin.c
index f496db4e72b..d87d4269f89 100644
--- a/client/mysql_plugin.c
+++ b/client/mysql_plugin.c
@@ -569,14 +569,14 @@ static int file_exists(char * filename)
@retval int error = 1, success = 0
*/
-static int search_dir(const char * base_path, const char *tool_name,
+static int search_dir(const char *base_path, const char *tool_name,
const char *subdir, char *tool_path)
{
char new_path[FN_REFLEN];
char source_path[FN_REFLEN];
- strcpy(source_path, base_path);
- strcat(source_path, subdir);
+ safe_strcpy(source_path, sizeof(source_path), base_path);
+ safe_strcat(source_path, sizeof(source_path), subdir);
fn_format(new_path, tool_name, source_path, "", MY_UNPACK_FILENAME);
if (file_exists(new_path))
{
@@ -632,7 +632,7 @@ static int load_plugin_data(char *plugin_name, char *config_file)
FILE *file_ptr;
char path[FN_REFLEN];
char line[1024];
- char *reason= 0;
+ const char *reason= 0;
char *res;
int i= -1;
@@ -643,14 +643,14 @@ static int load_plugin_data(char *plugin_name, char *config_file)
}
if (!file_exists(opt_plugin_ini))
{
- reason= (char *)"File does not exist.";
+ reason= "File does not exist.";
goto error;
}
file_ptr= fopen(opt_plugin_ini, "r");
if (file_ptr == NULL)
{
- reason= (char *)"Cannot open file.";
+ reason= "Cannot open file.";
goto error;
}
@@ -660,17 +660,20 @@ static int load_plugin_data(char *plugin_name, char *config_file)
/* Read plugin components */
while (i < 16)
{
+ size_t line_len;
+
res= fgets(line, sizeof(line), file_ptr);
+ line_len= strlen(line);
+
/* strip /n */
- if (line[strlen(line)-1] == '\n')
- {
- line[strlen(line)-1]= '\0';
- }
+ if (line[line_len - 1] == '\n')
+ line[line_len - 1]= '\0';
+
if (res == NULL)
{
if (i < 1)
{
- reason= (char *)"Bad format in plugin configuration file.";
+ reason= "Bad format in plugin configuration file.";
fclose(file_ptr);
goto error;
}
@@ -683,14 +686,19 @@ static int load_plugin_data(char *plugin_name, char *config_file)
if (i == -1) /* if first pass, read this line as so_name */
{
/* Add proper file extension for soname */
- strcat(line, FN_SOEXT);
+ if (safe_strcpy(line + line_len - 1, sizeof(line), FN_SOEXT))
+ {
+ reason= "Plugin name too long.";
+ fclose(file_ptr);
+ goto error;
+ }
/* save so_name */
plugin_data.so_name= my_strdup(line, MYF(MY_WME|MY_ZEROFILL));
i++;
}
else
{
- if (strlen(line) > 0)
+ if (line_len > 0)
{
plugin_data.components[i]= my_strdup(line, MYF(MY_WME));
i++;
@@ -779,14 +787,13 @@ static int check_options(int argc, char **argv, char *operation)
/* read the plugin config file and check for match against argument */
else
{
- if (strlen(argv[i]) + 4 + 1 > FN_REFLEN)
+ if (safe_strcpy(plugin_name, sizeof(plugin_name), argv[i]) ||
+ safe_strcpy(config_file, sizeof(config_file), argv[i]) ||
+ safe_strcat(config_file, sizeof(config_file), ".ini"))
{
fprintf(stderr, "ERROR: argument is too long.\n");
return 1;
}
- strcpy(plugin_name, argv[i]);
- strcpy(config_file, argv[i]);
- strcat(config_file, ".ini");
}
}
@@ -855,35 +862,30 @@ static int check_options(int argc, char **argv, char *operation)
static int process_options(int argc, char *argv[], char *operation)
{
int error= 0;
- int i= 0;
/* Parse and execute command-line options */
if ((error= handle_options(&argc, &argv, my_long_options, get_one_option)))
- goto exit;
+ return error;
/* If the print defaults option used, exit. */
if (opt_print_defaults)
- {
- error= -1;
- goto exit;
- }
+ return -1;
/* Add a trailing directory separator if not present */
if (opt_basedir)
{
- i= (int)strlength(opt_basedir);
- if (opt_basedir[i-1] != FN_LIBCHAR || opt_basedir[i-1] != FN_LIBCHAR2)
+ size_t basedir_len= strlength(opt_basedir);
+ if (opt_basedir[basedir_len - 1] != FN_LIBCHAR ||
+ opt_basedir[basedir_len - 1] != FN_LIBCHAR2)
{
char buff[FN_REFLEN];
- memset(buff, 0, sizeof(buff));
+ if (basedir_len + 2 > FN_REFLEN)
+ return -1;
- strncpy(buff, opt_basedir, sizeof(buff) - 1);
-#ifdef __WIN__
- strncat(buff, "/", sizeof(buff) - strlen(buff) - 1);
-#else
- strncat(buff, FN_DIRSEP, sizeof(buff) - strlen(buff) - 1);
-#endif
- buff[sizeof(buff) - 1]= 0;
+ memcpy(buff, opt_basedir, basedir_len);
+ buff[basedir_len]= '/';
+ buff[basedir_len + 1]= '\0';
+
my_free(opt_basedir);
opt_basedir= my_strdup(buff, MYF(MY_FAE));
}
@@ -895,10 +897,7 @@ static int process_options(int argc, char *argv[], char *operation)
generated when the defaults were read from the file, exit.
*/
if (!opt_no_defaults && ((error= get_default_values())))
- {
- error= -1;
- goto exit;
- }
+ return -1;
/*
Check to ensure required options are present and validate the operation.
@@ -906,11 +905,9 @@ static int process_options(int argc, char *argv[], char *operation)
read a configuration file named <plugin_name>.ini from the --plugin-dir
or --plugin-ini location if the --plugin-ini option presented.
*/
- strcpy(operation, "");
- if ((error = check_options(argc, argv, operation)))
- {
- goto exit;
- }
+ operation[0]= '\0';
+ if ((error= check_options(argc, argv, operation)))
+ return error;
if (opt_verbose)
{
@@ -922,8 +919,7 @@ static int process_options(int argc, char *argv[], char *operation)
printf("# lc_messages_dir = %s\n", opt_lc_messages_dir);
}
-exit:
- return error;
+ return 0;
}
diff --git a/client/mysqldump.c b/client/mysqldump.c
index 2827353654b..010679b6ff2 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -2468,7 +2468,7 @@ static uint dump_events_for_db(char *db)
if (mysql_query_with_error_report(mysql, &event_list_res, "show events"))
DBUG_RETURN(0);
- strcpy(delimiter, ";");
+ safe_strcpy(delimiter, sizeof(delimiter), ";");
if (mysql_num_rows(event_list_res) > 0)
{
if (opt_xml)
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index abaf3ebb416..05a26fed059 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -6143,7 +6143,9 @@ int do_done(struct st_command *command)
if (*cur_block->delim)
{
/* Restore "old" delimiter after false if block */
- strcpy (delimiter, cur_block->delim);
+ if (safe_strcpy(delimiter, sizeof(delimiter), cur_block->delim))
+ die("Delimiter too long, truncated");
+
delimiter_length= strlen(delimiter);
}
/* Pop block from stack, goto next line */
@@ -6398,10 +6400,12 @@ void do_block(enum block_cmd cmd, struct st_command* command)
if (cur_block->ok)
{
cur_block->delim[0]= '\0';
- } else
+ }
+ else
{
/* Remember "old" delimiter if entering a false if block */
- strcpy (cur_block->delim, delimiter);
+ if (safe_strcpy(cur_block->delim, sizeof(cur_block->delim), delimiter))
+ die("Delimiter too long, truncated");
}
DBUG_PRINT("info", ("OK: %d", cur_block->ok));
@@ -11713,9 +11717,8 @@ static int setenv(const char *name, const char *value, int overwrite)
char *envvar= (char *)malloc(buflen);
if(!envvar)
return ENOMEM;
- strcpy(envvar, name);
- strcat(envvar, "=");
- strcat(envvar, value);
+
+ snprintf(envvar, buflen, "%s=%s", name, value);
putenv(envvar);
return 0;
}
diff --git a/cmake/install_macros.cmake b/cmake/install_macros.cmake
index 6bcafbe766e..3711b669317 100644
--- a/cmake/install_macros.cmake
+++ b/cmake/install_macros.cmake
@@ -181,7 +181,7 @@ IF(WIN32)
MARK_AS_ADVANCED(SIGNCODE)
IF(SIGNCODE)
SET(SIGNTOOL_PARAMETERS
- /a /t http://timestamp.globalsign.com/?signature=sha2
+ /a /fd SHA256 /t http://timestamp.globalsign.com/?signature=sha2
CACHE STRING "parameters for signtool (list)")
IF(NOT SIGNTOOL_EXECUTABLE)
FILE(GLOB path_list
diff --git a/dbug/dbug.c b/dbug/dbug.c
index 17567585bfd..9c1371730be 100644
--- a/dbug/dbug.c
+++ b/dbug/dbug.c
@@ -508,7 +508,7 @@ static int DbugParse(CODE_STATE *cs, const char *control)
stack->delay= stack->next->delay;
stack->maxdepth= stack->next->maxdepth;
stack->sub_level= stack->next->sub_level;
- strcpy(stack->name, stack->next->name);
+ safe_strcpy(stack->name, sizeof(stack->name), stack->next->name);
stack->out_file= stack->next->out_file;
stack->out_file->used++;
if (stack->next == &init_settings)
diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc
index 7252d008ea7..67528827dc0 100644
--- a/extra/innochecksum.cc
+++ b/extra/innochecksum.cc
@@ -835,7 +835,7 @@ parse_page(
{
unsigned long long id;
uint16_t undo_page_type;
- char str[20]={'\0'};
+ const char *str;
ulint n_recs;
uint32_t page_no, left_page_no, right_page_no;
ulint data_bytes;
@@ -843,11 +843,7 @@ parse_page(
ulint size_range_id;
/* Check whether page is doublewrite buffer. */
- if(skip_page) {
- strcpy(str, "Double_write_buffer");
- } else {
- strcpy(str, "-");
- }
+ str = skip_page ? "Double_write_buffer" : "-";
switch (mach_read_from_2(page + FIL_PAGE_TYPE)) {
diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc
index f24b068fdf4..e7d69a25b76 100644
--- a/extra/mariabackup/backup_copy.cc
+++ b/extra/mariabackup/backup_copy.cc
@@ -57,6 +57,9 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
#include "backup_copy.h"
#include "backup_mysql.h"
#include <btr0btr.h>
+#ifdef _WIN32
+#include <direct.h> /* rmdir */
+#endif
#define ROCKSDB_BACKUP_DIR "#rocksdb"
@@ -1619,7 +1622,49 @@ bool backup_finish()
return(true);
}
-bool
+
+/*
+ Drop all empty database directories in the base backup
+ that do not exist in the incremental backup.
+
+ This effectively re-plays all DROP DATABASE statements that happened
+ between the base backup and incremental backup creation times.
+
+ Note, only checking whether base_dir/db/ is empty is not enough,
+ because inc_dir/db/db.opt might have been dropped for some reason,
+ which may also result in an empty base_dir/db/.
+
+ Only the fact that at the same time:
+ - base_dir/db/ exists
+ - inc_dir/db/ does not exist
+ means that DROP DATABASE happened.
+*/
+static void
+ibx_incremental_drop_databases(const char *base_dir,
+ const char *inc_dir)
+{
+ datadir_node_t node;
+ datadir_node_init(&node);
+ datadir_iter_t *it = datadir_iter_new(base_dir);
+
+ while (datadir_iter_next(it, &node)) {
+ if (node.is_empty_dir) {
+ char path[FN_REFLEN];
+ snprintf(path, sizeof(path), "%s/%s",
+ inc_dir, node.filepath_rel);
+ if (!directory_exists(path, false)) {
+ msg("Removing %s", node.filepath);
+ rmdir(node.filepath);
+ }
+ }
+
+ }
+ datadir_iter_free(it);
+ datadir_node_free(&node);
+}
+
+
+static bool
ibx_copy_incremental_over_full()
{
const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
@@ -1702,6 +1747,8 @@ ibx_copy_incremental_over_full()
}
copy_or_move_dir(path, ROCKSDB_BACKUP_DIR, true, true);
}
+ ibx_incremental_drop_databases(xtrabackup_target_dir,
+ xtrabackup_incremental_dir);
}
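For clarity, the rule described in the comment above ibx_incremental_drop_databases() can be written as a tiny predicate. This is an illustration only, not part of the patch; db_dropped_between_backups() is a hypothetical name, and only directory_exists(), used the same way as in the hunk above, is assumed:

  /* Hypothetical helper, for illustration only: a database was dropped
     between the base and the incremental backup iff its directory still
     exists in the base backup while no corresponding directory exists in
     the incremental backup. */
  static bool db_dropped_between_backups(const char *base_db_dir,
                                         const char *inc_db_dir)
  {
    return directory_exists(base_db_dir, false) &&
           !directory_exists(inc_db_dir, false);
  }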
diff --git a/extra/mariabackup/xbcloud.cc b/extra/mariabackup/xbcloud.cc
index fed937be834..cee76e5f3d7 100644
--- a/extra/mariabackup/xbcloud.cc
+++ b/extra/mariabackup/xbcloud.cc
@@ -1676,8 +1676,11 @@ container_list_add_object(container_list *list, const char *name,
list->object_count += object_count_step;
}
assert(list->idx <= list->object_count);
- strcpy(list->objects[list->idx].name, name);
- strcpy(list->objects[list->idx].hash, hash);
+ safe_strcpy(list->objects[list->idx].name,
+ sizeof(list->objects[list->idx].name), name);
+ safe_strcpy(list->objects[list->idx].hash,
+ sizeof(list->objects[list->idx].hash), hash);
+
list->objects[list->idx].bytes = bytes;
++list->idx;
}
diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index 11e638e307c..bb440eec7f8 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -4460,11 +4460,13 @@ static bool xtrabackup_backup_low()
dst_log_file = NULL;
- if(!xtrabackup_incremental) {
- strcpy(metadata_type, "full-backuped");
+ if (!xtrabackup_incremental) {
+ safe_strcpy(metadata_type, sizeof(metadata_type),
+ "full-backuped");
metadata_from_lsn = 0;
} else {
- strcpy(metadata_type, "incremental");
+ safe_strcpy(metadata_type, sizeof(metadata_type),
+ "incremental");
metadata_from_lsn = incremental_lsn;
}
metadata_last_lsn = log_copy_scanned_lsn;
@@ -6211,7 +6213,8 @@ static bool xtrabackup_prepare_func(char** argv)
if (ok) {
char filename[FN_REFLEN];
- strcpy(metadata_type, "log-applied");
+ safe_strcpy(metadata_type, sizeof(metadata_type),
+ "log-applied");
if(xtrabackup_incremental
&& metadata_to_lsn < incremental_to_lsn)
diff --git a/include/m_string.h b/include/m_string.h
index 2747a9a7a9f..4a06b90c1aa 100644
--- a/include/m_string.h
+++ b/include/m_string.h
@@ -225,6 +225,44 @@ static inline void lex_string_set3(LEX_CSTRING *lex_str, const char *c_str,
lex_str->length= len;
}
+/*
+ Copies src into dst and ensures dst is a NULL terminated C string.
+
+ Returns 1 if the src string was truncated because dst was too small.
+ Returns 0 if src completely fit within dst. Pads the rest of dst with '\0'.
+
+ Note: dst_size must be > 0
+*/
+static inline int safe_strcpy(char *dst, size_t dst_size, const char *src)
+{
+ memset(dst, '\0', dst_size);
+ strncpy(dst, src, dst_size - 1);
+ /*
+ If the first condition is true, we are guaranteed to have src length
+ >= (dst_size - 1), hence safe to access src[dst_size - 1].
+ */
+ if (dst[dst_size - 2] != '\0' && src[dst_size - 1] != '\0')
+ return 1; /* Truncation of src. */
+ return 0;
+}
+
+/*
+ Appends src to dst and ensures dst is a NULL terminated C string.
+
+ Returns 1 if the src string was truncated because dst was too small.
+ Returns 0 if src completely fit within the remaining dst space. Pads the
+ rest of dst with '\0'.
+
+ Note: dst_size must be > 0
+*/
+static inline int safe_strcat(char *dst, size_t dst_size, const char *src)
+{
+ size_t init_len= strlen(dst);
+ if (unlikely(init_len >= dst_size - 1))
+ return 1;
+ return safe_strcpy(dst + init_len, dst_size - init_len, src);
+}
+
#ifdef __cplusplus
static inline char *safe_str(char *str)
{ return str ? str : const_cast<char*>(""); }
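A minimal usage sketch of the two helpers added above (an illustration only, not part of the patch; the include path and the buffer size are assumptions). It shows the intended contract: a non-zero return value signals truncation, while dst always remains a NUL-terminated C string:

  #include <stdio.h>
  #include "m_string.h"  /* assumed include path for safe_strcpy()/safe_strcat() */

  int main(void)
  {
    char path[16];       /* deliberately small so the last append truncates */

    /* Both calls fit: each returns 0 and path becomes "/usr/bin". */
    if (safe_strcpy(path, sizeof(path), "/usr") ||
        safe_strcat(path, sizeof(path), "/bin"))
      fprintf(stderr, "unexpected truncation\n");

    /* Does not fit: safe_strcat() returns 1, but path is still a valid
       NUL-terminated string ("/usr/bin/mariad"). */
    if (safe_strcat(path, sizeof(path), "/mariadb-install-db"))
      fprintf(stderr, "truncated: %s\n", path);

    return 0;
  }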
diff --git a/mysql-test/main/cte_recursive.result b/mysql-test/main/cte_recursive.result
index 7aa87fcbc6e..8de8a571052 100644
--- a/mysql-test/main/cte_recursive.result
+++ b/mysql-test/main/cte_recursive.result
@@ -5604,6 +5604,178 @@ r
3
drop table t1,t2,t3,x;
#
+# MDEV-30248: Embedded non-recursive CTE referring to base table 'x'
+# within a CTE with name 'x' used in a subquery from
+# select list of another CTE
+#
+CREATE TABLE x (a int) ENGINE=MyISAM;
+INSERT INTO x VALUES (3),(7),(1);
+CREATE TABLE t1 (b int) ENGINE=MYISAM;
+INSERT INTO t1 VALUES (1);
+WITH cte AS
+(
+SELECT
+(
+WITH x AS
+(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
+SELECT b FROM x AS r
+) AS c
+)
+SELECT cte.c FROM cte;
+c
+1
+WITH cte AS
+(
+SELECT
+(
+WITH x AS
+(WITH x AS (SELECT a FROM x AS t) SELECT b FROM t1)
+SELECT b FROM x AS r
+) AS c
+)
+SELECT cte.c FROM cte;
+c
+1
+WITH cte AS
+(
+SELECT
+(
+WITH x AS
+(WITH y AS (SELECT a FROM x AS t) SELECT b FROM t1)
+SELECT b FROM x AS r
+) AS c
+)
+SELECT cte.c FROM cte;
+c
+1
+WITH cte AS
+(
+SELECT
+(
+WITH x AS
+(WITH y(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM y)
+SELECT b FROM x AS r
+) AS c
+)
+SELECT cte.c FROM cte;
+c
+3
+WITH cte AS
+(
+SELECT
+(
+WITH x AS
+(WITH x(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM x)
+SELECT b FROM x AS r
+) AS c
+)
+SELECT cte.c FROM cte;
+c
+3
+WITH x AS
+(
+SELECT
+(
+WITH x AS
+(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
+SELECT b FROM x AS r
+) AS c
+)
+SELECT x.c from x;
+c
+1
+WITH cte AS
+(
+SELECT
+(
+WITH x AS
+(WITH x AS (SELECT a FROM x AS t) SELECT 2 AS b)
+SELECT r1.b FROM x AS r1, x AS r2 WHERE r1.b=r2.b
+) AS c
+)
+SELECT cte.c from cte;
+c
+2
+DROP TABLE x;
+WITH cte AS
+(
+SELECT
+(
+WITH x AS
+(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
+SELECT b FROM x AS r
+) AS c
+)
+SELECT cte.c FROM cte;
+ERROR 42S02: Table 'test.x' doesn't exist
+WITH cte AS
+(
+SELECT
+(
+WITH x AS
+(WITH x AS (SELECT a FROM x AS t) SELECT b FROM t1)
+SELECT b FROM x AS r
+) AS c
+)
+SELECT cte.c FROM cte;
+ERROR 42S02: Table 'test.x' doesn't exist
+WITH cte AS
+(
+SELECT
+(
+WITH x AS
+(WITH y AS (SELECT a FROM x AS t) SELECT b FROM t1)
+SELECT b FROM x AS r
+) AS c
+)
+SELECT cte.c FROM cte;
+ERROR 42S02: Table 'test.x' doesn't exist
+WITH cte AS
+(
+SELECT
+(
+WITH x AS
+(WITH y(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM y)
+SELECT b FROM x AS r
+) AS c
+)
+SELECT cte.c FROM cte;
+ERROR 42S02: Table 'test.x' doesn't exist
+WITH cte AS
+(
+SELECT
+(
+WITH x AS
+(WITH x(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM x)
+SELECT b FROM x AS r
+) AS c
+)
+SELECT cte.c FROM cte;
+ERROR 42S02: Table 'test.x' doesn't exist
+WITH x AS
+(
+SELECT
+(
+WITH x AS
+(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
+SELECT b FROM x AS r
+) AS c
+)
+SELECT x.c from x;
+ERROR 42S02: Table 'test.x' doesn't exist
+WITH cte AS
+(
+SELECT
+(
+WITH x AS
+(WITH x AS (SELECT a FROM x AS t) SELECT 2 AS b)
+SELECT r1.b FROM x AS r1, x AS r2 WHERE r1.b=r2.b
+) AS c
+)
+SELECT cte.c from cte;
+ERROR 42S02: Table 'test.x' doesn't exist
+DROP TABLE t1;
+#
# End of 10.3 tests
#
#
diff --git a/mysql-test/main/cte_recursive.test b/mysql-test/main/cte_recursive.test
index 3cc3b66b9c3..4d21a494bdc 100644
--- a/mysql-test/main/cte_recursive.test
+++ b/mysql-test/main/cte_recursive.test
@@ -3872,6 +3872,129 @@ select * from cte;
drop table t1,t2,t3,x;
--echo #
+--echo # MDEV-30248: Embedded non-recursive CTE referring to base table 'x'
+--echo # within a CTE with name 'x' used in a subquery from
+--echo # select list of another CTE
+--echo #
+
+CREATE TABLE x (a int) ENGINE=MyISAM;
+INSERT INTO x VALUES (3),(7),(1);
+CREATE TABLE t1 (b int) ENGINE=MYISAM;
+INSERT INTO t1 VALUES (1);
+
+let $q1=
+WITH cte AS
+(
+ SELECT
+ (
+ WITH x AS
+ (WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
+ SELECT b FROM x AS r
+ ) AS c
+)
+SELECT cte.c FROM cte;
+eval $q1;
+
+let $q2=
+WITH cte AS
+(
+ SELECT
+ (
+ WITH x AS
+ (WITH x AS (SELECT a FROM x AS t) SELECT b FROM t1)
+ SELECT b FROM x AS r
+ ) AS c
+)
+SELECT cte.c FROM cte;
+eval $q2;
+
+let $q3=
+WITH cte AS
+(
+ SELECT
+ (
+ WITH x AS
+ (WITH y AS (SELECT a FROM x AS t) SELECT b FROM t1)
+ SELECT b FROM x AS r
+ ) AS c
+)
+SELECT cte.c FROM cte;
+eval $q3;
+
+
+let $q4=
+WITH cte AS
+(
+ SELECT
+ (
+ WITH x AS
+ (WITH y(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM y)
+ SELECT b FROM x AS r
+ ) AS c
+)
+SELECT cte.c FROM cte;
+eval $q4;
+
+let $q5=
+WITH cte AS
+(
+ SELECT
+ (
+ WITH x AS
+ (WITH x(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM x)
+ SELECT b FROM x AS r
+ ) AS c
+)
+SELECT cte.c FROM cte;
+eval $q5;
+
+let $q6=
+WITH x AS
+(
+ SELECT
+ (
+ WITH x AS
+ (WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
+ SELECT b FROM x AS r
+ ) AS c
+)
+SELECT x.c from x;
+eval $q6;
+
+let $q7=
+WITH cte AS
+(
+ SELECT
+ (
+ WITH x AS
+ (WITH x AS (SELECT a FROM x AS t) SELECT 2 AS b)
+ SELECT r1.b FROM x AS r1, x AS r2 WHERE r1.b=r2.b
+ ) AS c
+)
+SELECT cte.c from cte;
+eval $q7;
+
+
+DROP TABLE x;
+
+--ERROR ER_NO_SUCH_TABLE
+eval $q1;
+--ERROR ER_NO_SUCH_TABLE
+eval $q2;
+--ERROR ER_NO_SUCH_TABLE
+eval $q3;
+--ERROR ER_NO_SUCH_TABLE
+eval $q4;
+--ERROR ER_NO_SUCH_TABLE
+eval $q5;
+--ERROR ER_NO_SUCH_TABLE
+eval $q6;
+--ERROR ER_NO_SUCH_TABLE
+eval $q7;
+
+DROP TABLE t1;
+
+--echo #
--echo # End of 10.3 tests
--echo #
diff --git a/mysql-test/main/derived_cond_pushdown.result b/mysql-test/main/derived_cond_pushdown.result
index 2c2d959a953..41f9ac6fca1 100644
--- a/mysql-test/main/derived_cond_pushdown.result
+++ b/mysql-test/main/derived_cond_pushdown.result
@@ -18190,6 +18190,69 @@ DROP TABLE transaction_items;
DROP TABLE transactions;
DROP TABLE charges;
DROP TABLE ledgers;
+#
+# MDEV-30081: Splitting from a constant mergeable derived table
+# used in inner part of an outer join.
+#
+CREATE TABLE t1 ( id int PRIMARY KEY ) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (3),(4),(7);
+CREATE TABLE t2 (
+id int, id1 int, wid int, PRIMARY KEY (id), KEY (id1), KEY (wid)
+) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (4,4,6),(7,7,7);
+CREATE TABLE t3 (
+wid int, wtid int, otid int, oid int,
+PRIMARY KEY (wid), KEY (wtid), KEY (otid), KEY (oid)
+) ENGINE=MyISAM;
+INSERT INTO t3 VALUES (6,30,6,6),(7,17,7,7);
+CREATE TABLE t4 ( id int, a int, PRIMARY KEY (id), KEY (a) ) ENGINE=MyISAM;
+INSERT INTO t4 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
+CREATE TABLE t5 (
+id int, id1 int, PRIMARY KEY (id), KEY id1 (id1)
+) ENGINE=MyISAM ;
+INSERT INTO t5 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
+ANALYZE TABLE t1,t2,t3,t4,t5;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
+test.t4 analyze status Engine-independent statistics collected
+test.t4 analyze status OK
+test.t5 analyze status Engine-independent statistics collected
+test.t5 analyze status OK
+CREATE VIEW v1 AS (SELECT id1 FROM t5 GROUP BY id1);
+SELECT t3.*, t1.id AS t1_id, t2.id AS t2_id, dt.*, v1.*
+FROM
+t1, t2, t3
+LEFT JOIN
+(SELECT t4.* FROM t4 WHERE t4.a=3) dt
+ON t3.oid = dt.id AND t3.otid = 14
+LEFT JOIN v1
+ON (v1.id1 = dt.a)
+WHERE t3.oid = t1.id AND t3.oid = t2.id AND t3.wid = 7;
+wid wtid otid oid t1_id t2_id id a id1
+7 17 7 7 7 7 NULL NULL NULL
+EXPLAIN SELECT t3.*, t1.id AS t1_id, t2.id AS t2_id, dt.*, v1.*
+FROM
+t1, t2, t3
+LEFT JOIN
+(SELECT t4.* FROM t4 WHERE t4.a=3) dt
+ON t3.oid = dt.id AND t3.otid = 14
+LEFT JOIN v1
+ON (v1.id1 = dt.a)
+WHERE t3.oid = t1.id AND t3.oid = t2.id AND t3.wid = 7;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t3 const PRIMARY,oid PRIMARY 4 const 1
+1 PRIMARY t1 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t2 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t4 const PRIMARY,a NULL NULL NULL 1 Impossible ON condition
+1 PRIMARY <derived3> ref key0 key0 5 const 0 Using where
+3 LATERAL DERIVED t5 ref id1 id1 5 const 0 Using index
+DROP VIEW v1;
+DROP TABLE t1,t2,t3,t4,t5;
# End of 10.3 tests
#
# MDEV-18679: materialized view with SELECT S containing materialized
diff --git a/mysql-test/main/derived_cond_pushdown.test b/mysql-test/main/derived_cond_pushdown.test
index bd38716f2a6..6cfe23b7866 100644
--- a/mysql-test/main/derived_cond_pushdown.test
+++ b/mysql-test/main/derived_cond_pushdown.test
@@ -3870,6 +3870,55 @@ DROP TABLE transactions;
DROP TABLE charges;
DROP TABLE ledgers;
+
+--echo #
+--echo # MDEV-30081: Splitting from a constant mergeable derived table
+--echo # used in inner part of an outer join.
+--echo #
+
+ CREATE TABLE t1 ( id int PRIMARY KEY ) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (3),(4),(7);
+
+CREATE TABLE t2 (
+ id int, id1 int, wid int, PRIMARY KEY (id), KEY (id1), KEY (wid)
+) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (4,4,6),(7,7,7);
+
+CREATE TABLE t3 (
+ wid int, wtid int, otid int, oid int,
+ PRIMARY KEY (wid), KEY (wtid), KEY (otid), KEY (oid)
+) ENGINE=MyISAM;
+INSERT INTO t3 VALUES (6,30,6,6),(7,17,7,7);
+
+CREATE TABLE t4 ( id int, a int, PRIMARY KEY (id), KEY (a) ) ENGINE=MyISAM;
+INSERT INTO t4 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
+
+CREATE TABLE t5 (
+ id int, id1 int, PRIMARY KEY (id), KEY id1 (id1)
+) ENGINE=MyISAM ;
+INSERT INTO t5 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
+
+ANALYZE TABLE t1,t2,t3,t4,t5;
+
+CREATE VIEW v1 AS (SELECT id1 FROM t5 GROUP BY id1);
+
+let $q=
+SELECT t3.*, t1.id AS t1_id, t2.id AS t2_id, dt.*, v1.*
+FROM
+ t1, t2, t3
+ LEFT JOIN
+ (SELECT t4.* FROM t4 WHERE t4.a=3) dt
+ ON t3.oid = dt.id AND t3.otid = 14
+ LEFT JOIN v1
+ ON (v1.id1 = dt.a)
+WHERE t3.oid = t1.id AND t3.oid = t2.id AND t3.wid = 7;
+
+eval $q;
+eval EXPLAIN $q;
+
+DROP VIEW v1;
+DROP TABLE t1,t2,t3,t4,t5;
+
--echo # End of 10.3 tests
--echo #
diff --git a/mysql-test/main/win.result b/mysql-test/main/win.result
index c9a651c7e5e..89dab46f103 100644
--- a/mysql-test/main/win.result
+++ b/mysql-test/main/win.result
@@ -4272,11 +4272,13 @@ GROUP BY
LEFT((SYSDATE()), 'foo')
WITH ROLLUP;
SUM(b) OVER (PARTITION BY a) ROW_NUMBER() OVER (PARTITION BY b)
-NULL 1
-NULL 1
+0 1
+0 2
Warnings:
Warning 1292 Truncated incorrect INTEGER value: 'foo'
Warning 1292 Truncated incorrect INTEGER value: 'foo'
+Warning 1292 Truncated incorrect DOUBLE value: 'bar'
+Warning 1292 Truncated incorrect DOUBLE value: 'bar'
drop table t1;
#
#
@@ -4335,6 +4337,46 @@ pk a bit_or
DROP TABLE t2;
DROP TABLE t1;
#
+# MDEV-15178: Filesort::make_sortorder: Assertion `pos->field != __null |
+#
+CREATE TABLE t1 (i1 int, a int);
+INSERT INTO t1 VALUES (1, 1), (2, 2),(3, 3);
+CREATE TABLE t2 (i2 int);
+INSERT INTO t2 VALUES (1),(2),(5),(1),(7),(4),(3);
+SELECT
+a,
+RANK() OVER (ORDER BY SUM(DISTINCT i1))
+FROM
+t1, t2 WHERE t2.i2 = t1.i1
+GROUP BY
+a;
+a RANK() OVER (ORDER BY SUM(DISTINCT i1))
+1 1
+2 2
+3 3
+DROP TABLE t1, t2;
+#
+# MDEV-17014: Crash server using ROW_NUMBER() OVER (PARTITION ..)
+#
+CREATE TABLE t1 (UID BIGINT);
+CREATE TABLE t2 (UID BIGINT);
+CREATE TABLE t3 (UID BIGINT);
+insert into t1 VALUES (1),(2);
+insert into t2 VALUES (1),(2);
+insert into t3 VALUES (1),(2);
+SELECT
+ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
+FROM t1 TT1,
+t2 TT2,
+t3 TT3
+WHERE TT3.UID = TT1.UID AND TT2.UID = TT3.UID
+GROUP BY TT1.UID
+;
+ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
+1
+1
+DROP TABLE t1, t2, t3;
+#
# End of 10.3 tests
#
#
diff --git a/mysql-test/main/win.test b/mysql-test/main/win.test
index 3588768f662..939c36e7c5e 100644
--- a/mysql-test/main/win.test
+++ b/mysql-test/main/win.test
@@ -2817,6 +2817,46 @@ DROP TABLE t2;
DROP TABLE t1;
+--echo #
+--echo # MDEV-15178: Filesort::make_sortorder: Assertion `pos->field != __null |
+--echo #
+
+CREATE TABLE t1 (i1 int, a int);
+INSERT INTO t1 VALUES (1, 1), (2, 2),(3, 3);
+
+CREATE TABLE t2 (i2 int);
+INSERT INTO t2 VALUES (1),(2),(5),(1),(7),(4),(3);
+
+SELECT
+ a,
+ RANK() OVER (ORDER BY SUM(DISTINCT i1))
+FROM
+ t1, t2 WHERE t2.i2 = t1.i1
+GROUP BY
+ a;
+
+DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-17014: Crash server using ROW_NUMBER() OVER (PARTITION ..)
+--echo #
+CREATE TABLE t1 (UID BIGINT);
+CREATE TABLE t2 (UID BIGINT);
+CREATE TABLE t3 (UID BIGINT);
+
+insert into t1 VALUES (1),(2);
+insert into t2 VALUES (1),(2);
+insert into t3 VALUES (1),(2);
+SELECT
+ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
+FROM t1 TT1,
+ t2 TT2,
+ t3 TT3
+WHERE TT3.UID = TT1.UID AND TT2.UID = TT3.UID
+GROUP BY TT1.UID
+;
+
+DROP TABLE t1, t2, t3;
--echo #
--echo # End of 10.3 tests
diff --git a/mysql-test/main/win_orderby.result b/mysql-test/main/win_orderby.result
index bf4a40a4db3..1a9860c1c76 100644
--- a/mysql-test/main/win_orderby.result
+++ b/mysql-test/main/win_orderby.result
@@ -24,3 +24,66 @@ pk count(a) over (order by pk rows between 2 preceding and 2 following)
28 5
27 5
drop table t0,t1;
+#
+# MDEV-30052: Crash with a query containing nested WINDOW clauses
+#
+CREATE TABLE t1 (c INT);
+insert into t1 values (1),(2);
+UPDATE t1 SET c=1
+WHERE c=2
+ORDER BY
+(1 IN ((
+SELECT *
+FROM (SELECT * FROM t1) AS v1
+GROUP BY c
+WINDOW v2 AS (ORDER BY
+(SELECT *
+FROM t1
+GROUP BY c
+WINDOW v3 AS (PARTITION BY c)
+)
+)
+))
+);
+drop table t1;
+#
+# MDEV-29359: Server crashed with heap-use-after-free in
+# Field::is_null(long long) const (Just testcase)
+#
+CREATE TABLE t1 (id int);
+INSERT INTO t1 VALUES (-1),(0),(84);
+SELECT
+id IN (SELECT id
+FROM t1
+WINDOW w AS (ORDER BY (SELECT 1
+FROM t1
+WHERE
+EXISTS ( SELECT id
+FROM t1
+GROUP BY id
+WINDOW w2 AS (ORDER BY id)
+)
+)
+)
+)
+FROM t1;
+id IN (SELECT id
+FROM t1
+WINDOW w AS (ORDER BY (SELECT 1
+FROM t1
+WHERE
+EXISTS ( SELECT id
+FROM t1
+GROUP BY id
+WINDOW w2 AS (ORDER BY id)
+)
+)
+)
+)
+1
+1
+1
+DROP TABLE t1;
+#
+# End of 10.3 tests
+#
diff --git a/mysql-test/main/win_orderby.test b/mysql-test/main/win_orderby.test
index 7f02a582ea0..65421fc095c 100644
--- a/mysql-test/main/win_orderby.test
+++ b/mysql-test/main/win_orderby.test
@@ -33,3 +33,58 @@ limit 4;
--disable_view_protocol
drop table t0,t1;
+
+
+--echo #
+--echo # MDEV-30052: Crash with a query containing nested WINDOW clauses
+--echo #
+
+CREATE TABLE t1 (c INT);
+insert into t1 values (1),(2);
+UPDATE t1 SET c=1
+WHERE c=2
+ORDER BY
+ (1 IN ((
+ SELECT *
+ FROM (SELECT * FROM t1) AS v1
+ GROUP BY c
+ WINDOW v2 AS (ORDER BY
+ (SELECT *
+ FROM t1
+ GROUP BY c
+ WINDOW v3 AS (PARTITION BY c)
+ )
+ )
+ ))
+ );
+drop table t1;
+
+--echo #
+--echo # MDEV-29359: Server crashed with heap-use-after-free in
+--echo # Field::is_null(long long) const (Just testcase)
+--echo #
+
+CREATE TABLE t1 (id int);
+INSERT INTO t1 VALUES (-1),(0),(84);
+
+SELECT
+ id IN (SELECT id
+ FROM t1
+ WINDOW w AS (ORDER BY (SELECT 1
+ FROM t1
+ WHERE
+ EXISTS ( SELECT id
+ FROM t1
+ GROUP BY id
+ WINDOW w2 AS (ORDER BY id)
+ )
+ )
+ )
+ )
+FROM t1;
+
+DROP TABLE t1;
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
diff --git a/mysql-test/suite/binlog/r/binlog_recovery_after_checksum_change.result b/mysql-test/suite/binlog/r/binlog_recovery_after_checksum_change.result
new file mode 100644
index 00000000000..85b0b84c684
--- /dev/null
+++ b/mysql-test/suite/binlog/r/binlog_recovery_after_checksum_change.result
@@ -0,0 +1,8 @@
+connection default;
+set @@global.binlog_checksum=none;
+set @@session.debug_dbug='d,crash_before_write_second_checkpoint_event';
+set @@global.binlog_checksum=crc32;
+ERROR HY000: Lost connection to MySQL server during query
+connection default;
+NOT FOUND /Replication event checksum verification failed/ in mysqld.1.err
+End of the tests
diff --git a/mysql-test/suite/binlog/t/innodb_rc_insert_before_delete.test b/mysql-test/suite/binlog/t/innodb_rc_insert_before_delete.test
new file mode 100644
index 00000000000..891a7b8ba2f
--- /dev/null
+++ b/mysql-test/suite/binlog/t/innodb_rc_insert_before_delete.test
@@ -0,0 +1,89 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+--source include/have_binlog_format_mixed.inc
+--source include/count_sessions.inc
+
+# MDEV-30010 merely adds a Read-Committed version of the MDEV-30225 test
+# solely to prove the RC isolation yields ROW binlog format as it is
+# supposed to:
+# https://mariadb.com/kb/en/unsafe-statements-for-statement-based-replication/#isolation-levels.
+# The original MDEV-30225 test is adapted to the RC to create
+# a similarly sophisticated scenario which does not lead to any deadlock though.
+
+--connect (pause_purge,localhost,root)
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+
+--connection default
+CREATE TABLE t (pk int PRIMARY KEY, sk INT UNIQUE) ENGINE=InnoDB;
+INSERT INTO t VALUES (10, 100);
+
+--connect (con1,localhost,root)
+BEGIN; # trx 0
+SELECT * FROM t WHERE sk = 100 FOR UPDATE;
+
+--connect (con2,localhost,root)
+SET DEBUG_SYNC="lock_wait_suspend_thread_enter SIGNAL insert_wait_started";
+# trx 1 blocks trying to read the record in the secondary index during the
+# duplicates check. It is the first in the waiting queue, so it will be woken
+# up first when trx 0 commits.
+--send INSERT INTO t VALUES (5, 100) # trx 1
+
+--connect (con3,localhost,root)
+SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SET DEBUG_SYNC="now WAIT_FOR insert_wait_started";
+SET DEBUG_SYNC="lock_wait_suspend_thread_enter SIGNAL delete_started_waiting";
+# trx 2 can delete (5, 100) on master, but not on slave, as on slave trx 1
+# can insert (5, 100) after trx 2 positioned its cursor. Trx 2's lock is placed
+# in the waiting queue after trx 1's lock, but its persistent cursor position was
+# stored on (100, 10) record in secondary index before suspending. After trx 1
+# is committed, trx 2 will restore persistent cursor position on (100, 10). As
+# (100, 5) secondary index record was inserted before (100, 10) in logical
+# order, and (100, 10) record is delete-marked, trx 2 just continues scanning.
+#
+# Note: there can be several records with the same key in a unique secondary
+# index, but only one of them can be non-delete-marked. That's why, when we do
+# a point query, the cursor position is set on the first record in logical order,
+# and then records are iterated until either a non-delete-marked record is found
+# or all records with the same unique fields have been iterated.
+
+# to prepare showing interesting binlog events
+--let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1)
+BEGIN;
+--send UPDATE t SET sk = 200 WHERE sk = 100; # trx 2
+
+--connection con1
+SET DEBUG_SYNC="now WAIT_FOR delete_started_waiting";
+DELETE FROM t WHERE sk=100; # trx 0
+COMMIT;
+--disconnect con1
+
+--connection con2
+--reap
+--disconnect con2
+
+--connection con3
+--error 0
+--reap
+if (`SELECT ROW_COUNT() > 0`)
+{
+ --echo unexpected effective UPDATE
+ --die
+}
+--echo must be logged in ROW format as the only event of trx 2 (con3)
+INSERT INTO t VALUES (11, 101);
+COMMIT;
+--source include/show_binlog_events.inc
+--disconnect con3
+
+--connection default
+# If the bug is not fixed, we will see the row inserted by trx 1 here. This can
+# cause a duplicate key error on the slave, when some other trx tries to insert
+# a row with the same secondary key as was inserted by trx 1 and not deleted by
+# trx 2.
+SELECT * FROM t;
+
+--disconnect pause_purge
+SET DEBUG_SYNC="RESET";
+DROP TABLE t;
+--source include/wait_until_count_sessions.inc
diff --git a/mysql-test/suite/encryption/r/tempfiles_encrypted.result b/mysql-test/suite/encryption/r/tempfiles_encrypted.result
index 077b770b9d2..e5c1e99d0e4 100644
--- a/mysql-test/suite/encryption/r/tempfiles_encrypted.result
+++ b/mysql-test/suite/encryption/r/tempfiles_encrypted.result
@@ -4278,11 +4278,13 @@ GROUP BY
LEFT((SYSDATE()), 'foo')
WITH ROLLUP;
SUM(b) OVER (PARTITION BY a) ROW_NUMBER() OVER (PARTITION BY b)
-NULL 1
-NULL 1
+0 1
+0 2
Warnings:
Warning 1292 Truncated incorrect INTEGER value: 'foo'
Warning 1292 Truncated incorrect INTEGER value: 'foo'
+Warning 1292 Truncated incorrect DOUBLE value: 'bar'
+Warning 1292 Truncated incorrect DOUBLE value: 'bar'
drop table t1;
#
#
@@ -4341,6 +4343,46 @@ pk a bit_or
DROP TABLE t2;
DROP TABLE t1;
#
+# MDEV-15178: Filesort::make_sortorder: Assertion `pos->field != __null |
+#
+CREATE TABLE t1 (i1 int, a int);
+INSERT INTO t1 VALUES (1, 1), (2, 2),(3, 3);
+CREATE TABLE t2 (i2 int);
+INSERT INTO t2 VALUES (1),(2),(5),(1),(7),(4),(3);
+SELECT
+a,
+RANK() OVER (ORDER BY SUM(DISTINCT i1))
+FROM
+t1, t2 WHERE t2.i2 = t1.i1
+GROUP BY
+a;
+a RANK() OVER (ORDER BY SUM(DISTINCT i1))
+1 1
+2 2
+3 3
+DROP TABLE t1, t2;
+#
+# MDEV-17014: Crash server using ROW_NUMBER() OVER (PARTITION ..)
+#
+CREATE TABLE t1 (UID BIGINT);
+CREATE TABLE t2 (UID BIGINT);
+CREATE TABLE t3 (UID BIGINT);
+insert into t1 VALUES (1),(2);
+insert into t2 VALUES (1),(2);
+insert into t3 VALUES (1),(2);
+SELECT
+ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
+FROM t1 TT1,
+t2 TT2,
+t3 TT3
+WHERE TT3.UID = TT1.UID AND TT2.UID = TT3.UID
+GROUP BY TT1.UID
+;
+ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
+1
+1
+DROP TABLE t1, t2, t3;
+#
# End of 10.3 tests
#
#
diff --git a/mysql-test/suite/mariabackup/incremental_drop_db.result b/mysql-test/suite/mariabackup/incremental_drop_db.result
new file mode 100644
index 00000000000..3a6c89f8de0
--- /dev/null
+++ b/mysql-test/suite/mariabackup/incremental_drop_db.result
@@ -0,0 +1,30 @@
+call mtr.add_suppression("InnoDB: New log files created");
+#
+# Start of 10.3 tests
+#
+#
+# MDEV-23335 MariaBackup Incremental Does Not Reflect Dropped/Created Databases
+#
+CREATE DATABASE db1;
+CREATE DATABASE db2;
+CREATE TABLE db1.t1 (a INT) ENGINE=MyISAM;
+CREATE TABLE db1.t2 (a INT) ENGINE=InnoDB;
+# Create base backup
+DROP DATABASE db1;
+# Create incremental backup
+# Remove incremental_dir/db2/db.opt file to make incremental_dir/db2/ empty
+# Prepare base backup, apply incremental one
+# shutdown server
+# remove datadir
+# xtrabackup move back
+# restart
+# Expect no 'db1' in the output, because it was really dropped.
+# Expect 'db2' in the output, because it was not dropped!
+# (its incremental directory was emptied only)
+SHOW DATABASES LIKE 'db%';
+Database (db%)
+db2
+DROP DATABASE db2;
+#
+# End of 10.3 tests
+#
diff --git a/mysql-test/suite/mariabackup/incremental_drop_db.test b/mysql-test/suite/mariabackup/incremental_drop_db.test
new file mode 100644
index 00000000000..de270435e9d
--- /dev/null
+++ b/mysql-test/suite/mariabackup/incremental_drop_db.test
@@ -0,0 +1,68 @@
+--source include/have_innodb.inc
+call mtr.add_suppression("InnoDB: New log files created");
+
+--echo #
+--echo # Start of 10.3 tests
+--echo #
+
+--echo #
+--echo # MDEV-23335 MariaBackup Incremental Does Not Reflect Dropped/Created Databases
+--echo #
+
+--let $datadir=`SELECT @@datadir`
+--let $basedir=$MYSQLTEST_VARDIR/tmp/backup
+--let $incremental_dir=$MYSQLTEST_VARDIR/tmp/backup_inc1
+
+# Create two databases:
+# - db1 is dropped normally below
+# - db2 is used to cover a corner case: its db.opt file is removed
+
+# Incremental backup contains:
+# - no directory for db1
+# - an empty directory for db2 (after we remove db2/db.opt)
+
+
+CREATE DATABASE db1;
+CREATE DATABASE db2;
+
+# Add some tables to db1
+CREATE TABLE db1.t1 (a INT) ENGINE=MyISAM;
+CREATE TABLE db1.t2 (a INT) ENGINE=InnoDB;
+
+--echo # Create base backup
+--disable_result_log
+--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir
+--enable_result_log
+
+DROP DATABASE db1;
+
+--echo # Create incremental backup
+--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$incremental_dir --incremental-basedir=$basedir
+
+--echo # Remove incremental_dir/db2/db.opt file to make incremental_dir/db2/ empty
+--remove_file $incremental_dir/db2/db.opt
+
+
+--echo # Prepare base backup, apply incremental one
+--disable_result_log
+--exec $XTRABACKUP --prepare --target-dir=$basedir
+--exec $XTRABACKUP --prepare --target-dir=$basedir --incremental-dir=$incremental_dir
+--enable_result_log
+
+--let $targetdir=$basedir
+--source include/restart_and_restore.inc
+--enable_result_log
+
+--echo # Expect no 'db1' in the output, because it was really dropped.
+--echo # Expect 'db2' in the output, because it was not dropped!
+--echo # (its incremental directory was emptied only)
+
+SHOW DATABASES LIKE 'db%';
+DROP DATABASE db2;
+
+--rmdir $basedir
+--rmdir $incremental_dir
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
diff --git a/mysql-test/suite/rpl/include/create_or_drop_sync_func.inc b/mysql-test/suite/rpl/include/create_or_drop_sync_func.inc
new file mode 100644
index 00000000000..74cde5de1db
--- /dev/null
+++ b/mysql-test/suite/rpl/include/create_or_drop_sync_func.inc
@@ -0,0 +1,75 @@
+# Creates or drops a stored function used as part of a debug-sync based
+# synchronization mechanism between replication servers.
+#
+# Parameters:
+# $create_or_drop= [create]
+# $server_master = [master]
+# $server_slave = [slave]
+if (!$create_or_drop)
+{
+  --let $create_or_drop=create
+}
+
+if (`select strcmp('$create_or_drop', 'create') = 0`)
+{
+ if (!$server_master)
+ {
+ --let $server_master=master
+ }
+ if (!$server_slave)
+ {
+ --let $server_slave=slave
+ }
+
+ --connection $server_master
+ # Use a stored function to inject a debug_sync into the appropriate THD.
+ # The function does nothing on the master, and on the slave it injects the
+ # desired debug_sync action(s).
+  SET sql_log_bin=0;
+ --delimiter ||
+ CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+ RETURNS INT DETERMINISTIC
+ BEGIN
+ RETURN x;
+ END
+ ||
+ --delimiter ;
+ SET sql_log_bin=1;
+
+ --connection $server_slave
+
+  SET sql_log_bin=0;
+ --delimiter ||
+ CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+ RETURNS INT DETERMINISTIC
+ BEGIN
+ IF d1 != '' THEN
+ SET debug_sync = d1;
+ END IF;
+ IF d2 != '' THEN
+ SET debug_sync = d2;
+ END IF;
+ RETURN x;
+ END
+ ||
+ --delimiter ;
+SET sql_log_bin=1;
+}
+
+if (`select strcmp('$create_or_drop', 'drop') = 0`)
+{
+ if (!$server_slave)
+ {
+ --let $server_slave=slave
+ }
+ if (!$server_master)
+ {
+ --let $server_master=master
+ }
+ --connection $server_slave
+ SET DEBUG_SYNC='RESET';
+
+ --connection $server_master
+ SET DEBUG_SYNC='RESET';
+ DROP FUNCTION foo;
+}
diff --git a/mysql-test/suite/rpl/r/rpl_delayed_parallel_slave_sbm.result b/mysql-test/suite/rpl/r/rpl_delayed_parallel_slave_sbm.result
new file mode 100644
index 00000000000..f783b1e0783
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_delayed_parallel_slave_sbm.result
@@ -0,0 +1,60 @@
+include/master-slave.inc
+[connection master]
+connection slave;
+include/stop_slave.inc
+change master to master_delay=3, master_use_gtid=Slave_Pos;
+set @@GLOBAL.slave_parallel_threads=2;
+include/start_slave.inc
+connection master;
+create table t1 (a int);
+include/sync_slave_sql_with_master.inc
+#
+# Pt 1) Ensure SBM is updated immediately upon arrival of the next event
+# Lock t1 on slave so the first received transaction does not complete/commit
+connection slave;
+LOCK TABLES t1 WRITE;
+connection master;
+# Sleep 2 to allow a buffer between events for SBM check
+insert into t1 values (0);
+include/save_master_gtid.inc
+connection slave;
+# Waiting for transaction to arrive on slave and begin SQL Delay..
+# Validating SBM is updated on event arrival..
+# ..done
+connection slave;
+UNLOCK TABLES;
+include/sync_with_master_gtid.inc
+#
+# Pt 2) If the SQL thread has not entered an idle state, ensure
+# following events do not update SBM
+# Stop slave IO thread so it receives both events together on restart
+connection slave;
+include/stop_slave_io.inc
+connection master;
+# Sleep 2 to allow a buffer between events for SBM check
+insert into t1 values (1);
+# Sleep 3 to create gap between events
+insert into t1 values (2);
+connection slave;
+LOCK TABLES t1 WRITE;
+START SLAVE IO_THREAD;
+# Wait for first transaction to complete SQL delay and begin execution..
+# Validate SBM calculation doesn't use the second transaction because SQL thread shouldn't have gone idle..
+# ..and that SBM wasn't calculated using prior committed transactions
+# ..done
+connection slave;
+UNLOCK TABLES;
+#
+# Cleanup
+# Reset master_delay
+include/stop_slave.inc
+CHANGE MASTER TO master_delay=0;
+set @@GLOBAL.slave_parallel_threads=4;
+include/start_slave.inc
+connection master;
+DROP TABLE t1;
+include/save_master_gtid.inc
+connection slave;
+include/sync_with_master_gtid.inc
+include/rpl_end.inc
+# End of rpl_delayed_parallel_slave_sbm.test
diff --git a/mysql-test/suite/rpl/r/rpl_parallel_analyze.result b/mysql-test/suite/rpl/r/rpl_parallel_analyze.result
new file mode 100644
index 00000000000..c0a2abbc1ee
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_parallel_analyze.result
@@ -0,0 +1,76 @@
+include/master-slave.inc
+[connection master]
+# Initialize
+connection slave;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
+# Setup data
+connection master;
+CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE ta (a int);
+include/save_master_gtid.inc
+connection slave;
+include/sync_with_master_gtid.inc
+connection master;
+SET sql_log_bin=0;
+CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+RETURNS INT DETERMINISTIC
+BEGIN
+RETURN x;
+END
+||
+SET sql_log_bin=1;
+connection slave;
+SET sql_log_bin=0;
+CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
+RETURNS INT DETERMINISTIC
+BEGIN
+IF d1 != '' THEN
+SET debug_sync = d1;
+END IF;
+IF d2 != '' THEN
+SET debug_sync = d2;
+END IF;
+RETURN x;
+END
+||
+SET sql_log_bin=1;
+include/stop_slave.inc
+SET @old_parallel_threads =@@GLOBAL.slave_parallel_threads;
+SET @old_parallel_mode =@@GLOBAL.slave_parallel_mode;
+SET @old_gtid_strict_mode =@@GLOBAL.gtid_strict_mode;
+SET GLOBAL slave_parallel_threads=10;
+SET GLOBAL slave_parallel_mode=conservative;
+SET GLOBAL gtid_strict_mode=ON;
+include/start_slave.inc
+connection master;
+SET @old_format= @@SESSION.binlog_format;
+SET binlog_format=statement;
+INSERT INTO t1 VALUES (foo(1, 'rpl_parallel_after_mark_start_commit WAIT_FOR sig_go', ''));
+ANALYZE TABLE ta;
+Table Op Msg_type Msg_text
+test.ta analyze status Engine-independent statistics collected
+test.ta analyze status Table is already up to date
+include/save_master_gtid.inc
+connection slave;
+SELECT info FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit";
+info
+ANALYZE TABLE ta
+set @@debug_sync="now signal sig_go";
+include/sync_with_master_gtid.inc
+# Cleanup
+connection master;
+DROP TABLE t1,ta;
+connection slave;
+SET DEBUG_SYNC='RESET';
+connection master;
+SET DEBUG_SYNC='RESET';
+DROP FUNCTION foo;
+include/save_master_gtid.inc
+connection slave;
+include/sync_with_master_gtid.inc
+include/stop_slave.inc
+SET @@GLOBAL.slave_parallel_threads=@old_parallel_threads;
+SET @@GLOBAL.slave_parallel_mode =@old_parallel_mode;
+SET @@GLOBAL.gtid_strict_mode =@old_gtid_strict_mode;
+include/start_slave.inc
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_delayed_parallel_slave_sbm-slave.opt b/mysql-test/suite/rpl/t/rpl_delayed_parallel_slave_sbm-slave.opt
new file mode 100644
index 00000000000..9eea6a54b68
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_delayed_parallel_slave_sbm-slave.opt
@@ -0,0 +1 @@
+--slave-parallel-threads=4
diff --git a/mysql-test/suite/rpl/t/rpl_delayed_parallel_slave_sbm.test b/mysql-test/suite/rpl/t/rpl_delayed_parallel_slave_sbm.test
new file mode 100644
index 00000000000..4bcb7ad9e3f
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_delayed_parallel_slave_sbm.test
@@ -0,0 +1,133 @@
+#
+# This test ensures that after a delayed parallel slave has idled, i.e.
+# executed everything in its relay log, the next event group that the SQL
+# thread reads from the relay log will immediately be used in the
+# Seconds_Behind_Master. In particular, it ensures that the calculation for
+# Seconds_Behind_Master is based on the timestamp of the new transaction,
+# rather than the last committed transaction.
+#
+# References:
+# MDEV-29639: Seconds_Behind_Master is incorrect for Delayed, Parallel
+# Replicas
+#
+
+--source include/master-slave.inc
+
+--connection slave
+--source include/stop_slave.inc
+--let $master_delay= 3
+--eval change master to master_delay=$master_delay, master_use_gtid=Slave_Pos
+--let $old_slave_threads= `SELECT @@GLOBAL.slave_parallel_threads`
+set @@GLOBAL.slave_parallel_threads=2;
+--source include/start_slave.inc
+
+--connection master
+create table t1 (a int);
+--source include/sync_slave_sql_with_master.inc
+
+--echo #
+--echo # Pt 1) Ensure SBM is updated immediately upon arrival of the next event
+
+--echo # Lock t1 on slave so the first received transaction does not complete/commit
+--connection slave
+LOCK TABLES t1 WRITE;
+
+--connection master
+--echo # Sleep 2 to allow a buffer between events for SBM check
+sleep 2;
+
+--let $ts_trx_before_ins= `SELECT UNIX_TIMESTAMP()`
+--let $insert_ctr= 0
+--eval insert into t1 values ($insert_ctr)
+--inc $insert_ctr
+--source include/save_master_gtid.inc
+
+--connection slave
+
+--echo # Waiting for transaction to arrive on slave and begin SQL Delay..
+--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE 'Waiting until MASTER_DELAY seconds after master executed event';
+--source include/wait_condition.inc
+
+--echo # Validating SBM is updated on event arrival..
+--let $sbm_trx1_arrive= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1)
+--let $seconds_since_idling= `SELECT UNIX_TIMESTAMP() - $ts_trx_before_ins`
+if (`SELECT $sbm_trx1_arrive > ($seconds_since_idling + 1)`)
+{
+ --echo # SBM was $sbm_trx1_arrive yet shouldn't have been larger than $seconds_since_idling + 1 (for possible negative clock_diff_with_master)
+ --die Seconds_Behind_Master should reset after idling
+}
+--echo # ..done
+
+--connection slave
+UNLOCK TABLES;
+--source include/sync_with_master_gtid.inc
+
+--echo #
+--echo # Pt 2) If the SQL thread has not entered an idle state, ensure
+--echo # following events do not update SBM
+
+--echo # Stop slave IO thread so it receives both events together on restart
+--connection slave
+--source include/stop_slave_io.inc
+
+--connection master
+
+--echo # Sleep 2 to allow a buffer between events for SBM check
+sleep 2;
+--let $ts_trxpt2_before_ins= `SELECT UNIX_TIMESTAMP()`
+--eval insert into t1 values ($insert_ctr)
+--inc $insert_ctr
+--echo # Sleep 3 to create gap between events
+sleep 3;
+--eval insert into t1 values ($insert_ctr)
+--inc $insert_ctr
+--let $ts_trx_after_ins= `SELECT UNIX_TIMESTAMP()`
+
+--connection slave
+LOCK TABLES t1 WRITE;
+
+START SLAVE IO_THREAD;
+
+--echo # Wait for first transaction to complete SQL delay and begin execution..
+--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE 'Waiting for table metadata lock%' AND command LIKE 'Slave_Worker';
+--source include/wait_condition.inc
+
+--echo # Validate SBM calculation doesn't use the second transaction because SQL thread shouldn't have gone idle..
+--let $sbm_after_trx_no_idle= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1)
+--let $timestamp_trxpt2_arrive= `SELECT UNIX_TIMESTAMP()`
+if (`SELECT $sbm_after_trx_no_idle < $timestamp_trxpt2_arrive - $ts_trx_after_ins`)
+{
+ --let $cmpv= `SELECT $timestamp_trxpt2_arrive - $ts_trx_after_ins`
+ --echo # SBM $sbm_after_trx_no_idle was more recent than time since last transaction ($cmpv seconds)
+ --die Seconds_Behind_Master should not have used second transaction timestamp
+}
+--let $seconds_since_idling= `SELECT ($timestamp_trxpt2_arrive - $ts_trxpt2_before_ins)`
+--echo # ..and that SBM wasn't calculated using prior committed transactions
+if (`SELECT $sbm_after_trx_no_idle > ($seconds_since_idling + 1)`)
+{
+ --echo # SBM was $sbm_after_trx_no_idle yet shouldn't have been larger than $seconds_since_idling + 1 (for possible negative clock_diff_with_master)
+ --die Seconds_Behind_Master calculation should not have used prior committed transaction
+}
+--echo # ..done
+
+--connection slave
+UNLOCK TABLES;
+
+--echo #
+--echo # Cleanup
+
+--echo # Reset master_delay
+--source include/stop_slave.inc
+--eval CHANGE MASTER TO master_delay=0
+--eval set @@GLOBAL.slave_parallel_threads=$old_slave_threads
+--source include/start_slave.inc
+
+--connection master
+DROP TABLE t1;
+--source include/save_master_gtid.inc
+
+--connection slave
+--source include/sync_with_master_gtid.inc
+
+--source include/rpl_end.inc
+--echo # End of rpl_delayed_parallel_slave_sbm.test
diff --git a/mysql-test/suite/rpl/t/rpl_parallel_analyze.test b/mysql-test/suite/rpl/t/rpl_parallel_analyze.test
new file mode 100644
index 00000000000..921c6f02904
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_parallel_analyze.test
@@ -0,0 +1,84 @@
+# The test file is created to prove fixes to
+# MDEV-30323 Some DDLs like ANALYZE can complete on parallel slave out of order
+# Debug-sync tests aiming at parallel replication of ADMIN commands
+# are welcome here.
+
+--source include/have_innodb.inc
+--source include/have_debug_sync.inc
+--source include/master-slave.inc
+
+--echo # Initialize
+--connection slave
+ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
+
+--echo # Setup data
+--connection master
+CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE ta (a int);
+--let $pre_load_gtid=`SELECT @@last_gtid`
+--source include/save_master_gtid.inc
+
+--connection slave
+--source include/sync_with_master_gtid.inc
+
+--source suite/rpl/include/create_or_drop_sync_func.inc
+
+# configure MDEV-30323 slave
+--source include/stop_slave.inc
+SET @old_parallel_threads =@@GLOBAL.slave_parallel_threads;
+SET @old_parallel_mode =@@GLOBAL.slave_parallel_mode;
+SET @old_gtid_strict_mode =@@GLOBAL.gtid_strict_mode;
+SET GLOBAL slave_parallel_threads=10;
+SET GLOBAL slave_parallel_mode=conservative;
+SET GLOBAL gtid_strict_mode=ON;
+--source include/start_slave.inc
+
+# The MDEV-30323 setup needs two groups of events, the first of which is a DML
+# and the second of which is ANALYZE.
+# The latter is made to race in slave execution over the DML thanks
+# to a DML latency simulation.
+# In the fixed case the race-over should not be a problem: ultimately
+# ANALYZE must wait for its turn to update slave@@global.gtid_binlog_pos.
+# Otherwise the reported OOO error must be issued.
+
+--connection master
+SET @old_format= @@SESSION.binlog_format;
+SET binlog_format=statement;
+INSERT INTO t1 VALUES (foo(1, 'rpl_parallel_after_mark_start_commit WAIT_FOR sig_go', ''));
+
+ANALYZE TABLE ta;
+--source include/save_master_gtid.inc
+
+--connection slave
+--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit"
+--source include/wait_condition.inc
+
+SELECT info FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit";
+if (`select strcmp(@@global.gtid_binlog_pos, '$pre_load_gtid') <> 0 or strcmp(@@global.gtid_slave_pos, '$pre_load_gtid') <> 0`)
+{
+ --let $bs=`SELECT @@global.gtid_binlog_pos`
+ --let $es=`SELECT @@global.gtid_slave_pos`
+ --echo Mismatch between the expected $pre_load_gtid state and the actual binlog state @@global.gtid_binlog_pos = $bs and/or slave execution state @@global.gtid_slave_pos = $es.
+ --die
+}
+
+set @@debug_sync="now signal sig_go";
+--source include/sync_with_master_gtid.inc
+
+--echo # Cleanup
+--connection master
+DROP TABLE t1,ta;
+--let $create_or_drop=drop
+--source suite/rpl/include/create_or_drop_sync_func.inc
+
+--source include/save_master_gtid.inc
+
+--connection slave
+--source include/sync_with_master_gtid.inc
+--source include/stop_slave.inc
+SET @@GLOBAL.slave_parallel_threads=@old_parallel_threads;
+SET @@GLOBAL.slave_parallel_mode =@old_parallel_mode;
+SET @@GLOBAL.gtid_strict_mode =@old_gtid_strict_mode;
+--source include/start_slave.inc
+
+--source include/rpl_end.inc
diff --git a/sql/item.cc b/sql/item.cc
index 10ad6e31dec..ca6697422cc 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -10691,7 +10691,7 @@ table_map Item_direct_view_ref::used_tables() const
table_map used= (*ref)->used_tables();
return (used ?
used :
- ((null_ref_table != NO_NULL_TABLE) ?
+ (null_ref_table != NO_NULL_TABLE && !null_ref_table->const_table ?
null_ref_table->map :
(table_map)0 ));
}
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 724581fc6ec..38a59ccae82 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -366,7 +366,14 @@ public:
int8 aggr_level; /* nesting level of the aggregating subquery */
int8 max_arg_level; /* max level of unbound column references */
int8 max_sum_func_level;/* max level of aggregation for embedded functions */
- bool quick_group; /* If incremental update of fields */
+
+ /*
+ true (the default value) means this aggregate function can be computed
+ with TemporaryTableWithPartialSums algorithm (see end_update()).
+ false means this aggregate function needs OrderedGroupBy algorithm (see
+ end_write_group()).
+ */
+ bool quick_group;
/*
This list is used by the check for mixing non aggregated fields and
sum functions in the ONLY_FULL_GROUP_BY_MODE. We save all outer fields
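
Review note: to make the new quick_group comment concrete, here is a hypothetical sketch of how an aggregate that cannot be maintained as a single partial sum would opt out of the default path. The classes below are invented for illustration and are not real Item_sum subclasses.

    // Hypothetical illustration of the quick_group contract described above.
    struct FakeItemSum
    {
      bool quick_group= true;      // default: TemporaryTableWithPartialSums is fine
    };

    // An aggregate such as a median needs to see the whole group before it can
    // produce a value, so it cannot be kept as one partial sum in a temp-table
    // field. It requests the OrderedGroupBy path instead (see end_write_group()).
    struct FakeItemMedian : FakeItemSum
    {
      FakeItemMedian() { quick_group= false; }
    };

    int main()
    {
      FakeItemMedian m;
      return m.quick_group ? 1 : 0;          // 0: OrderedGroupBy will be used
    }
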
diff --git a/sql/mysql_install_db.cc b/sql/mysql_install_db.cc
index 47ce6171357..64eb763a038 100644
--- a/sql/mysql_install_db.cc
+++ b/sql/mysql_install_db.cc
@@ -263,7 +263,7 @@ static char *get_plugindir()
{
static char plugin_dir[2*MAX_PATH];
get_basedir(plugin_dir, sizeof(plugin_dir), mysqld_path);
- strcat(plugin_dir, "/" STR(INSTALL_PLUGINDIR));
+ safe_strcat(plugin_dir, sizeof(plugin_dir), "/" STR(INSTALL_PLUGINDIR));
if (access(plugin_dir, 0) == 0)
return plugin_dir;
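
Review note: the strcat-to-safe_strcat conversions throughout this patch rely on the callee knowing the destination buffer size. The sketch below is an illustrative bounded append with the same intent (truncate instead of overflow); its name and return value are invented, and it is not the m_string.h implementation.

    // Illustrative only: a bounded append in the spirit of the safe_strcat()
    // calls introduced by this patch.
    #include <cstdio>
    #include <cstring>

    // Appends src to dst but never writes past dst_size bytes (including the
    // terminating NUL). Returns true if the result had to be truncated.
    static bool bounded_strcat(char *dst, size_t dst_size, const char *src)
    {
      size_t used= strlen(dst);
      if (used >= dst_size)
        return true;                       // dst is not properly terminated
      size_t room= dst_size - used - 1;    // space left for src characters
      size_t len= strlen(src);
      size_t copy= len < room ? len : room;
      memcpy(dst + used, src, copy);
      dst[used + copy]= '\0';
      return len > room;
    }

    int main()
    {
      char path[16];
      snprintf(path, sizeof(path), "%s", "/usr/local");
      bool truncated= bounded_strcat(path, sizeof(path), "/lib/plugin");
      printf("%s (truncated=%d)\n", path, (int)truncated);  // stays within 16 bytes
      return 0;
    }
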
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 9203743adfe..bb68218c6fe 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -5152,12 +5152,11 @@ static int init_server_components()
else // full wsrep initialization
{
// add basedir/bin to PATH to resolve wsrep script names
- char* const tmp_path= (char*)my_alloca(strlen(mysql_home) +
- strlen("/bin") + 1);
+ size_t tmp_path_size= strlen(mysql_home) + 5; /* including "/bin" */
+ char* const tmp_path= (char*)my_alloca(tmp_path_size);
if (tmp_path)
{
- strcpy(tmp_path, mysql_home);
- strcat(tmp_path, "/bin");
+ snprintf(tmp_path, tmp_path_size, "%s/bin", mysql_home);
wsrep_prepend_PATH(tmp_path);
}
else
@@ -5907,8 +5906,9 @@ int mysqld_main(int argc, char **argv)
char real_server_version[2 * SERVER_VERSION_LENGTH + 10];
set_server_version(real_server_version, sizeof(real_server_version));
- strcat(real_server_version, "' as '");
- strcat(real_server_version, server_version);
+ safe_strcat(real_server_version, sizeof(real_server_version), "' as '");
+ safe_strcat(real_server_version, sizeof(real_server_version),
+ server_version);
sql_print_information(ER_DEFAULT(ER_STARTUP), my_progname,
real_server_version,
@@ -8256,7 +8256,8 @@ static int mysql_init_variables(void)
}
else
my_path(prg_dev, my_progname, "mysql/bin");
- strcat(prg_dev,"/../"); // Remove 'bin' to get base dir
+ // Remove 'bin' to get base dir
+ safe_strcat(prg_dev, sizeof(prg_dev), "/../");
cleanup_dirname(mysql_home,prg_dev);
}
#else
diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc
index 2c431ccc12d..8701fc19411 100644
--- a/sql/rpl_parallel.cc
+++ b/sql/rpl_parallel.cc
@@ -56,8 +56,7 @@ rpt_handle_event(rpl_parallel_thread::queued_event *qev,
rgi->event_relay_log_pos= qev->event_relay_log_pos;
rgi->future_event_relay_log_pos= qev->future_event_relay_log_pos;
strcpy(rgi->future_event_master_log_name, qev->future_event_master_log_name);
- if (!(ev->is_artificial_event() || ev->is_relay_log_event() ||
- (ev->when == 0)))
+ if (event_can_update_last_master_timestamp(ev))
rgi->last_master_timestamp= ev->when + (time_t)ev->exec_time;
err= apply_event_and_update_pos_for_parallel(ev, thd, rgi);
diff --git a/sql/slave.cc b/sql/slave.cc
index 300e53bee1b..c788a206435 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -4148,10 +4148,10 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
the user might be surprised to see a claim that the slave is up to date
long before those queued events are actually executed.
*/
- if (!rli->mi->using_parallel() &&
- !(ev->is_artificial_event() || ev->is_relay_log_event() || (ev->when == 0)))
+ if ((!rli->mi->using_parallel()) && event_can_update_last_master_timestamp(ev))
{
rli->last_master_timestamp= ev->when + (time_t) ev->exec_time;
+ rli->sql_thread_caught_up= false;
DBUG_ASSERT(rli->last_master_timestamp >= 0);
}
@@ -4203,6 +4203,17 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
if (rli->mi->using_parallel())
{
+ if (unlikely((rli->last_master_timestamp == 0 ||
+ rli->sql_thread_caught_up) &&
+ event_can_update_last_master_timestamp(ev)))
+ {
+ if (rli->last_master_timestamp < ev->when)
+ {
+ rli->last_master_timestamp= ev->when;
+ rli->sql_thread_caught_up= false;
+ }
+ }
+
int res= rli->parallel.do_event(serial_rgi, ev, event_size);
/*
In parallel replication, we need to update the relay log position
@@ -4223,7 +4234,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
This is the case for pre-10.0 events without GTID, and for handling
slave_skip_counter.
*/
- if (!(ev->is_artificial_event() || ev->is_relay_log_event() || (ev->when == 0)))
+ if (event_can_update_last_master_timestamp(ev))
{
/*
Ignore FD's timestamp as it does not reflect the slave execution
@@ -4231,7 +4242,8 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
data modification event execution last long all this time
Seconds_Behind_Master is zero.
*/
- if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT)
+ if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT &&
+ rli->last_master_timestamp < ev->when)
rli->last_master_timestamp= ev->when + (time_t) ev->exec_time;
DBUG_ASSERT(rli->last_master_timestamp >= 0);
@@ -7590,7 +7602,6 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
if (hot_log)
mysql_mutex_unlock(log_lock);
- rli->sql_thread_caught_up= false;
DBUG_RETURN(ev);
}
if (opt_reckless_slave) // For mysql-test
@@ -7754,7 +7765,6 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
rli->relay_log.wait_for_update_relay_log(rli->sql_driver_thd);
// re-acquire data lock since we released it earlier
mysql_mutex_lock(&rli->data_lock);
- rli->sql_thread_caught_up= false;
continue;
}
/*
@@ -7945,12 +7955,19 @@ event(errno: %d cur_log->error: %d)",
{
sql_print_information("Error reading relay log event: %s",
"slave SQL thread was killed");
- DBUG_RETURN(0);
+ goto end;
}
err:
if (errmsg)
sql_print_error("Error reading relay log event: %s", errmsg);
+
+end:
+ /*
+ Set that we are not caught up so if there is a hang/problem on restart,
+ Seconds_Behind_Master will still grow.
+ */
+ rli->sql_thread_caught_up= false;
DBUG_RETURN(0);
}
#ifdef WITH_WSREP
diff --git a/sql/slave.h b/sql/slave.h
index 5ca6054a178..e2bd5cec1b9 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -49,6 +49,7 @@
#include "rpl_filter.h"
#include "rpl_tblmap.h"
#include "rpl_gtid.h"
+#include "log_event.h"
#define SLAVE_NET_TIMEOUT 60
@@ -293,6 +294,17 @@ extern char *report_host, *report_password;
extern I_List<THD> threads;
+/*
+ Check that a binlog event (read from the relay log) is valid to update
+ last_master_timestamp. That is, a valid event is one with a consistent
+ timestamp which originated from a primary server.
+*/
+static inline bool event_can_update_last_master_timestamp(Log_event *ev)
+{
+ return ev && !(ev->is_artificial_event() || ev->is_relay_log_event() ||
+ (ev->when == 0));
+}
+
#else
#define close_active_mi() /* no-op */
#endif /* HAVE_REPLICATION */
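
Review note: a compact model of how the call sites above use the new predicate, with invented stand-in types instead of Relay_log_info/Log_event. It also mirrors the "only move the timestamp forward" checks added to sql/slave.cc.

    #include <ctime>

    struct FakeEvent
    {
      time_t when;
      bool artificial;
      bool relay_log_event;
      unsigned long exec_time;
    };

    static bool event_can_update_last_master_timestamp(const FakeEvent *ev)
    {
      // Same three exclusions as the helper added to sql/slave.h: artificial
      // events, relay-log-local events, and events with a zero timestamp.
      return ev && !(ev->artificial || ev->relay_log_event || ev->when == 0);
    }

    struct FakeRelayLogInfo
    {
      time_t last_master_timestamp= 0;
      bool sql_thread_caught_up= true;
    };

    static void on_event(FakeRelayLogInfo *rli, const FakeEvent *ev)
    {
      if (event_can_update_last_master_timestamp(ev) &&
          rli->last_master_timestamp < (time_t)(ev->when + ev->exec_time))
      {
        // Only move the clock forward, as the patched slave.cc does, so that an
        // out-of-order parallel worker cannot make Seconds_Behind_Master jump back.
        rli->last_master_timestamp= ev->when + ev->exec_time;
        rli->sql_thread_caught_up= false;
      }
    }

    int main()
    {
      FakeRelayLogInfo rli;
      FakeEvent ev{1700000000, false, false, 2};
      on_event(&rli, &ev);
      return (int)(rli.last_master_timestamp == 0);
    }
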
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index 906458b7b3c..ce472697d40 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -1271,6 +1271,8 @@ send_result_message:
goto err;
DEBUG_SYNC(thd, "admin_command_kill_after_modify");
}
+ thd->resume_subsequent_commits(suspended_wfc);
+ DBUG_EXECUTE_IF("inject_analyze_table_sleep", my_sleep(500000););
if (is_table_modified && is_cmd_replicated &&
(!opt_readonly || thd->slave_thread) && !thd->lex->no_write_to_binlog)
{
@@ -1280,10 +1282,8 @@ send_result_message:
if (res)
goto err;
}
-
my_eof(thd);
- thd->resume_subsequent_commits(suspended_wfc);
- DBUG_EXECUTE_IF("inject_analyze_table_sleep", my_sleep(500000););
+
DBUG_RETURN(FALSE);
err:
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 67018c0299a..42e6aea2aa8 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -5776,6 +5776,12 @@ public:
uint sum_func_count;
uint hidden_field_count;
uint group_parts,group_length,group_null_parts;
+
+ /*
+ If we're doing a GROUP BY operation, this shows which algorithm is used:
+ true TemporaryTableWithPartialSums algorithm (see end_update()).
+ false OrderedGroupBy algorithm (see end_write_group()).
+ */
uint quick_group;
/**
Enabled when we have atleast one outer_sum_func. Needed when used
diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc
index 63907c2c9b5..2e672591d09 100644
--- a/sql/sql_cte.cc
+++ b/sql/sql_cte.cc
@@ -101,49 +101,6 @@ bool LEX::check_dependencies_in_with_clauses()
/**
@brief
- Resolve references to CTE in specification of hanging CTE
-
- @details
- A CTE to which there are no references in the query is called hanging CTE.
- Although such CTE is not used for execution its specification must be
- subject to context analysis. All errors concerning references to
- non-existing tables or fields occurred in the specification must be
- reported as well as all other errors caught at the prepare stage.
- The specification of a hanging CTE might contain references to other
- CTE outside of the specification and within it if the specification
- contains a with clause. This function resolves all such references for
- all hanging CTEs encountered in the processed query.
-
- @retval
- false on success
- true on failure
-*/
-
-bool
-LEX::resolve_references_to_cte_in_hanging_cte()
-{
- for (With_clause *with_clause= with_clauses_list;
- with_clause; with_clause= with_clause->next_with_clause)
- {
- for (With_element *with_elem= with_clause->with_list.first;
- with_elem; with_elem= with_elem->next)
- {
- if (!with_elem->is_referenced())
- {
- TABLE_LIST *first_tbl=
- with_elem->spec->first_select()->table_list.first;
- TABLE_LIST **with_elem_end_pos= with_elem->head->tables_pos.end_pos;
- if (first_tbl && resolve_references_to_cte(first_tbl, with_elem_end_pos))
- return true;
- }
- }
- }
- return false;
-}
-
-
-/**
- @brief
Resolve table references to CTE from a sub-chain of table references
@param tables Points to the beginning of the sub-chain
@@ -288,8 +245,6 @@ LEX::check_cte_dependencies_and_resolve_references()
return false;
if (resolve_references_to_cte(query_tables, query_tables_last))
return true;
- if (resolve_references_to_cte_in_hanging_cte())
- return true;
return false;
}
@@ -488,47 +443,33 @@ With_element *find_table_def_in_with_clauses(TABLE_LIST *tbl,
st_unit_ctxt_elem *ctxt)
{
With_element *found= 0;
+ st_select_lex_unit *top_unit= 0;
for (st_unit_ctxt_elem *unit_ctxt_elem= ctxt;
unit_ctxt_elem;
unit_ctxt_elem= unit_ctxt_elem->prev)
{
st_select_lex_unit *unit= unit_ctxt_elem->unit;
With_clause *with_clause= unit->with_clause;
- /*
- First look for the table definition in the with clause attached to 'unit'
- if there is any such clause.
- */
if (with_clause)
{
- found= with_clause->find_table_def(tbl, NULL);
+ /*
+ If the reference to tbl that has to be resolved belongs to
+ the FROM clause of a descendant of top_unit->with_element,
+ and this with element belongs to with_clause, then this
+ element must be used as the barrier for the search in the
+ list of CTEs from with_clause, unless the clause contains
+ RECURSIVE.
+ */
+ With_element *barrier= 0;
+ if (top_unit && !with_clause->with_recursive &&
+ top_unit->with_element &&
+ top_unit->with_element->get_owner() == with_clause)
+ barrier= top_unit->with_element;
+ found= with_clause->find_table_def(tbl, barrier);
if (found)
break;
}
- /*
- If 'unit' is the unit that defines a with element then reset 'unit'
- to the unit whose attached with clause contains this with element.
- */
- With_element *with_elem= unit->with_element;
- if (with_elem)
- {
- if (!(unit_ctxt_elem= unit_ctxt_elem->prev))
- break;
- unit= unit_ctxt_elem->unit;
- }
- with_clause= unit->with_clause;
- /*
- Now look for the table definition in this with clause. If the with clause
- contains RECURSIVE the search is performed through all CTE definitions in
- clause, otherwise up to the definition of 'with_elem' unless it is NULL.
- */
- if (with_clause)
- {
- found= with_clause->find_table_def(tbl,
- with_clause->with_recursive ?
- NULL : with_elem);
- if (found)
- break;
- }
+ top_unit= unit;
}
return found;
}
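
Review note: a simplified, self-contained model of the barrier-limited lookup described in the new comment. The types and driver code are invented and only illustrate the visibility rule: CTEs defined at or after the barrier are not visible unless the clause is RECURSIVE.

    #include <cstdio>
    #include <cstring>

    struct WithElement
    {
      const char *name;
      WithElement *next;
    };

    struct WithClause
    {
      bool with_recursive;
      WithElement *first;

      // Search the CTE list, stopping at 'barrier' (exclusive). With RECURSIVE
      // the caller passes no barrier, so every element of the clause is visible.
      WithElement *find_table_def(const char *tbl, WithElement *barrier) const
      {
        for (WithElement *e= first; e && e != barrier; e= e->next)
          if (!strcmp(e->name, tbl))
            return e;
        return nullptr;
      }
    };

    int main()
    {
      WithElement cte2{"t", nullptr};
      WithElement cte1{"s", &cte2};
      WithClause clause{false, &cte1};

      // When resolving a reference from inside the specification of cte2 ("t"),
      // cte2 itself acts as the barrier, so only earlier CTEs ("s") are visible.
      printf("%s\n", clause.find_table_def("t", &cte2) ? "found" : "not visible");
      // With RECURSIVE there is no barrier and "t" resolves to itself.
      printf("%s\n", clause.find_table_def("t", nullptr) ? "found" : "not visible");
      return 0;
    }
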
diff --git a/sql/sql_cte.h b/sql/sql_cte.h
index 39648d731c7..4bc3ed69582 100644
--- a/sql/sql_cte.h
+++ b/sql/sql_cte.h
@@ -323,8 +323,6 @@ public:
friend
bool LEX::resolve_references_to_cte(TABLE_LIST *tables,
TABLE_LIST **tables_last);
- friend
- bool LEX::resolve_references_to_cte_in_hanging_cte();
};
const uint max_number_of_elements_in_with_clause= sizeof(table_map)*8;
@@ -438,9 +436,6 @@ public:
friend
bool LEX::check_dependencies_in_with_clauses();
-
- friend
- bool LEX::resolve_references_to_cte_in_hanging_cte();
};
inline
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index ac570be78aa..24958b5adcc 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -774,8 +774,6 @@ void LEX::start(THD *thd_arg)
stmt_var_list.empty();
proc_list.elements=0;
- save_group_list.empty();
- save_order_list.empty();
win_ref= NULL;
win_frame= NULL;
frame_top_bound= NULL;
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index aae994c8721..7545a9c4ead 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -1085,6 +1085,7 @@ public:
group_list_ptrs, and re-establish the original list before each execution.
*/
SQL_I_List<ORDER> group_list;
+ SQL_I_List<ORDER> save_group_list;
Group_list_ptrs *group_list_ptrs;
List<Item> item_list; /* list of fields & expressions */
@@ -1150,6 +1151,7 @@ public:
const char *type; /* type of select for EXPLAIN */
SQL_I_List<ORDER> order_list; /* ORDER clause */
+ SQL_I_List<ORDER> save_order_list;
SQL_I_List<ORDER> gorder_list;
Item *select_limit, *offset_limit; /* LIMIT clause parameters */
bool is_set_query_expr_tail;
@@ -3486,8 +3488,6 @@ public:
}
- SQL_I_List<ORDER> save_group_list;
- SQL_I_List<ORDER> save_order_list;
LEX_CSTRING *win_ref;
Window_frame *win_frame;
Window_frame_bound *frame_top_bound;
@@ -4636,12 +4636,11 @@ public:
select_stack[0]->is_service_select);
}
+
bool check_dependencies_in_with_clauses();
- bool resolve_references_to_cte_in_hanging_cte();
bool check_cte_dependencies_and_resolve_references();
bool resolve_references_to_cte(TABLE_LIST *tables,
TABLE_LIST **tables_last);
-
};
diff --git a/sql/sql_list.h b/sql/sql_list.h
index 2da682aee3a..5d5ba310a0e 100644
--- a/sql/sql_list.h
+++ b/sql/sql_list.h
@@ -54,7 +54,7 @@ public:
{
elements= tmp.elements;
first= tmp.first;
- next= tmp.next;
+ next= elements ? tmp.next : &first;
return *this;
}
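
Review note: the one-character condition above is easier to see with a toy re-creation of the list. MiniList below is invented (it is not SQL_I_List); it only shows why copying the interior next pointer from an empty source would leave the target pointing into the wrong object.

    #include <cassert>

    struct Node { int value; Node *next_node; };

    struct MiniList
    {
      unsigned elements;
      Node *first;
      Node **next;          // where the next element will be linked in

      MiniList() : elements(0), first(nullptr), next(&first) {}

      void push_back(Node *n)
      {
        n->next_node= nullptr;
        *next= n;           // link through the interior pointer
        next= &n->next_node;
        elements++;
      }

      MiniList &operator=(const MiniList &tmp)
      {
        elements= tmp.elements;
        first= tmp.first;
        // Copying tmp.next verbatim from an empty list would leave 'next'
        // pointing at tmp.first, i.e. into another object; pushing into *this
        // would then scribble over 'tmp'. Hence 'elements ? tmp.next : &first'.
        next= elements ? tmp.next : &first;
        return *this;
      }
    };

    int main()
    {
      MiniList empty_src, dst;
      dst= empty_src;              // dst.next now correctly points at dst.first
      Node n{42, nullptr};
      dst.push_back(&n);
      assert(dst.first == &n && empty_src.first == nullptr);
      return 0;
    }
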
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index bb0b3ebb4da..7840f581789 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -8792,8 +8792,8 @@ TABLE_LIST *st_select_lex::convert_right_join()
void st_select_lex::prepare_add_window_spec(THD *thd)
{
LEX *lex= thd->lex;
- lex->save_group_list= group_list;
- lex->save_order_list= order_list;
+ save_group_list= group_list;
+ save_order_list= order_list;
lex->win_ref= NULL;
lex->win_frame= NULL;
lex->frame_top_bound= NULL;
@@ -8820,8 +8820,8 @@ bool st_select_lex::add_window_def(THD *thd,
win_part_list_ptr,
win_order_list_ptr,
win_frame);
- group_list= thd->lex->save_group_list;
- order_list= thd->lex->save_order_list;
+ group_list= save_group_list;
+ order_list= save_order_list;
if (parsing_place != SELECT_LIST)
{
fields_in_window_functions+= win_part_list_ptr->elements +
@@ -8847,8 +8847,8 @@ bool st_select_lex::add_window_spec(THD *thd,
win_part_list_ptr,
win_order_list_ptr,
win_frame);
- group_list= thd->lex->save_group_list;
- order_list= thd->lex->save_order_list;
+ group_list= save_group_list;
+ order_list= save_order_list;
if (parsing_place != SELECT_LIST)
{
fields_in_window_functions+= win_part_list_ptr->elements +
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index eb54484fa51..333a9c1f50d 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -3541,15 +3541,26 @@ bool JOIN::make_aggr_tables_info()
/*
If we have different sort & group then we must sort the data by group
- and copy it to another tmp table
+ and copy it to another tmp table.
+
This code is also used if we are using distinct something
we haven't been able to store in the temporary table yet
like SEC_TO_TIME(SUM(...)).
+
+ 3. Also, this is used when
+ - the query has Window functions,
+ - the GROUP BY operation is done with OrderedGroupBy algorithm.
+ In this case, the first temptable will contain pre-GROUP-BY data. Force
+ the creation of the second temporary table. Post-GROUP-BY dataset will be
+ written there, and then Window Function processing code will be able to
+ process it.
*/
if ((group_list &&
(!test_if_subpart(group_list, order) || select_distinct)) ||
- (select_distinct && tmp_table_param.using_outer_summary_function))
- { /* Must copy to another table */
+ (select_distinct && tmp_table_param.using_outer_summary_function) ||
+ (group_list && !tmp_table_param.quick_group && // (3)
+ select_lex->have_window_funcs())) // (3)
+ { /* Must copy to another table */
DBUG_PRINT("info",("Creating group table"));
calc_group_buffer(this, group_list);
@@ -22081,11 +22092,17 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
/*
@brief
- Perform a GROUP BY operation over a stream of rows ordered by their group. The
- result is sent into join->result.
+ Perform OrderedGroupBy operation and write the output into join->result.
@detail
- Also applies HAVING, etc.
+ The input stream is ordered by the GROUP BY expression, so groups come
+ one after another. We only need to accumulate the aggregate value; when
+ a GROUP BY group ends, we check the HAVING and send the group.
+
+ Note that the output comes in GROUP BY order, which is required by
+ MySQL's GROUP BY semantics. No further sorting is needed.
+
+ @seealso end_write_group() also implements OrderedGroupBy
*/
enum_nested_loop_state
@@ -22273,13 +22290,26 @@ end:
/*
@brief
- Perform a GROUP BY operation over rows coming in arbitrary order.
-
- This is done by looking up the group in a temp.table and updating group
- values.
+ Perform GROUP BY operation over rows coming in arbitrary order: use
+ TemporaryTableWithPartialSums algorithm.
+
+ @detail
+ The TemporaryTableWithPartialSums algorithm is:
+
+ CREATE TEMPORARY TABLE tmp (
+ group_by_columns PRIMARY KEY,
+ partial_sum
+ );
+
+ for each row R in join output {
+ INSERT INTO tmp (R.group_by_columns, R.sum_value)
+ ON DUPLICATE KEY UPDATE partial_sum=partial_sum + R.sum_value;
+ }
@detail
Also applies HAVING, etc.
+
+ @seealso end_unique_update()
*/
static enum_nested_loop_state
@@ -22427,13 +22457,15 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
/*
@brief
- Perform a GROUP BY operation over a stream of rows ordered by their group.
- Write the result into a temporary table.
+ Perform OrderedGroupBy operation and write the output into the temporary
+ table (join_tab->table).
@detail
- Also applies HAVING, etc.
+ The input stream is ordered by the GROUP BY expression, so groups come
+ one after another. We only need to accumulate the aggregate value; when
+ a GROUP BY group ends, we check the HAVING and write the group.
- The rows are written into temptable so e.g. filesort can read them.
+ @seealso end_send_group() also implements OrderedGroupBy
*/
enum_nested_loop_state
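
Review note: the comments added above describe both GROUP BY strategies; the standalone sketch below contrasts them outside the server. It is deliberately simplified: a std::map stands in for the unique-keyed temporary table, the names are invented, and none of this is the actual executor code.

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    struct Row { std::string group_key; long value; };

    // OrderedGroupBy: input already sorted by group_key; emit a group when the
    // key changes (what end_send_group()/end_write_group() do).
    static void ordered_group_by(const std::vector<Row> &sorted_rows)
    {
      std::string cur;
      long sum= 0;
      bool have_group= false;
      for (const Row &r : sorted_rows)
      {
        if (have_group && r.group_key != cur)
        {
          std::printf("%s -> %ld\n", cur.c_str(), sum);   // "send" the finished group
          sum= 0;
        }
        cur= r.group_key;
        sum+= r.value;
        have_group= true;
      }
      if (have_group)
        std::printf("%s -> %ld\n", cur.c_str(), sum);
    }

    // TemporaryTableWithPartialSums: arbitrary input order; keep partial sums
    // keyed by the GROUP BY columns (the map plays the role of the temp table
    // updated via "INSERT ... ON DUPLICATE KEY UPDATE" in end_update()).
    static void partial_sums_group_by(const std::vector<Row> &rows)
    {
      std::map<std::string, long> tmp;
      for (const Row &r : rows)
        tmp[r.group_key]+= r.value;
      for (const auto &g : tmp)
        std::printf("%s -> %ld\n", g.first.c_str(), g.second);
    }

    int main()
    {
      std::vector<Row> rows= {{"a", 1}, {"b", 2}, {"a", 3}};
      partial_sums_group_by(rows);               // works for any input order
      std::vector<Row> sorted= {{"a", 1}, {"a", 3}, {"b", 2}};
      ordered_group_by(sorted);                  // requires input ordered by the key
      return 0;
    }
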
diff --git a/sql/sql_select.h b/sql/sql_select.h
index d71fc034a9d..85451eb1ae0 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -1420,12 +1420,30 @@ public:
(set in make_join_statistics())
*/
bool impossible_where;
- List<Item> all_fields; ///< to store all fields that used in query
+
+ /*
+ All fields used in the query processing.
+
+ Initially this is a list of fields from the query's SQL text.
+
+ Then, ORDER/GROUP BY and Window Function code add columns that need to
+ be saved to be available in the post-group-by context. These extra columns
+ are added to the front, because this->fields_list points to the suffix of
+ this list.
+ */
+ List<Item> all_fields;
///Above list changed to use temporary table
List<Item> tmp_all_fields1, tmp_all_fields2, tmp_all_fields3;
///Part, shared with list above, emulate following list
List<Item> tmp_fields_list1, tmp_fields_list2, tmp_fields_list3;
- List<Item> &fields_list; ///< hold field list passed to mysql_select
+
+ /*
+ The original field list as it was passed to mysql_select(). This refers
+ to select_lex->item_list.
+ CAUTION: this list is a suffix of this->all_fields list, that is, it shares
+ elements with that list!
+ */
+ List<Item> &fields_list;
List<Item> procedure_fields_list;
int error;
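
Review note: a minimal model of the suffix-sharing described in the new all_fields/fields_list comments, using an invented singly-linked node type rather than List<Item>.

    #include <cassert>
    #include <string>

    struct ItemNode { std::string name; ItemNode *next; };

    int main()
    {
      // The field list as written in the query: SELECT a, b ...
      ItemNode b{"b", nullptr}, a{"a", &b};

      ItemNode *fields_list= &a;    // what mysql_select() was given
      ItemNode *all_fields= &a;     // initially the same chain

      // ORDER/GROUP BY or window-function code prepends a hidden column that
      // must survive into the post-group-by temp tables. fields_list is
      // untouched: it is still the suffix of the (now longer) all_fields chain.
      ItemNode hidden{"hidden_sort_col", all_fields};
      all_fields= &hidden;

      assert(all_fields->next == fields_list);   // shared suffix, as documented
      return 0;
    }
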
diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp
index a5c2e065742..1eeb4ac05ca 100644
--- a/storage/connect/array.cpp
+++ b/storage/connect/array.cpp
@@ -975,13 +975,13 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g)
xtrc(1, "Arraylist: len=%d\n", len);
p = (char *)PlugSubAlloc(g, NULL, len);
- strcpy(p, "(");
+ safe_strcpy(p, len, "(");
for (i = 0; i < Nval;) {
Value->SetValue_pvblk(Vblp, i);
Value->Prints(g, tp, z);
- strcat(p, tp);
- strcat(p, (++i == Nval) ? ")" : ",");
+ safe_strcat(p, len, tp);
+ safe_strcat(p, len, (++i == Nval) ? ")" : ",");
} // enfor i
xtrc(1, "Arraylist: newlen=%d\n", strlen(p));
diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp
index 1a1c5cf5d8d..880b4a1ce23 100644
--- a/storage/connect/bson.cpp
+++ b/storage/connect/bson.cpp
@@ -10,6 +10,7 @@
/* Include relevant sections of the MariaDB header file. */
/***********************************************************************/
#include <my_global.h>
+#include <m_string.h>
/***********************************************************************/
/* Include application header files: */
@@ -83,7 +84,7 @@ BDOC::BDOC(PGLOBAL G) : BJSON(G, NULL)
PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng)
{
size_t i;
- bool b = false, ptyp = (bool *)pty;
+ bool b = false;
PBVAL bvp = NULL;
s = js;
@@ -598,7 +599,7 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty)
try {
if (!bvp) {
- strcpy(g->Message, "Null json tree");
+ safe_strcpy(g->Message, sizeof(g->Message), "Null json tree");
throw 1;
} else if (!fn) {
// Serialize to a string
@@ -606,9 +607,8 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty)
b = pretty == 1;
} else {
if (!(fs = fopen(fn, "wb"))) {
- snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR),
- "w", (int)errno, fn);
- strcat(strcat(g->Message, ": "), strerror(errno));
+ snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR) ": %s",
+ "w", (int)errno, fn, strerror(errno));
throw 2;
} else if (pretty >= 2) {
// Serialize to a pretty file
diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp
index 2d9132e20ed..8df04232277 100644
--- a/storage/connect/bsonudf.cpp
+++ b/storage/connect/bsonudf.cpp
@@ -4908,7 +4908,7 @@ char *bbin_make_array(UDF_INIT *initid, UDF_ARGS *args, char *result,
} // endfor i
if ((bsp = BbinAlloc(bnx.G, initid->max_length, arp))) {
- strcat(bsp->Msg, " array");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " array");
// Keep result of constant function
g->Xchk = (initid->const_item) ? bsp : NULL;
@@ -5106,8 +5106,9 @@ char *bbin_array_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
PUSH_WARNING("Result truncated to json_grp_size values");
if (arp)
- if ((bsp = BbinAlloc(g, initid->max_length, arp)))
- strcat(bsp->Msg, " array");
+ if ((bsp = BbinAlloc(g, initid->max_length, arp))) {
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " array");
+ }
if (!bsp) {
*res_length = 0;
@@ -5153,8 +5154,9 @@ char *bbin_object_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
PUSH_WARNING("Result truncated to json_grp_size values");
if (bop)
- if ((bsp = BbinAlloc(g, initid->max_length, bop)))
- strcat(bsp->Msg, " object");
+ if ((bsp = BbinAlloc(g, initid->max_length, bop))) {
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
+ }
if (!bsp) {
*res_length = 0;
@@ -5198,7 +5200,7 @@ char *bbin_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i));
if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
- strcat(bsp->Msg, " object");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
// Keep result of constant function
g->Xchk = (initid->const_item) ? bsp : NULL;
@@ -5253,7 +5255,7 @@ char *bbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i));
if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
- strcat(bsp->Msg, " object");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
// Keep result of constant function
g->Xchk = (initid->const_item) ? bsp : NULL;
@@ -5312,7 +5314,7 @@ char *bbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i));
if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
- strcat(bsp->Msg, " object");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
// Keep result of constant function
g->Xchk = (initid->const_item) ? bsp : NULL;
@@ -6075,7 +6077,7 @@ char *bbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
// pretty = pty;
if ((bsp = BbinAlloc(bnx.G, len, jsp))) {
- strcat(bsp->Msg, " file");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " file");
bsp->Filename = fn;
bsp->Pretty = pretty;
} else {
diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp
index cfb4de1652d..406974a6924 100644
--- a/storage/connect/filamdbf.cpp
+++ b/storage/connect/filamdbf.cpp
@@ -442,7 +442,7 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, PTOS topt, bool info)
hp->Headlen, hp->Filedate[0], hp->Filedate[1],
hp->Filedate[2]);
- strcat(g->Message, buf);
+ safe_strcat(g->Message, sizeof(g->Message), buf);
} // endif info
#endif // 0
diff --git a/storage/connect/filamfix.cpp b/storage/connect/filamfix.cpp
index 1964e752eb6..84381724ba9 100644
--- a/storage/connect/filamfix.cpp
+++ b/storage/connect/filamfix.cpp
@@ -36,6 +36,8 @@
#include <fcntl.h>
#endif // !_WIN32
+#include <m_string.h>
+
/***********************************************************************/
/* Include application header files: */
/* global.h is header containing all global declarations. */
@@ -883,7 +885,6 @@ bool BGXFAM::OpenTableFile(PGLOBAL g)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)filename, sizeof(filename), NULL);
- strcat(g->Message, filename);
} else
rc = 0;
@@ -1004,7 +1005,7 @@ int BGXFAM::Cardinality(PGLOBAL g)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)filename, sizeof(filename), NULL);
- strcat(g->Message, filename);
+ safe_strcat(g->Message, sizeof(g->Message), filename);
return -1;
} else
return 0; // File does not exist
@@ -1384,7 +1385,8 @@ bool BGXFAM::OpenTempFile(PGLOBAL g)
/*********************************************************************/
tempname = (char*)PlugSubAlloc(g, NULL, _MAX_PATH);
PlugSetPath(tempname, To_File, Tdbp->GetPath());
- strcat(PlugRemoveType(tempname, tempname), ".t");
+ PlugRemoveType(tempname, tempname);
+ safe_strcat(tempname, _MAX_PATH, ".t");
remove(tempname); // Be sure it does not exist yet
#if defined(_WIN32)
@@ -1393,11 +1395,12 @@ bool BGXFAM::OpenTempFile(PGLOBAL g)
if (Tfile == INVALID_HANDLE_VALUE) {
DWORD rc = GetLastError();
- snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, MODE_INSERT, tempname);
+ snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, MODE_INSERT,
+ tempname);
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)tempname, _MAX_PATH, NULL);
- strcat(g->Message, tempname);
+ safe_strcat(g->Message, sizeof(g->Message), tempname);
return true;
} // endif Tfile
#else // UNIX
@@ -1405,8 +1408,8 @@ bool BGXFAM::OpenTempFile(PGLOBAL g)
if (Tfile == INVALID_HANDLE_VALUE) {
int rc = errno;
- snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, MODE_INSERT, tempname);
- strcat(g->Message, strerror(errno));
+ snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR)" %s", rc,
+ MODE_INSERT, tempname, strerror(errno));
return true;
} //endif Tfile
#endif // UNIX
diff --git a/storage/connect/filamgz.cpp b/storage/connect/filamgz.cpp
index 34575c22e71..3e5741d378c 100644
--- a/storage/connect/filamgz.cpp
+++ b/storage/connect/filamgz.cpp
@@ -33,6 +33,8 @@
#include <fcntl.h>
#endif // !_WIN32
+#include <m_string.h>
+
/***********************************************************************/
/* Include application header files: */
/* global.h is header containing all global declarations. */
@@ -128,12 +130,13 @@ int GZFAM::GetFileLength(PGLOBAL g)
/***********************************************************************/
bool GZFAM::OpenTableFile(PGLOBAL g)
{
- char opmode[4], filename[_MAX_PATH];
- MODE mode = Tdbp->GetMode();
+ const char *opmode;
+ char filename[_MAX_PATH];
+ MODE mode = Tdbp->GetMode();
switch (mode) {
case MODE_READ:
- strcpy(opmode, "r");
+ opmode = "rb";
break;
case MODE_UPDATE:
/*****************************************************************/
@@ -147,7 +150,7 @@ bool GZFAM::OpenTableFile(PGLOBAL g)
DelRows = Cardinality(g);
// This will erase the entire file
- strcpy(opmode, "w");
+ opmode = "wb";
// Block = 0; // For ZBKFAM
// Last = Nrec; // For ZBKFAM
Tdbp->ResetSize();
@@ -158,7 +161,7 @@ bool GZFAM::OpenTableFile(PGLOBAL g)
break;
case MODE_INSERT:
- strcpy(opmode, "a+");
+ opmode = "a+b";
break;
default:
snprintf(g->Message, sizeof(g->Message), MSG(BAD_OPEN_MODE), mode);
@@ -170,13 +173,11 @@ bool GZFAM::OpenTableFile(PGLOBAL g)
/* Use specific zlib functions. */
/* Treat files as binary. */
/*********************************************************************/
- strcat(opmode, "b");
Zfile = gzopen(PlugSetPath(filename, To_File, Tdbp->GetPath()), opmode);
if (Zfile == NULL) {
- snprintf(g->Message, sizeof(g->Message), MSG(GZOPEN_ERROR),
- opmode, (int)errno, filename);
- strcat(strcat(g->Message, ": "), strerror(errno));
+ snprintf(g->Message, sizeof(g->Message), MSG(GZOPEN_ERROR) ": %s",
+ opmode, (int)errno, filename, strerror(errno));
return (mode == MODE_READ && errno == ENOENT)
? PushWarning(g, Tdbp) : true;
} // endif Zfile
diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp
index 7a5d16accb2..f8168887e89 100644
--- a/storage/connect/filamtxt.cpp
+++ b/storage/connect/filamtxt.cpp
@@ -38,6 +38,8 @@
#include <fcntl.h>
#endif // !_WIN32
+#include <m_string.h>
+
/***********************************************************************/
/* Include application header files: */
/* global.h is header containing all global declarations. */
@@ -593,7 +595,7 @@ bool DOSFAM::OpenTableFile(PGLOBAL g)
} // endswitch Mode
// For blocked I/O or for moving lines, open the table in binary
- strcat(opmode, (Bin) ? "b" : "t");
+ safe_strcat(opmode, sizeof(opmode), (Bin) ? "b" : "t");
// Now open the file stream
PlugSetPath(filename, To_File, Tdbp->GetPath());
@@ -1081,7 +1083,8 @@ bool DOSFAM::OpenTempFile(PGLOBAL g)
/* Open the temporary file, Spos is at the beginning of file. */
/*********************************************************************/
PlugSetPath(tempname, To_File, Tdbp->GetPath());
- strcat(PlugRemoveType(tempname, tempname), ".t");
+ PlugRemoveType(tempname, tempname);
+ safe_strcat(tempname, sizeof(tempname), ".t");
if (!(T_Stream = PlugOpenFile(g, tempname, "wb"))) {
if (trace(1))
@@ -1170,7 +1173,8 @@ int DOSFAM::RenameTempFile(PGLOBAL g)
if (!Abort) {
PlugSetPath(filename, To_File, Tdbp->GetPath());
- strcat(PlugRemoveType(filetemp, filename), ".ttt");
+ PlugRemoveType(filetemp, filename);
+ safe_strcat(filetemp, sizeof(filetemp), ".ttt");
remove(filetemp); // May still be there from previous error
if (rename(filename, filetemp)) { // Save file for security
diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp
index df889e9ab72..46dd7eba5ff 100644
--- a/storage/connect/filamvct.cpp
+++ b/storage/connect/filamvct.cpp
@@ -42,6 +42,8 @@
#include <fcntl.h>
#endif // !_WIN32
+#include <m_string.h>
+
/***********************************************************************/
/* Include application header files: */
/* global.h is header containing all global declarations. */
@@ -194,7 +196,7 @@ int VCTFAM::GetBlockInfo(PGLOBAL g)
if (Header == 2)
{
PlugRemoveType(filename, filename);
- strncat(filename, ".blk", _MAX_PATH - strlen(filename));
+ safe_strcat(filename, sizeof(filename), ".blk");
}
if ((h = global_open(g, MSGID_CANNOT_OPEN, filename, O_RDONLY)) == -1
@@ -251,7 +253,7 @@ bool VCTFAM::SetBlockInfo(PGLOBAL g)
} else { // Header == 2
PlugRemoveType(filename, filename);
- strncat(filename, ".blk", _MAX_PATH - strlen(filename));
+ safe_strcat(filename, sizeof(filename), ".blk");
s= global_fopen(g, MSGID_CANNOT_OPEN, filename, "wb");
} // endif Header
@@ -587,7 +589,7 @@ bool VCTFAM::InitInsert(PGLOBAL g)
htrc("Exception %d: %s\n", n, g->Message);
rc = true;
} catch (const char *msg) {
- strncpy(g->Message, msg, sizeof(g->Message));
+ safe_strcpy(g->Message, sizeof(g->Message), msg);
rc = true;
} // end catch
@@ -891,8 +893,7 @@ bool VCTFAM::OpenTempFile(PGLOBAL g)
/*********************************************************************/
PlugSetPath(tempname, To_File, Tdbp->GetPath());
PlugRemoveType(tempname, tempname);
- strncat(tempname, ".t", _MAX_PATH - strlen(tempname));
-
+ safe_strcat(tempname, sizeof(tempname), ".t");
if (MaxBlk) {
if (MakeEmptyFile(g, tempname))
return true;
@@ -1563,7 +1564,7 @@ bool VCMFAM::InitInsert(PGLOBAL g)
htrc("Exception %d: %s\n", n, g->Message);
rc = true;
} catch (const char *msg) {
- strncpy(g->Message, msg, sizeof(g->Message));
+ safe_strcpy(g->Message, sizeof(g->Message), msg);
rc = true;
} // end catch
@@ -2083,10 +2084,10 @@ bool VECFAM::AllocateBuffer(PGLOBAL g)
// Allocate all that is needed to move lines and make Temp
if (UseTemp) {
Tempat = (char*)PlugSubAlloc(g, NULL, _MAX_PATH);
- strcpy(Tempat, Colfn);
+ safe_strcpy(Tempat, _MAX_PATH, Colfn);
PlugSetPath(Tempat, Tempat, Tdbp->GetPath());
PlugRemoveType(Tempat, Tempat);
- strncat(Tempat, ".t", _MAX_PATH - strlen(Tempat));
+ safe_strcat(Tempat, _MAX_PATH, ".t");
T_Fbs = (PFBLOCK *)PlugSubAlloc(g, NULL, Ncol * sizeof(PFBLOCK));
} // endif UseTemp
@@ -2461,7 +2462,7 @@ int VECFAM::RenameTempFile(PGLOBAL g)
snprintf(filename, _MAX_PATH, Colfn, i+1);
PlugSetPath(filename, filename, Tdbp->GetPath());
PlugRemoveType(filetemp, filename);
- strncat(filetemp, ".ttt", _MAX_PATH - strlen(filetemp));
+ safe_strcat(filetemp, sizeof(filetemp), ".ttt");
remove(filetemp); // May still be there from previous error
if (rename(filename, filetemp)) { // Save file for security
@@ -3222,7 +3223,7 @@ int BGVFAM::GetBlockInfo(PGLOBAL g)
if (Header == 2)
{
PlugRemoveType(filename, filename);
- strncat(filename, ".blk", _MAX_PATH - strlen(filename));
+ safe_strcat(filename, sizeof(filename), ".blk");
}
#if defined(_WIN32)
@@ -3301,7 +3302,7 @@ bool BGVFAM::SetBlockInfo(PGLOBAL g)
} else // Header == 2
{
PlugRemoveType(filename, filename);
- strncat(filename, ".blk", _MAX_PATH - strlen(filename));
+ safe_strcat(filename, sizeof(filename), ".blk");
}
if (h == INVALID_HANDLE_VALUE) {
@@ -3399,7 +3400,7 @@ bool BGVFAM::MakeEmptyFile(PGLOBAL g, PCSZ fn)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)filename, sizeof(filename), NULL);
- strncat(g->Message, filename, sizeof(g->Message) - strlen(g->Message));
+ safe_strcat(g->Message, sizeof(g->Message), filename);
if (h != INVALID_HANDLE_VALUE)
CloseHandle(h);
@@ -3535,7 +3536,7 @@ bool BGVFAM::OpenTableFile(PGLOBAL g)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)filename, sizeof(filename), NULL);
- strncat(g->Message, filename, sizeof(g->Message) - strlen(g->Message));
+ safe_strcat(g->Message, sizeof(g->Message), filename);
} // endif Hfile
if (trace(1))
@@ -3623,8 +3624,8 @@ bool BGVFAM::OpenTableFile(PGLOBAL g)
if (Hfile == INVALID_HANDLE_VALUE) {
rc = errno;
- snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, mode, filename);
- strncat(g->Message, strerror(errno), sizeof(g->Message) - strlen(g->Message));
+ snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR)"%s", rc, mode,
+ filename, strerror(errno));
} // endif Hfile
if (trace(1))
@@ -3968,7 +3969,7 @@ bool BGVFAM::OpenTempFile(PGLOBAL g)
tempname = (char*)PlugSubAlloc(g, NULL, _MAX_PATH);
PlugSetPath(tempname, To_File, Tdbp->GetPath());
PlugRemoveType(tempname, tempname);
- strncat(tempname, ".t", _MAX_PATH - strlen(tempname));
+ safe_strcat(tempname, _MAX_PATH, ".t");
if (!MaxBlk)
remove(tempname); // Be sure it does not exist yet
@@ -3987,7 +3988,7 @@ bool BGVFAM::OpenTempFile(PGLOBAL g)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)tempname, _MAX_PATH, NULL);
- strncat(g->Message, tempname, sizeof(g->Message) - strlen(g->Message));
+ safe_strcat(g->Message, sizeof(g->Message), tempname);
return true;
} // endif Tfile
#else // UNIX
@@ -3997,8 +3998,8 @@ bool BGVFAM::OpenTempFile(PGLOBAL g)
if (Tfile == INVALID_HANDLE_VALUE) {
int rc = errno;
- snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, MODE_INSERT, tempname);
- strncat(g->Message, strerror(errno), sizeof(g->Message) - strlen(g->Message));
+ snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR) "%s", rc, MODE_INSERT,
+ tempname, strerror(errno));
return true;
} //endif Tfile
#endif // UNIX
diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp
index 814503ae6f7..40926cd2ae7 100644
--- a/storage/connect/filamzip.cpp
+++ b/storage/connect/filamzip.cpp
@@ -29,6 +29,7 @@
#include <fcntl.h>
#endif // !_WIN32
#include <time.h>
+#include <m_string.h>
/***********************************************************************/
/* Include application header files: */
@@ -181,7 +182,8 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
while (true) {
if (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
- strcat(strcat(strcpy(filename, drive), direc), FileData.cFileName);
+ snprintf(filename, sizeof(filename), "%s%s%s",
+ drive, direc, FileData.cFileName);
if (ZipFile(g, zutp, filename, FileData.cFileName, buf)) {
FindClose(hSearch);
@@ -217,7 +219,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
struct dirent *entry;
_splitpath(filename, NULL, direc, pattern, ftype);
- strcat(pattern, ftype);
+ safe_strcat(pattern, sizeof(pattern), ftype);
// Start searching files in the target directory.
if (!(dir = opendir(direc))) {
@@ -226,7 +228,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
} // endif dir
while ((entry = readdir(dir))) {
- strcat(strcpy(fn, direc), entry->d_name);
+ snprintf(fn, sizeof(fn), "%s%s", direc, entry->d_name);
if (lstat(fn, &fileinfo) < 0) {
snprintf(g->Message, sizeof(g->Message), "%s: %s", fn, strerror(errno));
@@ -240,7 +242,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
if (fnmatch(pattern, entry->d_name, 0))
continue; // Not a match
- strcat(strcpy(filename, direc), entry->d_name);
+ snprintf(filename, sizeof(filename), "%s%s", direc, entry->d_name);
if (ZipFile(g, zutp, filename, entry->d_name, buf)) {
closedir(dir);
diff --git a/storage/connect/javaconn.cpp b/storage/connect/javaconn.cpp
index 0dc467aa7ee..3d1dfbc3f26 100644
--- a/storage/connect/javaconn.cpp
+++ b/storage/connect/javaconn.cpp
@@ -33,6 +33,8 @@
#define NODW
#endif // !_WIN32
+#include <m_string.h>
+
/***********************************************************************/
/* Required objects includes. */
/***********************************************************************/
@@ -231,15 +233,16 @@ bool JAVAConn::GetJVM(PGLOBAL g)
#if defined(_WIN32)
for (ntry = 0; !LibJvm && ntry < 3; ntry++) {
if (!ntry && JvmPath) {
- strcat(strcpy(soname, JvmPath), "\\jvm.dll");
+ snprintf(soname, sizeof(soname), "%s\\jvm.dll", JvmPath);
+
ntry = 3; // No other try
} else if (ntry < 2 && getenv("JAVA_HOME")) {
- strcpy(soname, getenv("JAVA_HOME"));
+ safe_strcpy(soname, sizeof(soname), getenv("JAVA_HOME"));
if (ntry == 1)
- strcat(soname, "\\jre");
+ safe_strcat(soname, sizeof(soname), "\\jre");
- strcat(soname, "\\bin\\client\\jvm.dll");
+ safe_strcat(soname, sizeof(soname), "\\bin\\client\\jvm.dll");
} else {
// Try to find it through the registry
char version[16];
@@ -247,11 +250,12 @@ bool JAVAConn::GetJVM(PGLOBAL g)
LONG rc;
DWORD BufferSize = 16;
- strcpy(soname, "jvm.dll"); // In case it fails
+ safe_strcpy(soname, sizeof(soname), "jvm.dll"); // In case it fails
if ((rc = RegGetValue(HKEY_LOCAL_MACHINE, javaKey, "CurrentVersion",
RRF_RT_ANY, NULL, (PVOID)&version, &BufferSize)) == ERROR_SUCCESS) {
- strcat(strcat(javaKey, "\\"), version);
+ safe_strcat(javaKey, sizeof(javaKey), "\\");
+ safe_strcat(javaKey, sizeof(javaKey), version);
BufferSize = sizeof(soname);
if ((rc = RegGetValue(HKEY_LOCAL_MACHINE, javaKey, "RuntimeLib",
@@ -272,11 +276,11 @@ bool JAVAConn::GetJVM(PGLOBAL g)
char buf[256];
DWORD rc = GetLastError();
- snprintf(g->Message, sizeof(g->Message), MSG(DLL_LOAD_ERROR), rc, soname);
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)buf, sizeof(buf), NULL);
- strcat(strcat(g->Message, ": "), buf);
+ snprintf(g->Message, sizeof(g->Message), MSG(DLL_LOAD_ERROR)": %s", rc,
+ soname, buf);
} else if (!(CreateJavaVM = (CRTJVM)GetProcAddress((HINSTANCE)LibJvm,
"JNI_CreateJavaVM"))) {
snprintf(g->Message, sizeof(g->Message), MSG(PROCADD_ERROR), GetLastError(), "JNI_CreateJavaVM");
@@ -301,13 +305,14 @@ bool JAVAConn::GetJVM(PGLOBAL g)
for (ntry = 0; !LibJvm && ntry < 2; ntry++) {
if (!ntry && JvmPath) {
- strcat(strcpy(soname, JvmPath), "/libjvm.so");
+ snprintf(soname, sizeof(soname), "%s/libjvm.so", JvmPath);
ntry = 2;
} else if (!ntry && getenv("JAVA_HOME")) {
// TODO: Replace i386 by a better guess
- strcat(strcpy(soname, getenv("JAVA_HOME")), "/jre/lib/i386/client/libjvm.so");
+ snprintf(soname, sizeof(soname), "%s/jre/lib/i386/client/libjvm.so",
+ getenv("JAVA_HOME"));
} else { // Will need LD_LIBRARY_PATH to be set
- strcpy(soname, "libjvm.so");
+ safe_strcpy(soname, sizeof(soname), "libjvm.so");
ntry = 2;
} // endelse
diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp
index c37c4955607..6b1da3af628 100644
--- a/storage/connect/json.cpp
+++ b/storage/connect/json.cpp
@@ -10,6 +10,7 @@
/* Include relevant sections of the MariaDB header file. */
/***********************************************************************/
#include <my_global.h>
+#include <m_string.h>
/***********************************************************************/
/* Include application header files: */
@@ -270,7 +271,7 @@ PSZ Serialize(PGLOBAL g, PJSON jsp, char* fn, int pretty) {
jdp->dfp = GetDefaultPrec();
if (!jsp) {
- strcpy(g->Message, "Null json tree");
+ safe_strcpy(g->Message, sizeof(g->Message), "Null json tree");
throw 1;
} else if (!fn) {
// Serialize to a string
@@ -278,9 +279,8 @@ PSZ Serialize(PGLOBAL g, PJSON jsp, char* fn, int pretty) {
b = pretty == 1;
} else {
if (!(fs = fopen(fn, "wb"))) {
- snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR),
- "w", (int)errno, fn);
- strcat(strcat(g->Message, ": "), strerror(errno));
+ snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR) ": %s",
+ "w", (int)errno, fn, strerror(errno));
throw 2;
} else if (pretty >= 2) {
// Serialize to a pretty file
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index 91bd42a4f3e..bc75f80bcc5 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -4755,7 +4755,7 @@ char *jbin_array(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((arp = (PJAR)JsonNew(g, TYPE_JAR)) &&
(bsp = JbinAlloc(g, args, initid->max_length, arp))) {
- strcat(bsp->Msg, " array");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " array");
for (uint i = 0; i < args->arg_count; i++)
arp->AddArrayValue(g, MakeValue(g, args, i));
@@ -4832,7 +4832,7 @@ char *jbin_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
arp->InitArray(gb);
if ((bsp = JbinAlloc(g, args, initid->max_length, top))) {
- strcat(bsp->Msg, " array");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " array");
bsp->Jsp = arp;
} // endif bsp
@@ -5053,7 +5053,7 @@ char *jbin_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((bsp = JbinAlloc(g, args, initid->max_length, objp)))
- strcat(bsp->Msg, " object");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
} else
bsp = NULL;
@@ -5109,7 +5109,7 @@ char *jbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
objp->SetKeyValue(g, jvp, MakeKey(g, args, i));
if ((bsp = JbinAlloc(g, args, initid->max_length, objp)))
- strcat(bsp->Msg, " object");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
} else
bsp = NULL;
@@ -5168,7 +5168,7 @@ char *jbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
objp->SetKeyValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i));
if ((bsp = JbinAlloc(g, args, initid->max_length, objp)))
- strcat(bsp->Msg, " object");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
} else
bsp = NULL;
@@ -5390,7 +5390,7 @@ char *jbin_object_list(UDF_INIT *initid, UDF_ARGS *args, char *result,
} // endif CheckMemory
if ((bsp = JbinAlloc(g, args, initid->max_length, jarp)))
- strcat(bsp->Msg, " array");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " array");
// Keep result of constant function
g->Xchk = (initid->const_item) ? bsp : NULL;
@@ -5465,7 +5465,7 @@ char *jbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
jsp = (jvp->GetJsp()) ? jvp->GetJsp() : JvalNew(g, TYPE_JVAL, jvp->GetValue(g));
if ((bsp = JbinAlloc(g, args, initid->max_length, jsp)))
- strcat(bsp->Msg, " item");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " item");
else
*error = 1;
@@ -5825,7 +5825,7 @@ char *jbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
pretty = pty;
if ((bsp = JbinAlloc(g, args, len, jsp))) {
- strcat(bsp->Msg, " file");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " file");
bsp->Filename = fn;
bsp->Pretty = pretty;
} else {
@@ -6161,9 +6161,8 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) {
/* Parse the json file and allocate its tree structure. */
/*********************************************************************************/
if (!(fs = fopen(outfn, "wb"))) {
- snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR),
- "w", (int)errno, outfn);
- strcat(strcat(g->Message, ": "), strerror(errno));
+ snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR)": %s",
+ "w", (int)errno, outfn, strerror(errno));
CloseMemMap(mm.memory, len);
return NULL;
} // endif fs
diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp
index e0e69c61183..b37a2c705b2 100644
--- a/storage/connect/myconn.cpp
+++ b/storage/connect/myconn.cpp
@@ -405,18 +405,20 @@ PQRYRES SrcColumns(PGLOBAL g, const char *host, const char *db,
port = mysqld_port;
if (!strnicmp(srcdef, "select ", 7) || strstr(srcdef, "%s")) {
- query = (char *)PlugSubAlloc(g, NULL, strlen(srcdef) + 10);
+ size_t query_sz = strlen(srcdef) + 10;
+ query = (char *)PlugSubAlloc(g, NULL, query_sz);
if ((p= strstr(srcdef, "%s")))
{
/* Replace %s with 1=1 */
- sprintf(query, "%.*s1=1%s", (int) (p - srcdef), srcdef, p + 2); // dummy where clause
+ snprintf(query, query_sz, "%.*s1=1%s",
+ (int) (p - srcdef), srcdef, p + 2); // dummy where clause
}
- else
- strcpy(query, srcdef);
+ else
+ safe_strcpy(query, query_sz, srcdef);
if (!strnicmp(srcdef, "select ", 7))
- strcat(query, " LIMIT 0");
+ safe_strcat(query, query_sz, " LIMIT 0");
} else
query = (char *)srcdef;
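
Review note: the "%.*s" form used above splices a dummy "1=1" predicate in place of the "%s" placeholder while bounding the output. Here it is in isolation, with an invented example srcdef and none of the CONNECT types.

    #include <cstdio>
    #include <cstring>

    int main()
    {
      const char *srcdef= "select * from t where %s and c > 0";
      char query[128];

      if (const char *p= strstr(srcdef, "%s"))
      {
        // Print the prefix up to "%s" with a computed precision, then the dummy
        // predicate, then the remainder after the placeholder.
        snprintf(query, sizeof(query), "%.*s1=1%s",
                 (int)(p - srcdef), srcdef, p + 2);
      }
      else
        snprintf(query, sizeof(query), "%s", srcdef);

      puts(query);   // select * from t where 1=1 and c > 0
      return 0;
    }
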
diff --git a/storage/mroonga/vendor/groonga/lib/alloc.c b/storage/mroonga/vendor/groonga/lib/alloc.c
index 94e31901cec..adb35e04c45 100644
--- a/storage/mroonga/vendor/groonga/lib/alloc.c
+++ b/storage/mroonga/vendor/groonga/lib/alloc.c
@@ -828,7 +828,7 @@ grn_free_default(grn_ctx *ctx, void *ptr,
if (ptr) {
GRN_ADD_ALLOC_COUNT(-1);
} else {
- GRN_LOG(ctx, GRN_LOG_ALERT, "free fail (%s:%d) <%d>",
+ GRN_LOG(ctx, GRN_LOG_ALERT, "free fail (nullptr) (%s:%d) <%d>",
file, line, alloc_count);
}
}