author     Oleksandr Byelkin <sanja@mariadb.com>  2023-04-24 12:43:47 +0200
committer  Oleksandr Byelkin <sanja@mariadb.com>  2023-04-24 12:43:47 +0200
commit     1d74927c58cea438c135a95886a9224405819a96 (patch)
tree       197d4d1b6733566d23fe016a4dd71850ddf96db0
parent     c6e58a8d173b7e9689952d07678ba79702ba8021 (diff)
parent     d3e394b3b1ff1e2c4e160972aad1f78a13fbb62e (diff)
download   mariadb-git-1d74927c58cea438c135a95886a9224405819a96.tar.gz
Merge branch '10.4' into 10.5
-rw-r--r--  client/mysqltest.cc | 1
-rw-r--r--  debian/control | 1
-rw-r--r--  extra/mariabackup/backup_copy.cc | 232
-rw-r--r--  extra/mariabackup/backup_copy.h | 19
-rw-r--r--  extra/mariabackup/backup_mysql.cc | 40
-rw-r--r--  extra/mariabackup/backup_mysql.h | 15
-rw-r--r--  extra/mariabackup/datasink.h | 29
-rw-r--r--  extra/mariabackup/write_filt.cc | 14
-rw-r--r--  extra/mariabackup/write_filt.h | 3
-rw-r--r--  extra/mariabackup/xtrabackup.cc | 234
-rw-r--r--  extra/mariabackup/xtrabackup.h | 11
-rw-r--r--  include/my_alloca.h | 6
-rw-r--r--  mysql-test/include/sql_mode_pad_char_to_full_length.inc | 31
-rw-r--r--  mysql-test/lib/My/CoreDump.pm | 33
-rw-r--r--  mysql-test/main/bootstrap.result | 45
-rw-r--r--  mysql-test/main/bootstrap.test | 107
-rw-r--r--  mysql-test/main/ctype_uca_partitions.result | 40
-rw-r--r--  mysql-test/main/ctype_uca_partitions.test | 32
-rw-r--r--  mysql-test/main/derived.result | 23
-rw-r--r--  mysql-test/main/derived.test | 30
-rw-r--r--  mysql-test/main/opt_trace.result | 356
-rw-r--r--  mysql-test/main/opt_trace.test | 19
-rw-r--r--  mysql-test/main/parser.result | 28
-rw-r--r--  mysql-test/main/parser.test | 17
-rw-r--r--  mysql-test/main/sql_mode_pad_char_to_full_length.result | 94
-rw-r--r--  mysql-test/main/sql_mode_pad_char_to_full_length.test | 19
-rw-r--r--  mysql-test/main/update.result | 29
-rw-r--r--  mysql-test/main/update.test | 23
-rw-r--r--  mysql-test/suite/galera/r/MDEV-30955.result | 26
-rw-r--r--  mysql-test/suite/galera/r/mdev-26175.result | 24
-rw-r--r--  mysql-test/suite/galera/t/MDEV-30955.test | 70
-rw-r--r--  mysql-test/suite/galera/t/galera_sequences.test | 1
-rw-r--r--  mysql-test/suite/galera/t/mdev-26175.test | 27
-rw-r--r--  mysql-test/suite/galera_sr/r/MDEV-30862.result | 11
-rw-r--r--  mysql-test/suite/galera_sr/t/MDEV-30862.test | 24
-rw-r--r--  mysql-test/suite/innodb/r/default_row_format_alter.result | 20
-rw-r--r--  mysql-test/suite/innodb/r/sql_mode_pad_char_to_full_length.result | 51
-rw-r--r--  mysql-test/suite/innodb/t/default_row_format_alter.test | 17
-rw-r--r--  mysql-test/suite/innodb/t/sql_mode_pad_char_to_full_length.test | 18
-rw-r--r--  mysql-test/suite/mariabackup/aria_log_dir_path.result | 41
-rw-r--r--  mysql-test/suite/mariabackup/aria_log_dir_path.test | 105
-rw-r--r--  mysql-test/suite/mariabackup/aria_log_dir_path_rel.result | 41
-rw-r--r--  mysql-test/suite/mariabackup/aria_log_dir_path_rel.test | 4
-rw-r--r--  scripts/wsrep_sst_mariabackup.sh | 3
-rw-r--r--  sql/handler.cc | 8
-rw-r--r--  sql/log.cc | 2
-rw-r--r--  sql/log_event.h | 2
-rw-r--r--  sql/mysqld.cc | 5
-rw-r--r--  sql/opt_range.cc | 3
-rw-r--r--  sql/sql_derived.cc | 3
-rw-r--r--  sql/sql_parse.cc | 2
-rw-r--r--  sql/sql_select.cc | 4
-rw-r--r--  sql/sql_table.cc | 13
-rw-r--r--  sql/sql_yacc.yy | 56
-rw-r--r--  sql/wsrep_client_service.cc | 2
-rw-r--r--  sql/wsrep_high_priority_service.cc | 2
-rw-r--r--  storage/connect/catalog.h | 4
-rw-r--r--  storage/connect/ha_connect.cc | 5
-rw-r--r--  storage/connect/reldef.cpp | 26
-rw-r--r--  storage/connect/tabbson.cpp | 47
-rw-r--r--  storage/connect/tabdos.cpp | 61
-rw-r--r--  storage/connect/tabext.cpp | 57
-rw-r--r--  storage/connect/tabfmt.cpp | 38
-rw-r--r--  storage/connect/tabjdbc.cpp | 45
-rw-r--r--  storage/connect/tabjson.cpp | 54
-rw-r--r--  storage/connect/value.cpp | 8
-rw-r--r--  storage/connect/value.h | 2
-rw-r--r--  storage/innobase/btr/btr0btr.cc | 2
-rw-r--r--  storage/innobase/fts/fts0fts.cc | 10
-rw-r--r--  storage/innobase/ibuf/ibuf0ibuf.cc | 4
-rw-r--r--  storage/innobase/include/os0file.h | 6
-rw-r--r--  storage/innobase/row/row0merge.cc | 5
-rw-r--r--  storage/rocksdb/build_rocksdb.cmake | 2
-rw-r--r--  storage/spider/mysql-test/spider/bugfix/r/mdev_29644.result | 41
-rw-r--r--  storage/spider/mysql-test/spider/bugfix/r/self_reference_multi.result | 4
-rw-r--r--  storage/spider/mysql-test/spider/bugfix/t/mdev_29644.cnf | 3
-rw-r--r--  storage/spider/mysql-test/spider/bugfix/t/mdev_29644.test | 56
-rw-r--r--  storage/spider/spd_db_mysql.cc | 118
-rw-r--r--  storage/spider/spd_db_mysql.h | 4
79 files changed, 2124 insertions(+), 594 deletions(-)
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index 9fa1b1dafe9..489beec1d91 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -5195,6 +5195,7 @@ void do_shutdown_server(struct st_command *command)
if (!timeout || wait_until_dead(pid, timeout < 5 ? 5 : timeout))
{
(void) my_kill(pid, SIGKILL);
+ wait_until_dead(pid, 5);
}
}
DBUG_VOID_RETURN;
diff --git a/debian/control b/debian/control
index 5352e9902ed..e22760ee19f 100644
--- a/debian/control
+++ b/debian/control
@@ -623,6 +623,7 @@ Depends: libxml2,
unixodbc,
${misc:Depends},
${shlibs:Depends}
+Recommends: curl
Breaks: mariadb-connect-engine-10.1,
mariadb-connect-engine-10.2,
mariadb-connect-engine-10.3,
diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc
index 27c4ba29c91..1bdf5a04171 100644
--- a/extra/mariabackup/backup_copy.cc
+++ b/extra/mariabackup/backup_copy.cc
@@ -73,9 +73,8 @@ bool binlog_locked;
static void rocksdb_create_checkpoint();
static bool has_rocksdb_plugin();
-static void copy_or_move_dir(const char *from, const char *to, bool copy, bool allow_hardlinks);
-static void rocksdb_backup_checkpoint();
-static void rocksdb_copy_back();
+static void rocksdb_backup_checkpoint(ds_ctxt *ds_data);
+static void rocksdb_copy_back(ds_ctxt *ds_data);
static bool is_abs_path(const char *path)
{
@@ -131,7 +130,9 @@ struct datadir_thread_ctxt_t {
bool ret;
};
-static bool backup_files_from_datadir(const char *dir_path);
+static bool backup_files_from_datadir(ds_ctxt_t *ds_data,
+ const char *dir_path,
+ const char *prefix);
/************************************************************************
Retirn true if character if file separator */
@@ -803,7 +804,7 @@ if passes the rules for partial backup.
@return true if file backed up or skipped successfully. */
static
bool
-datafile_copy_backup(const char *filepath, uint thread_n)
+datafile_copy_backup(ds_ctxt *ds_data, const char *filepath, uint thread_n)
{
const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
"MRG", "TRG", "TRN", "ARM", "ARZ", "CSM", "CSV", "opt", "par",
@@ -824,7 +825,7 @@ datafile_copy_backup(const char *filepath, uint thread_n)
}
if (filename_matches(filepath, ext_list)) {
- return copy_file(ds_data, filepath, filepath, thread_n);
+ return ds_data->copy_file(filepath, filepath, thread_n);
}
return(true);
@@ -865,7 +866,8 @@ datafile_rsync_backup(const char *filepath, bool save_to_list, FILE *f)
return(true);
}
-bool backup_file_print_buf(const char *filename, const char *buf, int buf_len)
+bool ds_ctxt_t::backup_file_print_buf(const char *filename,
+ const char *buf, int buf_len)
{
ds_file_t *dstfile = NULL;
MY_STAT stat; /* unused for now */
@@ -876,7 +878,7 @@ bool backup_file_print_buf(const char *filename, const char *buf, int buf_len)
stat.st_size = buf_len;
stat.st_mtime = my_time(0);
- dstfile = ds_open(ds_data, filename, &stat);
+ dstfile = ds_open(this, filename, &stat);
if (dstfile == NULL) {
msg("error: Can't open the destination stream for %s",
filename);
@@ -915,9 +917,9 @@ error_close:
return true;
};
-static
bool
-backup_file_vprintf(const char *filename, const char *fmt, va_list ap)
+ds_ctxt_t::backup_file_vprintf(const char *filename,
+ const char *fmt, va_list ap)
{
char *buf = 0;
int buf_len;
@@ -928,7 +930,7 @@ backup_file_vprintf(const char *filename, const char *fmt, va_list ap)
}
bool
-backup_file_printf(const char *filename, const char *fmt, ...)
+ds_ctxt_t::backup_file_printf(const char *filename, const char *fmt, ...)
{
bool result;
va_list ap;
@@ -1054,16 +1056,15 @@ static int fix_win_file_permissions(const char *file)
Copy file for backup/restore.
@return true in case of success. */
bool
-copy_file(ds_ctxt_t *datasink,
- const char *src_file_path,
- const char *dst_file_path,
- uint thread_n)
+ds_ctxt_t::copy_file(const char *src_file_path,
+ const char *dst_file_path,
+ uint thread_n)
{
char dst_name[FN_REFLEN];
ds_file_t *dstfile = NULL;
datafile_cur_t cursor;
xb_fil_cur_result_t res;
- DBUG_ASSERT(datasink->datasink->remove);
+ DBUG_ASSERT(datasink->remove);
const char *dst_path =
(xtrabackup_copy_back || xtrabackup_move_back)?
dst_file_path : trim_dotslash(dst_file_path);
@@ -1074,7 +1075,7 @@ copy_file(ds_ctxt_t *datasink,
strncpy(dst_name, cursor.rel_path, sizeof(dst_name));
- dstfile = ds_open(datasink, dst_path, &cursor.statinfo);
+ dstfile = ds_open(this, dst_path, &cursor.statinfo);
if (dstfile == NULL) {
msg(thread_n,"error: "
"cannot open the destination stream for %s", dst_name);
@@ -1111,7 +1112,7 @@ copy_file(ds_ctxt_t *datasink,
error:
datafile_close(&cursor);
if (dstfile != NULL) {
- datasink->datasink->remove(dstfile->path);
+ datasink->remove(dstfile->path);
ds_close(dstfile);
}
@@ -1125,12 +1126,10 @@ error_close:
Try to move file by renaming it. If source and destination are on
different devices fall back to copy and unlink.
@return true in case of success. */
-static
bool
-move_file(ds_ctxt_t *datasink,
- const char *src_file_path,
- const char *dst_file_path,
- const char *dst_dir, uint thread_n)
+ds_ctxt_t::move_file(const char *src_file_path,
+ const char *dst_file_path,
+ const char *dst_dir, uint thread_n)
{
char errbuf[MYSYS_STRERROR_SIZE];
char dst_file_path_abs[FN_REFLEN];
@@ -1157,7 +1156,7 @@ move_file(ds_ctxt_t *datasink,
if (my_rename(src_file_path, dst_file_path_abs, MYF(0)) != 0) {
if (my_errno == EXDEV) {
/* Fallback to copy/unlink */
- if(!copy_file(datasink, src_file_path,
+ if(!copy_file(src_file_path,
dst_file_path, thread_n))
return false;
msg(thread_n,"Removing %s", src_file_path);
@@ -1241,13 +1240,13 @@ Copy or move file depending on current mode.
@return true in case of success. */
static
bool
-copy_or_move_file(const char *src_file_path,
+copy_or_move_file(ds_ctxt *datasink0, const char *src_file_path,
const char *dst_file_path,
const char *dst_dir,
uint thread_n,
bool copy = xtrabackup_copy_back)
{
- ds_ctxt_t *datasink = ds_data; /* copy to datadir by default */
+ ds_ctxt_t *datasink = datasink0; /* copy to datadir by default */
char filedir[FN_REFLEN];
size_t filedir_len;
bool ret;
@@ -1295,13 +1294,13 @@ copy_or_move_file(const char *src_file_path,
}
ret = (copy ?
- copy_file(datasink, src_file_path, dst_file_path, thread_n) :
- move_file(datasink, src_file_path, dst_file_path,
+ datasink->copy_file(src_file_path, dst_file_path, thread_n) :
+ datasink->move_file(src_file_path, dst_file_path,
dst_dir, thread_n));
cleanup:
- if (datasink != ds_data) {
+ if (datasink != datasink0) {
ds_destroy(datasink);
}
@@ -1313,7 +1312,7 @@ cleanup:
static
bool
-backup_files(const char *from, bool prep_mode)
+backup_files(ds_ctxt *ds_data, const char *from, bool prep_mode)
{
char rsync_tmpfile_name[FN_REFLEN];
FILE *rsync_tmpfile = NULL;
@@ -1351,7 +1350,7 @@ backup_files(const char *from, bool prep_mode)
ret = datafile_rsync_backup(node.filepath,
!prep_mode, rsync_tmpfile);
} else {
- ret = datafile_copy_backup(node.filepath, 1);
+ ret = datafile_copy_backup(ds_data, node.filepath, 1);
}
if (!ret) {
msg("Failed to copy file %s", node.filepath);
@@ -1362,7 +1361,7 @@ backup_files(const char *from, bool prep_mode)
char path[FN_REFLEN];
snprintf(path, sizeof(path),
"%s/db.opt", node.filepath);
- if (!(ret = backup_file_printf(
+ if (!(ret = ds_data->backup_file_printf(
trim_dotslash(path), "%s", ""))) {
msg("Failed to create file %s", path);
goto out;
@@ -1451,7 +1450,6 @@ out:
return(ret);
}
-void backup_fix_ddl(CorruptedPages &);
lsn_t get_current_lsn(MYSQL *connection)
{
@@ -1476,7 +1474,8 @@ lsn_t get_current_lsn(MYSQL *connection)
lsn_t server_lsn_after_lock;
extern void backup_wait_for_lsn(lsn_t lsn);
/** Start --backup */
-bool backup_start(CorruptedPages &corrupted_pages)
+bool backup_start(ds_ctxt *ds_data, ds_ctxt *ds_meta,
+ CorruptedPages &corrupted_pages)
{
if (!opt_no_lock) {
if (opt_safe_slave_backup) {
@@ -1485,7 +1484,7 @@ bool backup_start(CorruptedPages &corrupted_pages)
}
}
- if (!backup_files(fil_path_to_mysql_datadir, true)) {
+ if (!backup_files(ds_data, fil_path_to_mysql_datadir, true)) {
return(false);
}
@@ -1497,11 +1496,15 @@ bool backup_start(CorruptedPages &corrupted_pages)
server_lsn_after_lock = get_current_lsn(mysql_connection);
}
- if (!backup_files(fil_path_to_mysql_datadir, false)) {
+ if (!backup_files(ds_data, fil_path_to_mysql_datadir, false)) {
return(false);
}
- if (!backup_files_from_datadir(fil_path_to_mysql_datadir)) {
+ if (!backup_files_from_datadir(ds_data, fil_path_to_mysql_datadir,
+ "aws-kms-key") ||
+ !backup_files_from_datadir(ds_data,
+ aria_log_dir_path,
+ "aria_log")) {
return false;
}
@@ -1511,7 +1514,7 @@ bool backup_start(CorruptedPages &corrupted_pages)
msg("Waiting for log copy thread to read lsn %llu", (ulonglong)server_lsn_after_lock);
backup_wait_for_lsn(server_lsn_after_lock);
- backup_fix_ddl(corrupted_pages);
+ corrupted_pages.backup_fix_ddl(ds_data, ds_meta);
// There is no need to stop slave thread before coping non-Innodb data when
// --no-lock option is used because --no-lock option requires that no DDL or
@@ -1527,7 +1530,7 @@ bool backup_start(CorruptedPages &corrupted_pages)
if (opt_slave_info) {
lock_binlog_maybe(mysql_connection);
- if (!write_slave_info(mysql_connection)) {
+ if (!write_slave_info(ds_data, mysql_connection)) {
return(false);
}
}
@@ -1539,7 +1542,7 @@ bool backup_start(CorruptedPages &corrupted_pages)
avoid that is to have a single process, i.e. merge innobackupex and
xtrabackup. */
if (opt_galera_info) {
- if (!write_galera_info(mysql_connection)) {
+ if (!write_galera_info(ds_data, mysql_connection)) {
return(false);
}
}
@@ -1547,7 +1550,7 @@ bool backup_start(CorruptedPages &corrupted_pages)
if (opt_binlog_info == BINLOG_INFO_ON) {
lock_binlog_maybe(mysql_connection);
- write_binlog_info(mysql_connection);
+ write_binlog_info(ds_data, mysql_connection);
}
if (have_flush_engine_logs && !opt_no_lock) {
@@ -1584,20 +1587,20 @@ void backup_release()
static const char *default_buffer_pool_file = "ib_buffer_pool";
/** Finish after backup_start() and backup_release() */
-bool backup_finish()
+bool backup_finish(ds_ctxt *ds_data)
{
/* Copy buffer pool dump or LRU dump */
if (!opt_rsync && opt_galera_info) {
if (buffer_pool_filename && file_exists(buffer_pool_filename)) {
- copy_file(ds_data, buffer_pool_filename, default_buffer_pool_file, 0);
+ ds_data->copy_file(buffer_pool_filename, default_buffer_pool_file, 0);
}
if (file_exists("ib_lru_dump")) {
- copy_file(ds_data, "ib_lru_dump", "ib_lru_dump", 0);
+ ds_data->copy_file("ib_lru_dump", "ib_lru_dump", 0);
}
}
if (has_rocksdb_plugin()) {
- rocksdb_backup_checkpoint();
+ rocksdb_backup_checkpoint(ds_data);
}
msg("Backup created in directory '%s'", xtrabackup_target_dir);
@@ -1609,11 +1612,11 @@ bool backup_finish()
mysql_slave_position);
}
- if (!write_backup_config_file()) {
+ if (!write_backup_config_file(ds_data)) {
return(false);
}
- if (!write_xtrabackup_info(mysql_connection, XTRABACKUP_INFO,
+ if (!write_xtrabackup_info(ds_data, mysql_connection, XTRABACKUP_INFO,
opt_history != 0, true)) {
return(false);
}
@@ -1680,6 +1683,7 @@ ibx_copy_incremental_over_full()
bool ret = true;
char path[FN_REFLEN];
int i;
+ ds_ctxt *ds_data= NULL;
DBUG_ASSERT(!opt_galera_info);
datadir_node_init(&node);
@@ -1707,15 +1711,20 @@ ibx_copy_incremental_over_full()
unlink(node.filepath_rel);
}
- if (!(ret = copy_file(ds_data, node.filepath,
- node.filepath_rel, 1))) {
+ if (!(ret = ds_data->copy_file(node.filepath,
+ node.filepath_rel, 1))) {
msg("Failed to copy file %s",
node.filepath);
goto cleanup;
}
}
- if (!(ret = backup_files_from_datadir(xtrabackup_incremental_dir)))
+ if (!(ret = backup_files_from_datadir(ds_data,
+ xtrabackup_incremental_dir,
+ "aws-kms-key")) ||
+ !(ret = backup_files_from_datadir(ds_data,
+ xtrabackup_incremental_dir,
+ "aria_log")))
goto cleanup;
/* copy supplementary files */
@@ -1730,7 +1739,7 @@ ibx_copy_incremental_over_full()
if (file_exists(sup_files[i])) {
unlink(sup_files[i]);
}
- copy_file(ds_data, path, sup_files[i], 0);
+ ds_data->copy_file(path, sup_files[i], 0);
}
}
@@ -1744,7 +1753,7 @@ ibx_copy_incremental_over_full()
if (my_mkdir(ROCKSDB_BACKUP_DIR, 0777, MYF(0))) {
die("my_mkdir failed for " ROCKSDB_BACKUP_DIR);
}
- copy_or_move_dir(path, ROCKSDB_BACKUP_DIR, true, true);
+ ds_data->copy_or_move_dir(path, ROCKSDB_BACKUP_DIR, true, true);
}
ibx_incremental_drop_databases(xtrabackup_target_dir,
xtrabackup_incremental_dir);
@@ -1830,6 +1839,39 @@ public:
}
};
+
+static inline bool
+is_aria_log_dir_file(const datadir_node_t &node)
+{
+ return starts_with(node.filepath_rel, "aria_log");
+}
+
+
+bool
+copy_back_aria_logs(const char *dstdir)
+{
+ std::unique_ptr<ds_ctxt_t, void (&)(ds_ctxt_t*)>
+ ds_ctxt_aria_log_dir_path(ds_create(dstdir, DS_TYPE_LOCAL), ds_destroy);
+
+ datadir_node_t node;
+ datadir_node_init(&node);
+ datadir_iter_t *it = datadir_iter_new(".", false);
+
+ while (datadir_iter_next(it, &node))
+ {
+ if (!is_aria_log_dir_file(node))
+ continue;
+ if (!copy_or_move_file(ds_ctxt_aria_log_dir_path.get(),
+ node.filepath, node.filepath_rel,
+ dstdir, 1))
+ return false;
+ }
+ datadir_node_free(&node);
+ datadir_iter_free(it);
+ return true;
+}
+
+
bool
copy_back()
{
@@ -1863,6 +1905,13 @@ copy_back()
return(false);
}
+ Copy_back_dst_dir aria_log_dir_path_dst;
+ const char *aria_log_dir_path_abs= aria_log_dir_path_dst.make(aria_log_dir_path);
+ if (aria_log_dir_path && *aria_log_dir_path
+ && !directory_exists(aria_log_dir_path_abs, true)) {
+ return false;
+ }
+
/* cd to backup directory */
if (my_setwd(xtrabackup_target_dir, MYF(MY_WME)))
{
@@ -1870,6 +1919,9 @@ copy_back()
return(false);
}
+ if (!copy_back_aria_logs(aria_log_dir_path_abs))
+ return false;
+
/* parse data file path */
if (!innobase_data_file_path) {
@@ -1892,7 +1944,7 @@ copy_back()
dst_dir = dst_dir_buf.make(srv_undo_dir);
- ds_data = ds_create(dst_dir, DS_TYPE_LOCAL);
+ ds_ctxt *ds_tmp = ds_create(dst_dir, DS_TYPE_LOCAL);
for (uint i = 1; i <= TRX_SYS_MAX_UNDO_SPACES; i++) {
char filename[20];
@@ -1900,14 +1952,14 @@ copy_back()
if (!file_exists(filename)) {
break;
}
- if (!(ret = copy_or_move_file(filename, filename,
+ if (!(ret = copy_or_move_file(ds_tmp, filename, filename,
dst_dir, 1))) {
goto cleanup;
}
}
- ds_destroy(ds_data);
- ds_data = NULL;
+ ds_destroy(ds_tmp);
+ ds_tmp = NULL;
/* copy redo logs */
@@ -1916,7 +1968,7 @@ copy_back()
/* --backup generates a single LOG_FILE_NAME, which we must copy
if it exists. */
- ds_data = ds_create(dst_dir, DS_TYPE_LOCAL);
+ ds_tmp = ds_create(dst_dir, DS_TYPE_LOCAL);
MY_STAT stat_arg;
if (!my_stat(LOG_FILE_NAME, &stat_arg, MYF(0)) || !stat_arg.st_size) {
/* After completed --prepare, redo log files are redundant.
@@ -1931,17 +1983,17 @@ copy_back()
snprintf(filename, sizeof filename, "%s/%s101", dst_dir,
LOG_FILE_NAME_PREFIX);
unlink(filename);
- } else if (!(ret = copy_or_move_file(LOG_FILE_NAME, LOG_FILE_NAME,
+ } else if (!(ret = copy_or_move_file(ds_tmp, LOG_FILE_NAME, LOG_FILE_NAME,
dst_dir, 1))) {
goto cleanup;
}
- ds_destroy(ds_data);
+ ds_destroy(ds_tmp);
/* copy innodb system tablespace(s) */
dst_dir = dst_dir_buf.make(innobase_data_home_dir);
- ds_data = ds_create(dst_dir, DS_TYPE_LOCAL);
+ ds_tmp = ds_create(dst_dir, DS_TYPE_LOCAL);
for (Tablespace::const_iterator iter(srv_sys_space.begin()),
end(srv_sys_space.end());
@@ -1949,16 +2001,16 @@ copy_back()
++iter) {
const char *filename = base_name(iter->name());
- if (!(ret = copy_or_move_file(filename, iter->name(),
+ if (!(ret = copy_or_move_file(ds_tmp, filename, iter->name(),
dst_dir, 1))) {
goto cleanup;
}
}
- ds_destroy(ds_data);
+ ds_destroy(ds_tmp);
/* copy the rest of tablespaces */
- ds_data = ds_create(mysql_data_home, DS_TYPE_LOCAL);
+ ds_tmp = ds_create(mysql_data_home, DS_TYPE_LOCAL);
it = datadir_iter_new(".", false);
@@ -1974,6 +2026,10 @@ copy_back()
int i_tmp;
bool is_ibdata_file;
+ /* Skip aria log files */
+ if (is_aria_log_dir_file(node))
+ continue;
+
if (strstr(node.filepath,"/" ROCKSDB_BACKUP_DIR "/")
#ifdef _WIN32
|| strstr(node.filepath,"\\" ROCKSDB_BACKUP_DIR "\\")
@@ -2045,7 +2101,7 @@ copy_back()
continue;
}
- if (!(ret = copy_or_move_file(node.filepath, node.filepath_rel,
+ if (!(ret = copy_or_move_file(ds_tmp, node.filepath, node.filepath_rel,
mysql_data_home, 1))) {
goto cleanup;
}
@@ -2055,12 +2111,12 @@ copy_back()
if (file_exists(default_buffer_pool_file) &&
innobase_buffer_pool_filename) {
- copy_or_move_file(default_buffer_pool_file,
+ copy_or_move_file(ds_tmp, default_buffer_pool_file,
innobase_buffer_pool_filename,
mysql_data_home, 0);
}
- rocksdb_copy_back();
+ rocksdb_copy_back(ds_tmp);
cleanup:
if (it != NULL) {
@@ -2069,11 +2125,11 @@ cleanup:
datadir_node_free(&node);
- if (ds_data != NULL) {
- ds_destroy(ds_data);
+ if (ds_tmp != NULL) {
+ ds_destroy(ds_tmp);
}
- ds_data = NULL;
+ ds_tmp = NULL;
sync_check_close();
return(ret);
@@ -2181,7 +2237,7 @@ decrypt_decompress()
}
/* copy the rest of tablespaces */
- ds_data = ds_create(".", DS_TYPE_LOCAL);
+ ds_ctxt *ds_tmp = ds_create(".", DS_TYPE_LOCAL);
it = datadir_iter_new(".", false);
@@ -2194,11 +2250,11 @@ decrypt_decompress()
datadir_iter_free(it);
}
- if (ds_data != NULL) {
- ds_destroy(ds_data);
+ if (ds_tmp != NULL) {
+ ds_destroy(ds_tmp);
}
- ds_data = NULL;
+ ds_tmp = NULL;
sync_check_close();
@@ -2210,7 +2266,9 @@ decrypt_decompress()
Do not copy the Innodb files (ibdata1, redo log files),
as this is done in a separate step.
*/
-static bool backup_files_from_datadir(const char *dir_path)
+static bool backup_files_from_datadir(ds_ctxt_t *ds_data,
+ const char *dir_path,
+ const char *prefix)
{
os_file_dir_t dir = os_file_opendir(dir_path);
if (dir == IF_WIN(INVALID_HANDLE_VALUE, nullptr)) return false;
@@ -2226,8 +2284,7 @@ static bool backup_files_from_datadir(const char *dir_path)
if (!pname)
pname = info.name;
- if (!starts_with(pname, "aws-kms-key") &&
- !starts_with(pname, "aria_log"))
+ if (!starts_with(pname, prefix))
/* For ES exchange the above line with the following code:
(!xtrabackup_prepare || !xtrabackup_incremental_dir ||
!starts_with(pname, "aria_log")))
@@ -2240,7 +2297,7 @@ static bool backup_files_from_datadir(const char *dir_path)
std::string full_path(dir_path);
full_path.append(1, OS_PATH_SEPARATOR).append(info.name);
- if (!(ret = copy_file(ds_data, full_path.c_str() , info.name, 1)))
+ if (!(ret = ds_data->copy_file(full_path.c_str() , info.name, 1)))
break;
}
os_file_closedir(dir);
@@ -2290,13 +2347,14 @@ static char *trim_trailing_dir_sep(char *path)
Create a file hardlink.
@return true on success, false on error.
*/
-static bool make_hardlink(const char *from_path, const char *to_path)
+bool
+ds_ctxt_t::make_hardlink(const char *from_path, const char *to_path)
{
DBUG_EXECUTE_IF("no_hardlinks", return false;);
char to_path_full[FN_REFLEN];
if (!is_abs_path(to_path))
{
- fn_format(to_path_full, to_path, ds_data->root, "", MYF(MY_RELATIVE_PATH));
+ fn_format(to_path_full, to_path, root, "", MYF(MY_RELATIVE_PATH));
}
else
{
@@ -2317,7 +2375,9 @@ static bool make_hardlink(const char *from_path, const char *to_path)
Has optimization that allows to use hardlinks when possible
(source and destination are directories on the same device)
*/
-static void copy_or_move_dir(const char *from, const char *to, bool do_copy, bool allow_hardlinks)
+void
+ds_ctxt_t::copy_or_move_dir(const char *from, const char *to,
+ bool do_copy, bool allow_hardlinks)
{
datadir_node_t node;
datadir_node_init(&node);
@@ -2345,8 +2405,8 @@ static void copy_or_move_dir(const char *from, const char *to, bool do_copy, boo
if (!rc)
{
rc = (do_copy ?
- copy_file(ds_data, from_path, to_path, 1) :
- move_file(ds_data, from_path, node.filepath_rel,
+ copy_file(from_path, to_path, 1) :
+ move_file(from_path, node.filepath_rel,
to, 1));
}
if (!rc)
@@ -2443,7 +2503,7 @@ static void rocksdb_create_checkpoint()
remove temp.checkpoint directory (in server's datadir)
and release user level lock acquired inside rocksdb_create_checkpoint().
*/
-static void rocksdb_backup_checkpoint()
+static void rocksdb_backup_checkpoint(ds_ctxt *ds_data)
{
msg("Backing up rocksdb files.");
char rocksdb_backup_dir[FN_REFLEN];
@@ -2455,7 +2515,7 @@ static void rocksdb_backup_checkpoint()
die("Can't create rocksdb backup directory %s", rocksdb_backup_dir);
}
}
- copy_or_move_dir(rocksdb_checkpoint_dir, ROCKSDB_BACKUP_DIR, true, backup_to_directory);
+ ds_data->copy_or_move_dir(rocksdb_checkpoint_dir, ROCKSDB_BACKUP_DIR, true, backup_to_directory);
rocksdb_remove_checkpoint_directory();
rocksdb_unlock_checkpoint();
}
@@ -2463,7 +2523,7 @@ static void rocksdb_backup_checkpoint()
/*
Copies #rocksdb directory to the $rockdb_data_dir, on copy-back
*/
-static void rocksdb_copy_back() {
+static void rocksdb_copy_back(ds_ctxt *ds_data) {
if (access(ROCKSDB_BACKUP_DIR, 0))
return;
char rocksdb_home_dir[FN_REFLEN];
@@ -2475,5 +2535,5 @@ static void rocksdb_copy_back() {
xb_rocksdb_datadir?trim_dotslash(xb_rocksdb_datadir): ROCKSDB_BACKUP_DIR);
}
mkdirp(rocksdb_home_dir, 0777, MYF(0));
- copy_or_move_dir(ROCKSDB_BACKUP_DIR, rocksdb_home_dir, xtrabackup_copy_back, xtrabackup_copy_back);
+ ds_data->copy_or_move_dir(ROCKSDB_BACKUP_DIR, rocksdb_home_dir, xtrabackup_copy_back, xtrabackup_copy_back);
}
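
The copy_back_aria_logs() function added above holds its datasink in a
std::unique_ptr whose deleter is a reference to ds_destroy, so the context is
released on every return path. A minimal sketch of the same idiom, assuming
only the datasink API from extra/mariabackup/datasink.h (ds_create, ds_destroy,
DS_TYPE_LOCAL and the ds_ctxt_t::copy_file member introduced in this commit);
the function name and file names are illustrative:

    #include <memory>

    static bool copy_one_file_with_scoped_datasink(const char *dstdir)
    {
      /* ds_destroy() runs automatically when ctxt leaves scope,
         including on the early-return path below. */
      std::unique_ptr<ds_ctxt_t, void (&)(ds_ctxt_t *)>
          ctxt(ds_create(dstdir, DS_TYPE_LOCAL), ds_destroy);

      if (!ctxt)
        return false;            /* nothing was created, nothing to destroy */

      return ctxt->copy_file("ib_buffer_pool", "ib_buffer_pool", 0);
    }
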
diff --git a/extra/mariabackup/backup_copy.h b/extra/mariabackup/backup_copy.h
index 62b2b1bc232..b4a323f2e89 100644
--- a/extra/mariabackup/backup_copy.h
+++ b/extra/mariabackup/backup_copy.h
@@ -14,30 +14,18 @@
extern bool binlog_locked;
-bool
-backup_file_printf(const char *filename, const char *fmt, ...)
- ATTRIBUTE_FORMAT(printf, 2, 0);
-
/************************************************************************
Return true if first and second arguments are the same path. */
bool
equal_paths(const char *first, const char *second);
-/************************************************************************
-Copy file for backup/restore.
-@return true in case of success. */
-bool
-copy_file(ds_ctxt_t *datasink,
- const char *src_file_path,
- const char *dst_file_path,
- uint thread_n);
-
/** Start --backup */
-bool backup_start(CorruptedPages &corrupted_pages);
+bool backup_start(ds_ctxt *ds_data, ds_ctxt *ds_meta,
+ CorruptedPages &corrupted_pages);
/** Release resources after backup_start() */
void backup_release();
/** Finish after backup_start() and backup_release() */
-bool backup_finish();
+bool backup_finish(ds_ctxt *ds_data);
bool
apply_log_finish();
bool
@@ -51,6 +39,5 @@ directory_exists(const char *dir, bool create);
lsn_t
get_current_lsn(MYSQL *connection);
-bool backup_file_print_buf(const char *filename, const char *buf, int buf_len);
#endif
diff --git a/extra/mariabackup/backup_mysql.cc b/extra/mariabackup/backup_mysql.cc
index ef79c8d561e..4f151b95941 100644
--- a/extra/mariabackup/backup_mysql.cc
+++ b/extra/mariabackup/backup_mysql.cc
@@ -366,6 +366,7 @@ bool get_mysql_vars(MYSQL *connection)
char *innodb_undo_directory_var= NULL;
char *innodb_page_size_var= NULL;
char *innodb_undo_tablespaces_var= NULL;
+ char *aria_log_dir_path_var= NULL;
char *page_zip_level_var= NULL;
char *ignore_db_dirs= NULL;
char *endptr;
@@ -396,6 +397,7 @@ bool get_mysql_vars(MYSQL *connection)
{"innodb_undo_tablespaces", &innodb_undo_tablespaces_var},
{"innodb_compression_level", &page_zip_level_var},
{"ignore_db_dirs", &ignore_db_dirs},
+ {"aria_log_dir_path", &aria_log_dir_path_var},
{NULL, NULL}};
read_mysql_variables(connection, "SHOW VARIABLES", mysql_vars, true);
@@ -527,6 +529,12 @@ bool get_mysql_vars(MYSQL *connection)
ut_ad(*endptr == 0);
}
+ if (aria_log_dir_path_var)
+ {
+ aria_log_dir_path= my_strdup(PSI_NOT_INSTRUMENTED,
+ aria_log_dir_path_var, MYF(MY_FAE));
+ }
+
if (page_zip_level_var != NULL)
{
page_zip_level= static_cast<uint>(strtoul(page_zip_level_var, &endptr,
@@ -1373,7 +1381,7 @@ variable.
@returns true on success
*/
bool
-write_slave_info(MYSQL *connection)
+write_slave_info(ds_ctxt *datasink, MYSQL *connection)
{
String sql, comment;
bool show_all_slaves_status= false;
@@ -1403,7 +1411,8 @@ write_slave_info(MYSQL *connection)
}
mysql_slave_position= strdup(comment.c_ptr());
- return backup_file_print_buf(XTRABACKUP_SLAVE_INFO, sql.ptr(), sql.length());
+ return datasink->backup_file_print_buf(XTRABACKUP_SLAVE_INFO,
+ sql.ptr(), sql.length());
}
@@ -1411,7 +1420,7 @@ write_slave_info(MYSQL *connection)
Retrieves MySQL Galera and
saves it in a file. It also prints it to stdout. */
bool
-write_galera_info(MYSQL *connection)
+write_galera_info(ds_ctxt *datasink, MYSQL *connection)
{
char *state_uuid = NULL, *state_uuid55 = NULL;
char *last_committed = NULL, *last_committed55 = NULL;
@@ -1443,12 +1452,12 @@ write_galera_info(MYSQL *connection)
goto cleanup;
}
- result = backup_file_printf(XTRABACKUP_GALERA_INFO,
+ result = datasink->backup_file_printf(XTRABACKUP_GALERA_INFO,
"%s:%s\n", state_uuid ? state_uuid : state_uuid55,
last_committed ? last_committed : last_committed55);
if (result)
{
- write_current_binlog_file(connection);
+ write_current_binlog_file(datasink, connection);
}
cleanup:
@@ -1462,7 +1471,7 @@ cleanup:
Flush and copy the current binary log file into the backup,
if GTID is enabled */
bool
-write_current_binlog_file(MYSQL *connection)
+write_current_binlog_file(ds_ctxt *datasink, MYSQL *connection)
{
char *executed_gtid_set = NULL;
char *gtid_binlog_state = NULL;
@@ -1532,7 +1541,7 @@ write_current_binlog_file(MYSQL *connection)
snprintf(filepath, sizeof(filepath), "%s%c%s",
log_bin_dir, FN_LIBCHAR, log_bin_file);
- result = copy_file(ds_data, filepath, log_bin_file, 0);
+ result = datasink->copy_file(filepath, log_bin_file, 0);
}
cleanup:
@@ -1548,7 +1557,7 @@ cleanup:
Retrieves MySQL binlog position and
saves it in a file. It also prints it to stdout. */
bool
-write_binlog_info(MYSQL *connection)
+write_binlog_info(ds_ctxt *datasink, MYSQL *connection)
{
char *filename = NULL;
char *position = NULL;
@@ -1593,14 +1602,14 @@ write_binlog_info(MYSQL *connection)
"filename '%s', position '%s', "
"GTID of the last change '%s'",
filename, position, gtid) != -1);
- result = backup_file_printf(XTRABACKUP_BINLOG_INFO,
+ result = datasink->backup_file_printf(XTRABACKUP_BINLOG_INFO,
"%s\t%s\t%s\n", filename, position,
gtid);
} else {
ut_a(asprintf(&mysql_binlog_position,
"filename '%s', position '%s'",
filename, position) != -1);
- result = backup_file_printf(XTRABACKUP_BINLOG_INFO,
+ result = datasink->backup_file_printf(XTRABACKUP_BINLOG_INFO,
"%s\t%s\n", filename, position);
}
@@ -1640,8 +1649,9 @@ PERCONA_SCHEMA.xtrabackup_history and writes a new history record to the
table containing all the history info particular to the just completed
backup. */
bool
-write_xtrabackup_info(MYSQL *connection, const char * filename, bool history,
- bool stream)
+write_xtrabackup_info(ds_ctxt *datasink,
+ MYSQL *connection, const char * filename, bool history,
+ bool stream)
{
bool result = true;
@@ -1717,7 +1727,7 @@ write_xtrabackup_info(MYSQL *connection, const char * filename, bool history,
}
if (stream) {
- backup_file_printf(filename, "%s", buf);
+ datasink->backup_file_printf(filename, "%s", buf);
} else {
fp = fopen(filename, "w");
if (!fp) {
@@ -1838,9 +1848,9 @@ static std::string make_local_paths(const char *data_file_path)
return buf.str();
}
-bool write_backup_config_file()
+bool write_backup_config_file(ds_ctxt *datasink)
{
- int rc= backup_file_printf("backup-my.cnf",
+ int rc= datasink->backup_file_printf("backup-my.cnf",
"# This options file was generated by innobackupex.\n\n"
"# The server\n"
"[mysqld]\n"
diff --git a/extra/mariabackup/backup_mysql.h b/extra/mariabackup/backup_mysql.h
index b61fa2362c6..d80f3bb7bc1 100644
--- a/extra/mariabackup/backup_mysql.h
+++ b/extra/mariabackup/backup_mysql.h
@@ -62,17 +62,18 @@ void
unlock_all(MYSQL *connection);
bool
-write_current_binlog_file(MYSQL *connection);
+write_current_binlog_file(ds_ctxt *datasink, MYSQL *connection);
bool
-write_binlog_info(MYSQL *connection);
+write_binlog_info(ds_ctxt *datasink, MYSQL *connection);
bool
-write_xtrabackup_info(MYSQL *connection, const char * filename, bool history,
- bool stream);
+write_xtrabackup_info(ds_ctxt *datasink,
+ MYSQL *connection, const char * filename, bool history,
+ bool stream);
bool
-write_backup_config_file();
+write_backup_config_file(ds_ctxt *datasink);
bool
lock_binlog_maybe(MYSQL *connection);
@@ -84,10 +85,10 @@ bool
wait_for_safe_slave(MYSQL *connection);
bool
-write_galera_info(MYSQL *connection);
+write_galera_info(ds_ctxt *datasink, MYSQL *connection);
bool
-write_slave_info(MYSQL *connection);
+write_slave_info(ds_ctxt *datasink, MYSQL *connection);
#endif
diff --git a/extra/mariabackup/datasink.h b/extra/mariabackup/datasink.h
index 4bede4ec9e7..57468e0c9c7 100644
--- a/extra/mariabackup/datasink.h
+++ b/extra/mariabackup/datasink.h
@@ -37,6 +37,35 @@ typedef struct ds_ctxt {
char *root;
void *ptr;
struct ds_ctxt *pipe_ctxt;
+ /*
+ Copy file for backup/restore.
+ @return true in case of success.
+ */
+ bool copy_file(const char *src_file_path,
+ const char *dst_file_path,
+ uint thread_n);
+
+ bool move_file(const char *src_file_path,
+ const char *dst_file_path,
+ const char *dst_dir,
+ uint thread_n);
+
+ bool make_hardlink(const char *from_path, const char *to_path);
+
+ void copy_or_move_dir(const char *from, const char *to,
+ bool do_copy, bool allow_hardlinks);
+
+ bool backup_file_vprintf(const char *filename,
+ const char *fmt, va_list ap);
+
+ bool backup_file_print_buf(const char *filename,
+ const char *buf,
+ int buf_len);
+
+ bool backup_file_printf(const char *filename,
+ const char *fmt, ...)
+ ATTRIBUTE_FORMAT(printf, 2, 0);
+
} ds_ctxt_t;
typedef struct {
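
The datasink.h hunk above turns the former free functions (copy_file(),
move_file(), backup_file_printf(), ...) that wrote through the global ds_data
into ds_ctxt_t member functions taking the destination context explicitly:
the old call shape copy_file(ds_data, src, dst, thread_n) becomes
ctxt->copy_file(src, dst, thread_n). A hedged sketch of the new calling
convention (target directory and file names are illustrative only):

    ds_ctxt_t *ctxt = ds_create(xtrabackup_target_dir, DS_TYPE_LOCAL);

    /* every write now names its datasink instead of relying on ds_data */
    ctxt->copy_file("ib_buffer_pool", "ib_buffer_pool", /* thread_n= */ 0);
    ctxt->backup_file_printf("backup-my.cnf", "%s", "[mysqld]\n");

    ds_destroy(ctxt);
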
diff --git a/extra/mariabackup/write_filt.cc b/extra/mariabackup/write_filt.cc
index 8339286e1df..9e571e96556 100644
--- a/extra/mariabackup/write_filt.cc
+++ b/extra/mariabackup/write_filt.cc
@@ -31,7 +31,8 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
/************************************************************************
Write-through page write filter. */
-static my_bool wf_wt_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
+static my_bool wf_wt_init(ds_ctxt *ds_meta,
+ xb_write_filt_ctxt_t *ctxt, char *dst_name,
xb_fil_cur_t *cursor, CorruptedPages *corrupted_pages);
static my_bool wf_wt_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile);
@@ -44,7 +45,8 @@ xb_write_filt_t wf_write_through = {
/************************************************************************
Incremental page write filter. */
-static my_bool wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
+static my_bool wf_incremental_init(ds_ctxt *ds_meta,
+ xb_write_filt_ctxt_t *ctxt, char *dst_name,
xb_fil_cur_t *cursor, CorruptedPages *corrupted_pages);
static my_bool wf_incremental_process(xb_write_filt_ctxt_t *ctxt,
ds_file_t *dstfile);
@@ -64,7 +66,8 @@ Initialize incremental page write filter.
@return TRUE on success, FALSE on error. */
static my_bool
-wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
+wf_incremental_init(ds_ctxt *ds_meta,
+ xb_write_filt_ctxt_t *ctxt, char *dst_name,
xb_fil_cur_t *cursor, CorruptedPages *corrupted_pages)
{
char meta_name[FN_REFLEN];
@@ -88,7 +91,7 @@ wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
XB_DELTA_INFO_SUFFIX);
const xb_delta_info_t info(cursor->page_size, cursor->zip_size,
cursor->space_id);
- if (!xb_write_delta_metadata(meta_name, &info)) {
+ if (!xb_write_delta_metadata(ds_meta, meta_name, &info)) {
msg(cursor->thread_n,"Error: "
"failed to write meta info for %s",
cursor->rel_path);
@@ -195,7 +198,8 @@ Initialize the write-through page write filter.
@return TRUE on success, FALSE on error. */
static my_bool
-wf_wt_init(xb_write_filt_ctxt_t *ctxt, char *dst_name __attribute__((unused)),
+wf_wt_init(ds_ctxt *ds_meta __attribute__((unused)),
+ xb_write_filt_ctxt_t *ctxt, char *dst_name __attribute__((unused)),
xb_fil_cur_t *cursor, CorruptedPages *)
{
ctxt->cursor = cursor;
diff --git a/extra/mariabackup/write_filt.h b/extra/mariabackup/write_filt.h
index 6c3ef24291f..a0ce0778a7f 100644
--- a/extra/mariabackup/write_filt.h
+++ b/extra/mariabackup/write_filt.h
@@ -45,7 +45,8 @@ typedef struct {
typedef struct {
- my_bool (*init)(xb_write_filt_ctxt_t *ctxt, char *dst_name,
+ my_bool (*init)(ds_ctxt *ds_meta,
+ xb_write_filt_ctxt_t *ctxt, char *dst_name,
xb_fil_cur_t *cursor, CorruptedPages *corrupted_pages);
my_bool (*process)(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile);
my_bool (*finalize)(xb_write_filt_ctxt_t *, ds_file_t *dstfile);
diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index 7d45337bb18..6254b8c7237 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -118,6 +118,12 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
#define MB_CORRUPTED_PAGES_FILE "innodb_corrupted_pages"
+// disable server's systemd notification code
+extern "C" {
+int sd_notify() { return 0; }
+int sd_notifyf() { return 0; }
+}
+
int sys_var_init();
/* === xtrabackup specific options === */
@@ -261,6 +267,7 @@ static char* innobase_ignored_opt;
char* innobase_data_home_dir;
char* innobase_data_file_path;
+char *aria_log_dir_path;
my_bool xtrabackup_incremental_force_scan = FALSE;
@@ -278,10 +285,66 @@ char *xb_plugin_dir;
char *xb_plugin_load;
my_bool xb_close_files;
-/* Datasinks */
-ds_ctxt_t *ds_data = NULL;
-ds_ctxt_t *ds_meta = NULL;
-ds_ctxt_t *ds_redo = NULL;
+
+class Datasink_free_list
+{
+protected:
+ /*
+ Simple datasink creation tracking...
+ add datasinks in the reverse order you want them destroyed.
+ */
+#define XTRABACKUP_MAX_DATASINKS 10
+ ds_ctxt_t *m_datasinks_to_destroy[XTRABACKUP_MAX_DATASINKS];
+ uint m_actual_datasinks_to_destroy;
+public:
+ Datasink_free_list()
+ :m_actual_datasinks_to_destroy(0)
+ { }
+
+ void add_datasink_to_destroy(ds_ctxt_t *ds)
+ {
+ xb_ad(m_actual_datasinks_to_destroy < XTRABACKUP_MAX_DATASINKS);
+ m_datasinks_to_destroy[m_actual_datasinks_to_destroy] = ds;
+ m_actual_datasinks_to_destroy++;
+ }
+
+ /*
+ Destroy datasinks.
+ Destruction is done in the specific order to not violate their order in the
+ pipeline so that each datasink is able to flush data down the pipeline.
+ */
+ void destroy()
+ {
+ for (uint i= m_actual_datasinks_to_destroy; i > 0; i--)
+ {
+ ds_destroy(m_datasinks_to_destroy[i - 1]);
+ m_datasinks_to_destroy[i - 1] = NULL;
+ }
+ }
+};
+
+
+class Backup_datasinks: public Datasink_free_list
+{
+public:
+ ds_ctxt_t *m_data;
+ ds_ctxt_t *m_meta;
+ ds_ctxt_t *m_redo;
+
+ Backup_datasinks()
+ :m_data(NULL),
+ m_meta(NULL),
+ m_redo(NULL)
+ { }
+ void init();
+ void destroy()
+ {
+ Datasink_free_list::destroy();
+ *this= Backup_datasinks();
+ }
+ bool backup_low();
+};
+
static bool innobackupex_mode = false;
@@ -430,7 +493,8 @@ void CorruptedPages::rename_space(ulint space_id, const std::string &new_name)
ut_a(!pthread_mutex_unlock(&m_mutex));
}
-bool CorruptedPages::print_to_file(const char *filename) const
+bool CorruptedPages::print_to_file(ds_ctxt *ds_data,
+ const char *filename) const
{
std::ostringstream out;
ut_a(!pthread_mutex_lock(&m_mutex));
@@ -458,8 +522,8 @@ bool CorruptedPages::print_to_file(const char *filename) const
out << "\n";
}
ut_a(!pthread_mutex_unlock(&m_mutex));
- if (xtrabackup_backup)
- return backup_file_print_buf(filename, out.str().c_str(),
+ if (ds_data)
+ return ds_data->backup_file_print_buf(filename, out.str().c_str(),
static_cast<int>(out.str().size()));
std::ofstream outfile;
outfile.open(filename);
@@ -580,19 +644,6 @@ void CorruptedPages::zero_out_free_pages()
aligned_free(zero_page);
}
-/* Simple datasink creation tracking...add datasinks in the reverse order you
-want them destroyed. */
-#define XTRABACKUP_MAX_DATASINKS 10
-static ds_ctxt_t *datasinks[XTRABACKUP_MAX_DATASINKS];
-static uint actual_datasinks = 0;
-static inline
-void
-xtrabackup_add_datasink(ds_ctxt_t *ds)
-{
- xb_ad(actual_datasinks < XTRABACKUP_MAX_DATASINKS);
- datasinks[actual_datasinks] = ds; actual_datasinks++;
-}
-
typedef void (*process_single_tablespace_func_t)(const char *dirname,
const char *filname,
bool is_remote,
@@ -927,6 +978,7 @@ typedef struct {
pthread_mutex_t* count_mutex;
os_thread_id_t id;
CorruptedPages *corrupted_pages;
+ Backup_datasinks *datasinks;
} data_thread_ctxt_t;
/* ======== for option and variables ======== */
@@ -1030,7 +1082,8 @@ enum options_xtrabackup
OPT_XTRA_CHECK_PRIVILEGES,
OPT_XTRA_MYSQLD_ARGS,
OPT_XB_IGNORE_INNODB_PAGE_CORRUPTION,
- OPT_INNODB_FORCE_RECOVERY
+ OPT_INNODB_FORCE_RECOVERY,
+ OPT_ARIA_LOG_DIR_PATH
};
struct my_option xb_client_options[]= {
@@ -1614,6 +1667,11 @@ struct my_option xb_server_options[] =
&xb_plugin_dir, &xb_plugin_dir,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
+ {"aria_log_dir_path", OPT_ARIA_LOG_DIR_PATH,
+ "Path to individual files and their sizes.",
+ &aria_log_dir_path, &aria_log_dir_path,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
{"open_files_limit", OPT_OPEN_FILES_LIMIT, "the maximum number of file "
"descriptors to reserve with setrlimit().",
(G_PTR*) &xb_open_files_limit, (G_PTR*) &xb_open_files_limit, 0, GET_ULONG,
@@ -1929,6 +1987,10 @@ xb_get_one_option(const struct my_option *opt,
}
break;
+ case OPT_ARIA_LOG_DIR_PATH:
+ ADD_PRINT_PARAM_OPT(aria_log_dir_path);
+ break;
+
case OPT_XTRA_TARGET_DIR:
strmake(xtrabackup_real_target_dir,argument, sizeof(xtrabackup_real_target_dir)-1);
xtrabackup_target_dir= xtrabackup_real_target_dir;
@@ -2414,7 +2476,8 @@ xb_read_delta_metadata(const char *filepath, xb_delta_info_t *info)
Write meta info for an incremental delta.
@return TRUE on success, FALSE on failure. */
my_bool
-xb_write_delta_metadata(const char *filename, const xb_delta_info_t *info)
+xb_write_delta_metadata(ds_ctxt *ds_meta,
+ const char *filename, const xb_delta_info_t *info)
{
ds_file_t *f;
char buf[64];
@@ -2725,7 +2788,9 @@ xb_get_copy_action(const char *dflt)
for full backup, pages filter for incremental backup, etc.
@return FALSE on success and TRUE on error */
-static my_bool xtrabackup_copy_datafile(fil_node_t *node, uint thread_n,
+static my_bool xtrabackup_copy_datafile(ds_ctxt *ds_data,
+ ds_ctxt *ds_meta,
+ fil_node_t *node, uint thread_n,
const char *dest_name,
const xb_write_filt_t &write_filter,
CorruptedPages &corrupted_pages)
@@ -2793,7 +2858,7 @@ static my_bool xtrabackup_copy_datafile(fil_node_t *node, uint thread_n,
ut_a(write_filter.process != NULL);
if (write_filter.init != NULL &&
- !write_filter.init(&write_filt_ctxt, dst_name, &cursor,
+ !write_filter.init(ds_meta, &write_filt_ctxt, dst_name, &cursor,
opt_log_innodb_page_corruption ? &corrupted_pages : NULL)) {
msg (thread_n, "mariabackup: error: failed to initialize page write filter.");
goto error;
@@ -3166,7 +3231,8 @@ DECLARE_THREAD(data_copy_thread_func)(
DBUG_EXECUTE_FOR_KEY("wait_innodb_redo_before_copy", node->space->name,
backup_wait_for_lsn(get_current_lsn(mysql_connection)););
/* copy the datafile */
- if (xtrabackup_copy_datafile(node, num, NULL,
+ if (xtrabackup_copy_datafile(ctxt->datasinks->m_data,
+ ctxt->datasinks->m_meta, node, num, NULL,
xtrabackup_incremental ? wf_incremental : wf_write_through,
*ctxt->corrupted_pages))
die("failed to copy datafile.");
@@ -3192,22 +3258,21 @@ Otherwise (i.e. when streaming in the 'tar' format) we need 2 separate datasinks
for the data stream (and don't allow parallel data copying) and for metainfo
files (including LOG_FILE_NAME). The second datasink writes to temporary
files first, and then streams them in a serialized way when closed. */
-static void
-xtrabackup_init_datasinks(void)
+void Backup_datasinks::init()
{
/* Start building out the pipelines from the terminus back */
if (xtrabackup_stream) {
/* All streaming goes to stdout */
- ds_data = ds_meta = ds_redo = ds_create(xtrabackup_target_dir,
- DS_TYPE_STDOUT);
+ m_data = m_meta = m_redo = ds_create(xtrabackup_target_dir,
+ DS_TYPE_STDOUT);
} else {
/* Local filesystem */
- ds_data = ds_meta = ds_redo = ds_create(xtrabackup_target_dir,
- DS_TYPE_LOCAL);
+ m_data = m_meta = m_redo = ds_create(xtrabackup_target_dir,
+ DS_TYPE_LOCAL);
}
/* Track it for destruction */
- xtrabackup_add_datasink(ds_data);
+ add_datasink_to_destroy(m_data);
/* Stream formatting */
if (xtrabackup_stream) {
@@ -3216,66 +3281,50 @@ xtrabackup_init_datasinks(void)
ut_a(xtrabackup_stream_fmt == XB_STREAM_FMT_XBSTREAM);
ds = ds_create(xtrabackup_target_dir, DS_TYPE_XBSTREAM);
- xtrabackup_add_datasink(ds);
+ add_datasink_to_destroy(ds);
- ds_set_pipe(ds, ds_data);
- ds_data = ds;
+ ds_set_pipe(ds, m_data);
+ m_data = ds;
- ds_redo = ds_meta = ds_data;
+ m_redo = m_meta = m_data;
}
- /* Compression for ds_data and ds_redo */
+ /* Compression for m_data and m_redo */
if (xtrabackup_compress) {
ds_ctxt_t *ds;
/* Use a 1 MB buffer for compressed output stream */
ds = ds_create(xtrabackup_target_dir, DS_TYPE_BUFFER);
ds_buffer_set_size(ds, 1024 * 1024);
- xtrabackup_add_datasink(ds);
- ds_set_pipe(ds, ds_data);
- if (ds_data != ds_redo) {
- ds_data = ds;
+ add_datasink_to_destroy(ds);
+ ds_set_pipe(ds, m_data);
+ if (m_data != m_redo) {
+ m_data = ds;
ds = ds_create(xtrabackup_target_dir, DS_TYPE_BUFFER);
ds_buffer_set_size(ds, 1024 * 1024);
- xtrabackup_add_datasink(ds);
- ds_set_pipe(ds, ds_redo);
- ds_redo = ds;
+ add_datasink_to_destroy(ds);
+ ds_set_pipe(ds, m_redo);
+ m_redo = ds;
} else {
- ds_redo = ds_data = ds;
+ m_redo = m_data = ds;
}
ds = ds_create(xtrabackup_target_dir, DS_TYPE_COMPRESS);
- xtrabackup_add_datasink(ds);
- ds_set_pipe(ds, ds_data);
- if (ds_data != ds_redo) {
- ds_data = ds;
+ add_datasink_to_destroy(ds);
+ ds_set_pipe(ds, m_data);
+ if (m_data != m_redo) {
+ m_data = ds;
ds = ds_create(xtrabackup_target_dir, DS_TYPE_COMPRESS);
- xtrabackup_add_datasink(ds);
- ds_set_pipe(ds, ds_redo);
- ds_redo = ds;
+ add_datasink_to_destroy(ds);
+ ds_set_pipe(ds, m_redo);
+ m_redo = ds;
} else {
- ds_redo = ds_data = ds;
+ m_redo = m_data = ds;
}
}
}
-/************************************************************************
-Destroy datasinks.
-
-Destruction is done in the specific order to not violate their order in the
-pipeline so that each datasink is able to flush data down the pipeline. */
-static void xtrabackup_destroy_datasinks(void)
-{
- for (uint i = actual_datasinks; i > 0; i--) {
- ds_destroy(datasinks[i-1]);
- datasinks[i-1] = NULL;
- }
- ds_data = NULL;
- ds_meta = NULL;
- ds_redo = NULL;
-}
-
#define SRV_MAX_N_PENDING_SYNC_IOS 100
/** Initialize the tablespace cache subsystem. */
@@ -4375,7 +4424,7 @@ static void stop_backup_threads()
/** Implement the core of --backup
@return whether the operation succeeded */
-static bool xtrabackup_backup_low()
+bool Backup_datasinks::backup_low()
{
ut_ad(!metadata_to_lsn);
@@ -4446,7 +4495,7 @@ static bool xtrabackup_backup_low()
}
metadata_last_lsn = log_copy_scanned_lsn;
- if (!xtrabackup_stream_metadata(ds_meta)) {
+ if (!xtrabackup_stream_metadata(m_meta)) {
msg("Error: failed to stream metadata.");
return false;
}
@@ -4462,7 +4511,8 @@ static bool xtrabackup_backup_low()
}
sprintf(filename, "%s/%s", xtrabackup_extra_lsndir,
XTRABACKUP_INFO);
- if (!write_xtrabackup_info(mysql_connection, filename, false, false)) {
+ if (!write_xtrabackup_info(m_data,
+ mysql_connection, filename, false, false)) {
msg("Error: failed to write info "
"to '%s'.", filename);
return false;
@@ -4482,6 +4532,7 @@ static bool xtrabackup_backup_func()
pthread_mutex_t count_mutex;
CorruptedPages corrupted_pages;
data_thread_ctxt_t *data_threads;
+ Backup_datasinks backup_datasinks;
pthread_mutex_init(&backup_mutex, NULL);
pthread_cond_init(&scanned_lsn_cond, NULL);
@@ -4622,7 +4673,7 @@ reread_log_header:
if (err != DB_SUCCESS)
goto fail;
- xtrabackup_init_datasinks();
+ backup_datasinks.init();
if (!select_history()) {
goto fail;
@@ -4630,7 +4681,7 @@ reread_log_header:
/* open the log file */
memset(&stat_info, 0, sizeof(MY_STAT));
- dst_log_file = ds_open(ds_redo, LOG_FILE_NAME, &stat_info);
+ dst_log_file = ds_open(backup_datasinks.m_redo, LOG_FILE_NAME, &stat_info);
if (dst_log_file == NULL) {
msg("Error: failed to open the target stream for '%s'.",
LOG_FILE_NAME);
@@ -4747,6 +4798,7 @@ fail_before_log_copying_thread_start:
data_threads[i].count = &count;
data_threads[i].count_mutex = &count_mutex;
data_threads[i].corrupted_pages = &corrupted_pages;
+ data_threads[i].datasinks= &backup_datasinks;
data_threads[i].id = os_thread_create(data_copy_thread_func,
data_threads + i);
}
@@ -4767,10 +4819,13 @@ fail_before_log_copying_thread_start:
datafiles_iter_free(it);
}
- bool ok = backup_start(corrupted_pages);
+ DBUG_ASSERT(backup_datasinks.m_data);
+ DBUG_ASSERT(backup_datasinks.m_meta);
+ bool ok = backup_start(backup_datasinks.m_data,
+ backup_datasinks.m_meta, corrupted_pages);
if (ok) {
- ok = xtrabackup_backup_low();
+ ok = backup_datasinks.backup_low();
backup_release();
@@ -4780,12 +4835,13 @@ fail_before_log_copying_thread_start:
);
if (ok) {
- backup_finish();
+ backup_finish(backup_datasinks.m_data);
}
}
if (opt_log_innodb_page_corruption)
- ok = corrupted_pages.print_to_file(MB_CORRUPTED_PAGES_FILE);
+ ok = corrupted_pages.print_to_file(backup_datasinks.m_data,
+ MB_CORRUPTED_PAGES_FILE);
if (!ok) {
goto fail;
@@ -4794,7 +4850,7 @@ fail_before_log_copying_thread_start:
if (changed_page_bitmap) {
xb_page_bitmap_deinit(changed_page_bitmap);
}
- xtrabackup_destroy_datasinks();
+ backup_datasinks.destroy();
msg("Redo log (from LSN " LSN_PF " to " LSN_PF
") was copied.", checkpoint_lsn_start, log_copy_scanned_lsn);
@@ -4842,7 +4898,7 @@ FTWRL. This ensures consistent backup in presence of DDL.
It is the responsibility of the prepare phase to deal with .new, .ren, and .del
files.
*/
-void backup_fix_ddl(CorruptedPages &corrupted_pages)
+void CorruptedPages::backup_fix_ddl(ds_ctxt *ds_data, ds_ctxt *ds_meta)
{
std::set<std::string> new_tables;
std::set<std::string> dropped_tables;
@@ -4864,7 +4920,7 @@ void backup_fix_ddl(CorruptedPages &corrupted_pages)
if (ddl_tracker.drops.find(id) != ddl_tracker.drops.end()) {
dropped_tables.insert(name);
- corrupted_pages.drop_space(id);
+ drop_space(id);
continue;
}
@@ -4877,7 +4933,7 @@ void backup_fix_ddl(CorruptedPages &corrupted_pages)
if (new_name != name) {
renamed_tables[name] = new_name;
if (opt_log_innodb_page_corruption)
- corrupted_pages.rename_space(id, new_name);
+ rename_space(id, new_name);
}
}
@@ -4898,7 +4954,7 @@ void backup_fix_ddl(CorruptedPages &corrupted_pages)
dropped_tables.erase(name);
new_tables.insert(name);
if (opt_log_innodb_page_corruption)
- corrupted_pages.drop_space(id);
+ drop_space(id);
}
}
@@ -4907,7 +4963,8 @@ void backup_fix_ddl(CorruptedPages &corrupted_pages)
iter != renamed_tables.end(); ++iter) {
const std::string old_name = iter->first;
std::string new_name = iter->second;
- backup_file_printf((old_name + ".ren").c_str(), "%s", new_name.c_str());
+ DBUG_ASSERT(ds_data);
+ ds_data->backup_file_printf((old_name + ".ren").c_str(), "%s", new_name.c_str());
}
// Mark tablespaces for drop
@@ -4915,7 +4972,7 @@ void backup_fix_ddl(CorruptedPages &corrupted_pages)
iter != dropped_tables.end();
iter++) {
const std::string name(*iter);
- backup_file_printf((name + ".del").c_str(), "%s", "");
+ ds_data->backup_file_printf((name + ".del").c_str(), "%s", "");
}
// Load and copy new tables.
@@ -4959,8 +5016,9 @@ void backup_fix_ddl(CorruptedPages &corrupted_pages)
continue;
std::string dest_name(node->space->name);
dest_name.append(".new");
- xtrabackup_copy_datafile(node, 0, dest_name.c_str(), wf_write_through,
- corrupted_pages);
+ xtrabackup_copy_datafile(ds_data, ds_meta,
+ node, 0, dest_name.c_str(),
+ wf_write_through, *this);
}
datafiles_iter_free(it);
@@ -6075,7 +6133,7 @@ static bool xtrabackup_prepare_func(char** argv)
}
}
else
- corrupted_pages.print_to_file(MB_CORRUPTED_PAGES_FILE);
+ corrupted_pages.print_to_file(NULL, MB_CORRUPTED_PAGES_FILE);
if (ok) {
msg("Last binlog file %s, position %lld",
diff --git a/extra/mariabackup/xtrabackup.h b/extra/mariabackup/xtrabackup.h
index 394ea9ed87c..e377c297f91 100644
--- a/extra/mariabackup/xtrabackup.h
+++ b/extra/mariabackup/xtrabackup.h
@@ -48,11 +48,13 @@ public:
bool contains(ulint space_id, unsigned page_no) const;
void drop_space(ulint space_id);
void rename_space(ulint space_id, const std::string &new_name);
- bool print_to_file(const char *file_name) const;
+ bool print_to_file(ds_ctxt *ds_data, const char *file_name) const;
void read_from_file(const char *file_name);
bool empty() const;
void zero_out_free_pages();
+ void backup_fix_ddl(ds_ctxt *ds_data, ds_ctxt *ds_meta);
+
private:
void add_page_no_lock(const char *space_name, ulint space_id,
unsigned page_no, bool convert_space_name);
@@ -65,6 +67,7 @@ private:
container_t m_spaces;
};
+
/* value of the --incremental option */
extern lsn_t incremental_lsn;
@@ -73,13 +76,12 @@ extern char *xtrabackup_incremental_dir;
extern char *xtrabackup_incremental_basedir;
extern char *innobase_data_home_dir;
extern char *innobase_buffer_pool_filename;
+extern char *aria_log_dir_path;
extern char *xb_plugin_dir;
extern char *xb_rocksdb_datadir;
extern my_bool xb_backup_rocksdb;
extern uint opt_protocol;
-extern ds_ctxt_t *ds_meta;
-extern ds_ctxt_t *ds_data;
/* The last checkpoint LSN at the backup startup time */
extern lsn_t checkpoint_lsn_start;
@@ -179,7 +181,8 @@ extern ulong opt_binlog_info;
extern ulong xtrabackup_innodb_force_recovery;
void xtrabackup_io_throttling(void);
-my_bool xb_write_delta_metadata(const char *filename,
+my_bool xb_write_delta_metadata(ds_ctxt *ds_meta,
+ const char *filename,
const xb_delta_info_t *info);
/************************************************************************
diff --git a/include/my_alloca.h b/include/my_alloca.h
index 761c2adb890..85fa64e9558 100644
--- a/include/my_alloca.h
+++ b/include/my_alloca.h
@@ -32,7 +32,10 @@
#endif
#endif
-#if defined(HAVE_ALLOCA)
+#if defined(_AIX) && !defined(__GNUC__) && !defined(_AIX43)
+#pragma alloca
+#endif /* _AIX */
+
/*
If the GCC/LLVM compiler from the MinGW is used,
alloca may not be defined when using the MSVC CRT:
@@ -40,6 +43,5 @@
#if defined(__GNUC__) && !defined(HAVE_ALLOCA_H) && !defined(alloca)
#define alloca __builtin_alloca
#endif /* GNUC */
-#endif
#endif /* MY_ALLOCA_INCLUDED */
diff --git a/mysql-test/include/sql_mode_pad_char_to_full_length.inc b/mysql-test/include/sql_mode_pad_char_to_full_length.inc
new file mode 100644
index 00000000000..df03c4dbc28
--- /dev/null
+++ b/mysql-test/include/sql_mode_pad_char_to_full_length.inc
@@ -0,0 +1,31 @@
+--echo #
+--echo # MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic
+--echo #
+
+CREATE TABLE t1 (a INT,b CHAR(20));
+SHOW CREATE TABLE t1;
+CREATE UNIQUE INDEX bi USING HASH ON t1 (b);
+INSERT INTO t1 VALUES (0,0);
+SET sql_mode='pad_char_to_full_length';
+DELETE FROM t1;
+DROP TABLE t1;
+
+
+SET sql_mode='';
+CREATE TABLE t1 (a INT,b CHAR(20));
+SHOW CREATE TABLE t1;
+CREATE UNIQUE INDEX bi USING HASH ON t1 (b);
+SET sql_mode='pad_char_to_full_length';
+INSERT INTO t1 VALUES (0,0);
+DELETE FROM t1;
+DROP TABLE t1;
+
+
+SET sql_mode='';
+CREATE OR REPLACE TABLE t1 (a CHAR(20),b CHAR(20));
+SHOW CREATE TABLE t1;
+CREATE UNIQUE INDEX bi USING HASH ON t1 (b);
+INSERT INTO t1 VALUES (0,0);
+SET sql_mode='pad_char_to_full_length';
+DELETE FROM t1;
+DROP TABLE t1;
diff --git a/mysql-test/lib/My/CoreDump.pm b/mysql-test/lib/My/CoreDump.pm
index 05b6edf1385..be6d21146d1 100644
--- a/mysql-test/lib/My/CoreDump.pm
+++ b/mysql-test/lib/My/CoreDump.pm
@@ -310,16 +310,8 @@ sub cdb_check {
`cdb -? 2>&1`;
if ($? >> 8)
{
- print "Cannot find cdb. Please Install Debugging tools for Windows\n";
- print "from http://www.microsoft.com/whdc/devtools/debugging/";
- if($ENV{'ProgramW6432'})
- {
- print "install64bit.mspx (native x64 version)\n";
- }
- else
- {
- print "installx86.mspx\n";
- }
+ print "Cannot find the cdb debugger. Please install Debugging tools for Windows\n";
+ print "and set PATH environment variable to include location of cdb.exe";
}
}
@@ -328,25 +320,6 @@ sub _cdb {
my ($core_name, $format)= @_;
print "\nTrying 'cdb' to get a backtrace\n";
return unless -f $core_name;
-
- # Try to set environment for debugging tools for Windows
- if ($ENV{'PATH'} !~ /Debugging Tools/)
- {
- if ($ENV{'ProgramW6432'})
- {
- # On x64 computer
- $ENV{'PATH'}.= ";".$ENV{'ProgramW6432'}."\\Debugging Tools For Windows (x64)";
- }
- else
- {
- # On x86 computer. Newest versions of Debugging tools are installed in the
- # directory with (x86) suffix, older versions did not have this suffix.
- $ENV{'PATH'}.= ";".$ENV{'ProgramFiles'}."\\Debugging Tools For Windows (x86)";
- $ENV{'PATH'}.= ";".$ENV{'ProgramFiles'}."\\Debugging Tools For Windows";
- }
- }
-
-
# Read module list, find out the name of executable and
# build symbol path (required by cdb if executable was built on
# different machine)
@@ -384,7 +357,7 @@ sub _cdb {
if (!$ENV{'_NT_SYMBOL_PATH'})
{
my $windir= $ENV{'windir'};
- my $symbol_cache= substr($windir ,0, index($windir,'\\'))."\\cdb_symbols";
+ my $symbol_cache= substr($windir ,0, index($windir,'\\'))."\\symbols";
print "OS debug symbols will be downloaded and stored in $symbol_cache.\n";
print "You can control the location of symbol cache with _NT_SYMBOL_PATH\n";
diff --git a/mysql-test/main/bootstrap.result b/mysql-test/main/bootstrap.result
index 6801791221e..7cd8a851e56 100644
--- a/mysql-test/main/bootstrap.result
+++ b/mysql-test/main/bootstrap.result
@@ -1,15 +1,28 @@
-drop table if exists t1;
+#
+# test mysqld in bootstrap mode
+#
+#
+# Check that --bootstrap reads from stdin
+#
# Kill the server
# restart
drop table t1;
+#
+# Check that --bootstrap of file with SQL error returns error
+#
# Kill the server
# restart
drop table t1;
ERROR 42S02: Unknown table 'test.t1'
+#
+# Bootstrap with a large thd->net.max_packet
+#
# Kill the server
# restart
drop table t1;
-End of 5.1 tests
+#
+# End of 5.1 tests
+#
#
# Bug #11766306: 59393: HAVE_INNODB=YES WHEN MYSQLD
# STARTED WITH --SKIP-INNODB
@@ -18,9 +31,23 @@ SELECT 'bug' as '' FROM INFORMATION_SCHEMA.ENGINES WHERE engine='innodb'
and SUPPORT='YES';
# Kill the server
+#
+# MDEV-13063 Server crashes in intern_plugin_lock or assertion `plugin_ptr->ref_count == 1' fails in plugin_init
+#
+#
+# MDEV-19349 mysql_install_db: segfault at tmp_file_prefix check
+#
# restart
-End of 5.5 tests
+#
+# End of 5.5 tests
+#
+#
+# Check that --bootstrap can install and uninstall plugins
+#
# Kill the server
+#
+# Check that installed plugins are *not* automatically loaded in --bootstrap
+#
# restart
flush tables;
show create table t1;
@@ -34,4 +61,16 @@ name dl
EXAMPLE ha_example.so
truncate table mysql.plugin;
# Kill the server
+#
+# MDEV-9969 mysql_install_db error processing ignore_db_dirs.
+#
+#
+# MDEV-13397 MariaDB upgrade fail when using default_time_zone
+#
+#
+# MDEV-30818 invalid ssl prevents bootstrap
+#
# restart
+#
+# End of 10.3 tests
+#
diff --git a/mysql-test/main/bootstrap.test b/mysql-test/main/bootstrap.test
index 1c8dafd8f28..8eee907e166 100644
--- a/mysql-test/main/bootstrap.test
+++ b/mysql-test/main/bootstrap.test
@@ -1,17 +1,20 @@
+--echo #
+--echo # test mysqld in bootstrap mode
+--echo #
--source include/not_embedded.inc
-#
-# test mysqld in bootstrap mode
-#
---disable_warnings
-drop table if exists t1;
---enable_warnings
+--source include/have_example_plugin.inc
+
+--let test_bootstrap=$MYSQLTEST_VARDIR/tmp/test_bootstrap.sql
+--write_file $test_bootstrap
+use test;
+EOF
# Add the datadir to the bootstrap command
let $MYSQLD_DATADIR= `select @@datadir`;
let $MYSQLD_BOOTSTRAP_CMD= $MYSQLD_BOOTSTRAP_CMD --datadir=$MYSQLD_DATADIR --tmpdir=$MYSQL_TMP_DIR --default-storage-engine=MyISAM --loose-skip-innodb --plugin-maturity=unknown;
-#
-# Check that --bootstrap reads from stdin
-#
+--echo #
+--echo # Check that --bootstrap reads from stdin
+--echo #
--write_file $MYSQLTEST_VARDIR/tmp/bootstrap_test.sql
use test;
CREATE TABLE t1(a int);
@@ -21,9 +24,9 @@ EOF
--source include/start_mysqld.inc
drop table t1;
remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_test.sql;
-#
-# Check that --bootstrap of file with SQL error returns error
-#
+--echo #
+--echo # Check that --bootstrap of file with SQL error returns error
+--echo #
--write_file $MYSQLTEST_VARDIR/tmp/bootstrap_error.sql
use test;
CREATE TABLE t1;
@@ -37,9 +40,9 @@ EOF
drop table t1;
remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_error.sql;
-#
-# Bootstrap with a large thd->net.max_packet
-#
+--echo #
+--echo # Bootstrap with a large thd->net.max_packet
+--echo #
--disable_query_log
create table t1 select 2 as a, concat(repeat('MySQL', @@max_allowed_packet/10), ';') as b;
eval select * into outfile '$MYSQLTEST_VARDIR/tmp/long_query.sql' from t1;
@@ -51,7 +54,9 @@ remove_file $MYSQLTEST_VARDIR/tmp/long_query.sql;
--source include/start_mysqld.inc
drop table t1;
---echo End of 5.1 tests
+--echo #
+--echo # End of 5.1 tests
+--echo #
--echo #
--echo # Bug #11766306: 59393: HAVE_INNODB=YES WHEN MYSQLD
@@ -63,29 +68,25 @@ SELECT 'bug' as '' FROM INFORMATION_SCHEMA.ENGINES WHERE engine='innodb'
and SUPPORT='YES';
--source include/kill_mysqld.inc
-#
-# MDEV-13063 Server crashes in intern_plugin_lock or assertion `plugin_ptr->ref_count == 1' fails in plugin_init
-#
+--echo #
+--echo # MDEV-13063 Server crashes in intern_plugin_lock or assertion `plugin_ptr->ref_count == 1' fails in plugin_init
+--echo #
--error 1
--exec $MYSQLD_BOOTSTRAP_CMD --myisam_recover_options=NONE
-#
-# MDEV-19349 mysql_install_db: segfault at tmp_file_prefix check
-#
---write_file $MYSQLTEST_VARDIR/tmp/1
-use test;
-EOF
---exec $MYSQLD_BOOTSTRAP_CMD < $MYSQLTEST_VARDIR/tmp/1 >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
---remove_file $MYSQLTEST_VARDIR/tmp/1
+--echo #
+--echo # MDEV-19349 mysql_install_db: segfault at tmp_file_prefix check
+--echo #
+--exec $MYSQLD_BOOTSTRAP_CMD < $test_bootstrap >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
--source include/start_mysqld.inc
---echo End of 5.5 tests
+--echo #
+--echo # End of 5.5 tests
+--echo #
---source include/not_windows_embedded.inc
---source include/have_example_plugin.inc
-#
-# Check that --bootstrap can install and uninstall plugins
-#
+--echo #
+--echo # Check that --bootstrap can install and uninstall plugins
+--echo #
let $PLUGIN_DIR=`select @@plugin_dir`;
--source include/kill_mysqld.inc
--write_file $MYSQLTEST_VARDIR/tmp/install_plugin.sql
@@ -95,9 +96,9 @@ EOF
--exec $MYSQLD_BOOTSTRAP_CMD --plugin-dir=$PLUGIN_DIR < $MYSQLTEST_VARDIR/tmp/install_plugin.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
--remove_file $MYSQLTEST_VARDIR/tmp/install_plugin.sql
-#
-# Check that installed plugins are *not* automatically loaded in --bootstrap
-#
+--echo #
+--echo # Check that installed plugins are *not* automatically loaded in --bootstrap
+--echo #
--write_file $MYSQLTEST_VARDIR/tmp/bootstrap_plugins.sql
SET SQL_MODE="";
use test;
@@ -113,24 +114,24 @@ drop table t1;
select * from mysql.plugin;
truncate table mysql.plugin;
-
-#
-# MDEV-9969 mysql_install_db error processing ignore_db_dirs.
-#
---write_file $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql
-use test;
-EOF
--source include/kill_mysqld.inc
---exec $MYSQLD_BOOTSTRAP_CMD --ignore-db-dirs='some_dir' --ignore-db-dirs='some_dir' < $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
---remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql
+--echo #
+--echo # MDEV-9969 mysql_install_db error processing ignore_db_dirs.
+--echo #
+--exec $MYSQLD_BOOTSTRAP_CMD --ignore-db-dirs='some_dir' --ignore-db-dirs='some_dir' < $test_bootstrap >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
-#
-# MDEV-13397 MariaDB upgrade fail when using default_time_zone
-#
---write_file $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql
-use test;
-EOF
---exec $MYSQLD_BOOTSTRAP_CMD --default-time-zone=Europe/Moscow < $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
---remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql
+--echo #
+--echo # MDEV-13397 MariaDB upgrade fail when using default_time_zone
+--echo #
+--exec $MYSQLD_BOOTSTRAP_CMD --default-time-zone=Europe/Moscow < $test_bootstrap >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
+
+--echo #
+--echo # MDEV-30818 invalid ssl prevents bootstrap
+--echo #
+--exec $MYSQLD_BOOTSTRAP_CMD --ssl-ca=/dev/nonexistent < $test_bootstrap >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
--source include/start_mysqld.inc
+--echo #
+--echo # End of 10.3 tests
+--echo #
+--remove_file $test_bootstrap
diff --git a/mysql-test/main/ctype_uca_partitions.result b/mysql-test/main/ctype_uca_partitions.result
index d7b79046b34..373fe914527 100644
--- a/mysql-test/main/ctype_uca_partitions.result
+++ b/mysql-test/main/ctype_uca_partitions.result
@@ -84,3 +84,43 @@ O
P
Y
DROP TABLE t1;
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-30072 Wrong ORDER BY for a partitioned prefix key + NOPAD
+#
+SET NAMES utf8mb4;
+CREATE TABLE t1
+(
+id INT,
+data VARCHAR(20),
+KEY data_id (data,id)
+) COLLATE utf8mb3_unicode_nopad_ci ENGINE=MyISAM
+PARTITION BY RANGE COLUMNS (id)
+(
+PARTITION p10 VALUES LESS THAN (20),
+PARTITION p20 VALUES LESS THAN MAXVALUE
+);
+INSERT INTO t1 VALUES (30, 'ss '), (10, 'ß ');
+SELECT id FROM t1 WHERE data='ss ' ORDER BY id;
+id
+10
+30
+SELECT id FROM t1 WHERE data='ss ' ORDER BY id DESC;
+id
+30
+10
+ALTER TABLE t1 DROP KEY data_id, ADD KEY data_id2(data(10),id);
+SELECT id FROM t1 WHERE data='ss ' ORDER BY id;
+id
+10
+30
+SELECT id FROM t1 WHERE data='ss ' ORDER BY id DESC;
+id
+30
+10
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/ctype_uca_partitions.test b/mysql-test/main/ctype_uca_partitions.test
index 5734bb52008..81f1a091574 100644
--- a/mysql-test/main/ctype_uca_partitions.test
+++ b/mysql-test/main/ctype_uca_partitions.test
@@ -36,3 +36,35 @@ SELECT * FROM t1 PARTITION (p0) ORDER BY c1;
SELECT * FROM t1 PARTITION (p1) ORDER BY c1;
SELECT * FROM t1 PARTITION (p2) ORDER BY c1;
DROP TABLE t1;
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-30072 Wrong ORDER BY for a partitioned prefix key + NOPAD
+--echo #
+
+SET NAMES utf8mb4;
+CREATE TABLE t1
+(
+ id INT,
+ data VARCHAR(20),
+ KEY data_id (data,id)
+) COLLATE utf8mb3_unicode_nopad_ci ENGINE=MyISAM
+PARTITION BY RANGE COLUMNS (id)
+(
+ PARTITION p10 VALUES LESS THAN (20),
+ PARTITION p20 VALUES LESS THAN MAXVALUE
+);
+INSERT INTO t1 VALUES (30, 'ss '), (10, 'ß ');
+SELECT id FROM t1 WHERE data='ss ' ORDER BY id;
+SELECT id FROM t1 WHERE data='ss ' ORDER BY id DESC;
+ALTER TABLE t1 DROP KEY data_id, ADD KEY data_id2(data(10),id);
+SELECT id FROM t1 WHERE data='ss ' ORDER BY id;
+SELECT id FROM t1 WHERE data='ss ' ORDER BY id DESC;
+DROP TABLE t1;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/derived.result b/mysql-test/main/derived.result
index 3576c1fa356..cda21e275ae 100644
--- a/mysql-test/main/derived.result
+++ b/mysql-test/main/derived.result
@@ -1330,5 +1330,28 @@ a b
DROP VIEW v1;
DROP TABLE t1;
#
+# MDEV-28616: derived table over union with order by clause that
+# contains subquery with unresolvable column reference
+#
+SELECT 1 FROM (
+SELECT 1 UNION SELECT 2 ORDER BY (SELECT 1 FROM DUAL WHERE xxx = 0)
+) dt;
+ERROR 42S22: Unknown column 'xxx' in 'where clause'
+create table t1 (a int, b int);
+insert into t1 values (3,8), (7,2), (1,4), (5,9);
+create table t2 (a int, b int);
+insert into t2 values (9,1), (7,3), (2,6);
+create table t3 (c int, d int);
+insert into t3 values (7,8), (1,2), (3,8);
+select * from
+(
+select a,b from t1 where t1.a > 3
+union
+select a,b from t2 where t2.b < 6
+order by (a - b / (select a + max(c) from t3 where d = x))
+) dt;
+ERROR 42S22: Unknown column 'x' in 'where clause'
+drop table t1,t2,t3;
+#
# End of 10.3 tests
#
diff --git a/mysql-test/main/derived.test b/mysql-test/main/derived.test
index 67ea3c0d39a..756c2d62ed6 100644
--- a/mysql-test/main/derived.test
+++ b/mysql-test/main/derived.test
@@ -1142,5 +1142,35 @@ DROP VIEW v1;
DROP TABLE t1;
--echo #
+--echo # MDEV-28616: derived table over union with order by clause that
+--echo # contains subquery with unresolvable column reference
+--echo #
+
+--error ER_BAD_FIELD_ERROR
+SELECT 1 FROM (
+ SELECT 1 UNION SELECT 2 ORDER BY (SELECT 1 FROM DUAL WHERE xxx = 0)
+) dt;
+
+create table t1 (a int, b int);
+insert into t1 values (3,8), (7,2), (1,4), (5,9);
+
+create table t2 (a int, b int);
+insert into t2 values (9,1), (7,3), (2,6);
+
+create table t3 (c int, d int);
+insert into t3 values (7,8), (1,2), (3,8);
+
+--error ER_BAD_FIELD_ERROR
+select * from
+(
+ select a,b from t1 where t1.a > 3
+ union
+ select a,b from t2 where t2.b < 6
+ order by (a - b / (select a + max(c) from t3 where d = x))
+) dt;
+
+drop table t1,t2,t3;
+
+--echo #
--echo # End of 10.3 tests
--echo #
diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result
index cd05e24ce15..dac5a7f7fcb 100644
--- a/mysql-test/main/opt_trace.result
+++ b/mysql-test/main/opt_trace.result
@@ -8469,6 +8469,362 @@ SELECT a FROM t1 WHERE (a,b) in (SELECT @c,@d);
a
DROP TABLE t1;
#
+# MDEV-31085: multi-update using view with optimizer trace enabled
+#
+SET SESSION optimizer_trace = 'enabled=on';
+CREATE TABLE t (a int, b int);
+CREATE VIEW v AS SELECT 1 AS c UNION SELECT 2 AS c;
+INSERT INTO t VALUES (0,4),(5,6);
+UPDATE t, v SET t.b = t.a, t.a = v.c WHERE v.c < t.a;
+SELECT * FROM information_schema.optimizer_trace;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+UPDATE t, v SET t.b = t.a, t.a = v.c WHERE v.c < t.a {
+ "steps": [
+ {
+ "view": {
+ "table": "v",
+ "select_id": 2,
+ "algorithm": "materialized"
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": 2,
+ "steps": [
+ {
+ "expanded_query": "/* select#2 */ select 1 AS c"
+ }
+ ]
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": 3,
+ "steps": [
+ {
+ "expanded_query": "/* select#3 */ select 2 AS c"
+ }
+ ]
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "/* select#1 */ update t join v set t.b = t.a,t.a = v.c where v.c < t.a"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "v.c < t.a",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "v.c < t.a"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "v.c < t.a"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "v.c < t.a"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 2,
+ "steps": []
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 3,
+ "steps": []
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "<derived2>",
+ "row_may_be_null": false,
+ "map_bit": 1,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t",
+ "table_scan": {
+ "rows": 2,
+ "cost": 2.004394531
+ }
+ },
+ {
+ "table": "<derived2>",
+ "table_scan": {
+ "rows": 2,
+ "cost": 2
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 2,
+ "cost": 2.004394531,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "records": 2,
+ "cost": 2.004394531,
+ "uses_join_buffering": false
+ }
+ },
+ "rows_for_plan": 2,
+ "cost_for_plan": 2.404394531,
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t"],
+ "table": "<derived2>",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 2,
+ "cost": 2,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "records": 2,
+ "cost": 2,
+ "uses_join_buffering": true
+ }
+ },
+ "rows_for_plan": 4,
+ "cost_for_plan": 5.204394531,
+ "estimated_join_cardinality": 4
+ }
+ ]
+ },
+ {
+ "plan_prefix": [],
+ "table": "<derived2>",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 2,
+ "cost": 2,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "records": 2,
+ "cost": 2,
+ "uses_join_buffering": false
+ }
+ },
+ "rows_for_plan": 2,
+ "cost_for_plan": 2.4,
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["<derived2>"],
+ "table": "t",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 2,
+ "cost": 2.004394531,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "records": 2,
+ "cost": 2.004394531,
+ "uses_join_buffering": true
+ }
+ },
+ "rows_for_plan": 4,
+ "cost_for_plan": 5.204394531,
+ "pruned_by_cost": true
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "best_join_order": ["t", "<derived2>"]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "v.c < t.a",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t",
+ "attached": null
+ },
+ {
+ "table": "<derived2>",
+ "attached": "v.c < t.a"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": [
+ {
+ "join_execution": {
+ "select_id": 2,
+ "steps": []
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 3,
+ "steps": []
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": "fake",
+ "steps": [
+ {
+ "expanded_query": "select c AS c from dual"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": "fake",
+ "steps": [
+ {
+ "table_dependencies": [
+ {
+ "table": "union",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "union",
+ "table_scan": {
+ "rows": 2,
+ "cost": 10.1
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "union",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 2,
+ "cost": 10.1,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "records": 2,
+ "cost": 10.1,
+ "uses_join_buffering": false
+ }
+ },
+ "rows_for_plan": 2,
+ "cost_for_plan": 10.5,
+ "estimated_join_cardinality": 2
+ }
+ ]
+ },
+ {
+ "best_join_order": ["union"]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": null,
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "union",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": "fake",
+ "steps": []
+ }
+ }
+ ]
+ }
+ }
+ ]
+} 0 0
+SELECT * FROM t;
+a b
+0 4
+1 5
+SET optimizer_trace=DEFAULT;
+DROP VIEW v;
+DROP TABLE t;
+#
# End of 10.4 tests
#
set optimizer_trace='enabled=on';
diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test
index e8ddd2ed8aa..0d07a0e864f 100644
--- a/mysql-test/main/opt_trace.test
+++ b/mysql-test/main/opt_trace.test
@@ -678,6 +678,25 @@ SELECT a FROM t1 WHERE (a,b) in (SELECT @c,@d);
DROP TABLE t1;
--echo #
+--echo # MDEV-31085: multi-update using view with optimizer trace enabled
+--echo #
+
+SET SESSION optimizer_trace = 'enabled=on';
+
+CREATE TABLE t (a int, b int);
+CREATE VIEW v AS SELECT 1 AS c UNION SELECT 2 AS c;
+INSERT INTO t VALUES (0,4),(5,6);
+UPDATE t, v SET t.b = t.a, t.a = v.c WHERE v.c < t.a;
+SELECT * FROM information_schema.optimizer_trace;
+
+SELECT * FROM t;
+
+SET optimizer_trace=DEFAULT;
+
+DROP VIEW v;
+DROP TABLE t;
+
+--echo #
--echo # End of 10.4 tests
--echo #
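
The MDEV-31085 coverage above dumps the whole trace, which is what the .result file records. When only a fragment of the trace is of interest, MariaDB's JSON functions can narrow it down; the following is a minimal sketch, not part of the patch, assuming the trace is enabled and the UPDATE from the test has just run (t and v are the objects created above):

SET SESSION optimizer_trace = 'enabled=on';
UPDATE t, v SET t.b = t.a, t.a = v.c WHERE v.c < t.a;
# Extract only the considered execution plans from the last recorded trace
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
FROM information_schema.optimizer_trace;
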
diff --git a/mysql-test/main/parser.result b/mysql-test/main/parser.result
index f1131538242..b2585fd9eab 100644
--- a/mysql-test/main/parser.result
+++ b/mysql-test/main/parser.result
@@ -1910,12 +1910,32 @@ SET @@sql_mode=@save_sql_mode;
#
# MDEV-30151 parse error 1=2 not between/in
#
-select 1=2 not in (3,4);
-1=2 not in (3,4)
+SELECT 1=2 NOT IN (3,4);
+1=2 NOT IN (3,4)
1
-select 1=2 not between 3 and 4;
-1=2 not between 3 and 4
+SELECT 1=2 NOT BETWEEN 3 AND 4;
+1=2 NOT BETWEEN 3 AND 4
1
+CREATE TABLE t1 ( f INT AS ( 1 IN ( 2 NOT BETWEEN 3 AND 4 ) ) );
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f` int(11) GENERATED ALWAYS AS (1 = 2 not between 3 and 4) VIRTUAL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+DROP TABLE t1;
+CREATE TABLE t1 ( f INT, CHECK ( 1 IN ( 2 NOT BETWEEN 3 AND 4 ) ) );
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f` int(11) DEFAULT NULL,
+ CONSTRAINT `CONSTRAINT_1` CHECK (1 = 2 not between 3 and 4)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+DROP TABLE t1;
+CREATE VIEW v1 AS SELECT 1 IN ( 2 NOT BETWEEN 3 AND 4 );
+SHOW CREATE VIEW v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select 1 = 2 not between 3 and 4 AS `1 IN ( 2 NOT BETWEEN 3 AND 4 )` latin1 latin1_swedish_ci
+DROP VIEW v1;
#
# End of 10.3 tests
#
diff --git a/mysql-test/main/parser.test b/mysql-test/main/parser.test
index b21bbda233a..72975f51055 100644
--- a/mysql-test/main/parser.test
+++ b/mysql-test/main/parser.test
@@ -1683,8 +1683,21 @@ SET @@sql_mode=@save_sql_mode;
--echo #
--echo # MDEV-30151 parse error 1=2 not between/in
--echo #
-select 1=2 not in (3,4);
-select 1=2 not between 3 and 4;
+
+SELECT 1=2 NOT IN (3,4);
+SELECT 1=2 NOT BETWEEN 3 AND 4;
+
+CREATE TABLE t1 ( f INT AS ( 1 IN ( 2 NOT BETWEEN 3 AND 4 ) ) );
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 ( f INT, CHECK ( 1 IN ( 2 NOT BETWEEN 3 AND 4 ) ) );
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+CREATE VIEW v1 AS SELECT 1 IN ( 2 NOT BETWEEN 3 AND 4 );
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
--echo #
--echo # End of 10.3 tests
diff --git a/mysql-test/main/sql_mode_pad_char_to_full_length.result b/mysql-test/main/sql_mode_pad_char_to_full_length.result
new file mode 100644
index 00000000000..6f68aade613
--- /dev/null
+++ b/mysql-test/main/sql_mode_pad_char_to_full_length.result
@@ -0,0 +1,94 @@
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic
+#
+SET default_storage_engine=MyISAM;
+#
+# MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic
+#
+CREATE TABLE t1 (a INT,b CHAR(20));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(20) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+CREATE UNIQUE INDEX bi USING HASH ON t1 (b);
+INSERT INTO t1 VALUES (0,0);
+SET sql_mode='pad_char_to_full_length';
+DELETE FROM t1;
+DROP TABLE t1;
+SET sql_mode='';
+CREATE TABLE t1 (a INT,b CHAR(20));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(20) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+CREATE UNIQUE INDEX bi USING HASH ON t1 (b);
+SET sql_mode='pad_char_to_full_length';
+INSERT INTO t1 VALUES (0,0);
+DELETE FROM t1;
+DROP TABLE t1;
+SET sql_mode='';
+CREATE OR REPLACE TABLE t1 (a CHAR(20),b CHAR(20));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` char(20) DEFAULT NULL,
+ `b` char(20) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+CREATE UNIQUE INDEX bi USING HASH ON t1 (b);
+INSERT INTO t1 VALUES (0,0);
+SET sql_mode='pad_char_to_full_length';
+DELETE FROM t1;
+DROP TABLE t1;
+SET default_storage_engine=MEMORY;
+#
+# MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic
+#
+CREATE TABLE t1 (a INT,b CHAR(20));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(20) DEFAULT NULL
+) ENGINE=MEMORY DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+CREATE UNIQUE INDEX bi USING HASH ON t1 (b);
+INSERT INTO t1 VALUES (0,0);
+SET sql_mode='pad_char_to_full_length';
+DELETE FROM t1;
+DROP TABLE t1;
+SET sql_mode='';
+CREATE TABLE t1 (a INT,b CHAR(20));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(20) DEFAULT NULL
+) ENGINE=MEMORY DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+CREATE UNIQUE INDEX bi USING HASH ON t1 (b);
+SET sql_mode='pad_char_to_full_length';
+INSERT INTO t1 VALUES (0,0);
+DELETE FROM t1;
+DROP TABLE t1;
+SET sql_mode='';
+CREATE OR REPLACE TABLE t1 (a CHAR(20),b CHAR(20));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` char(20) DEFAULT NULL,
+ `b` char(20) DEFAULT NULL
+) ENGINE=MEMORY DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+CREATE UNIQUE INDEX bi USING HASH ON t1 (b);
+INSERT INTO t1 VALUES (0,0);
+SET sql_mode='pad_char_to_full_length';
+DELETE FROM t1;
+DROP TABLE t1;
+SET default_storage_engine=DEFAULT;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/sql_mode_pad_char_to_full_length.test b/mysql-test/main/sql_mode_pad_char_to_full_length.test
new file mode 100644
index 00000000000..4d492bc1b70
--- /dev/null
+++ b/mysql-test/main/sql_mode_pad_char_to_full_length.test
@@ -0,0 +1,19 @@
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic
+--echo #
+
+SET default_storage_engine=MyISAM;
+--source include/sql_mode_pad_char_to_full_length.inc
+
+SET default_storage_engine=MEMORY;
+--source include/sql_mode_pad_char_to_full_length.inc
+
+SET default_storage_engine=DEFAULT;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
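
The include file exercised above toggles PAD_CHAR_TO_FULL_LENGTH around the INSERT/DELETE pair because the mode changes what a read of a CHAR column returns, which is the value the long unique HASH index from MDEV-28190 is computed from. A minimal sketch of the mode's visible effect, not part of the patch and using a throwaway table name:

CREATE TABLE t_pad (c CHAR(4));
INSERT INTO t_pad VALUES ('ab');
SELECT CHAR_LENGTH(c) FROM t_pad;  # 2: trailing pad spaces are stripped by default
SET sql_mode='pad_char_to_full_length';
SELECT CHAR_LENGTH(c) FROM t_pad;  # 4: the value is padded to the declared CHAR width
SET sql_mode='';
DROP TABLE t_pad;
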
diff --git a/mysql-test/main/update.result b/mysql-test/main/update.result
index f5edf1c6be3..7b6426d2ec5 100644
--- a/mysql-test/main/update.result
+++ b/mysql-test/main/update.result
@@ -734,3 +734,32 @@ UPDATE t1,t2 SET t1.i1 = -39 WHERE t2.d1 <> t1.i1 AND t2.d1 = t1.d2;
ERROR 22007: Incorrect datetime value: '19' for column `test`.`t1`.`i1` at row 1
DROP TABLE t1,t2;
# End of MariaDB 10.2 tests
+#
+# MDEV-20773: UPDATE with LIKE predicate over non-indexed column
+# of VARCHAR type
+#
+create table t1 (a1 varchar(30), a2 varchar(30) collate utf8_bin);
+insert into t1 values
+('aa','zzz'), ('b','xxaa'), ('ccc','yyy'), ('ddd','xxb');
+analyze table t1 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+explain extended
+update t1 set a1 = 'u'
+ where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c');
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 49.22 Using where
+2 SUBQUERY t1 ALL NULL NULL NULL NULL 4 50.00 Using where
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a1` = 'u' where `test`.`t1`.`a2` like 'xx%'
+update t1 set a1 = 'u'
+ where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c');
+select * from t1;
+a1 a2
+aa zzz
+u xxaa
+ccc yyy
+u xxb
+drop table t1;
+# End of MariaDB 10.4 tests
diff --git a/mysql-test/main/update.test b/mysql-test/main/update.test
index 8a6949447ee..147d69d50c9 100644
--- a/mysql-test/main/update.test
+++ b/mysql-test/main/update.test
@@ -676,3 +676,26 @@ UPDATE t1,t2 SET t1.i1 = -39 WHERE t2.d1 <> t1.i1 AND t2.d1 = t1.d2;
DROP TABLE t1,t2;
--echo # End of MariaDB 10.2 tests
+
+--echo #
+--echo # MDEV-20773: UPDATE with LIKE predicate over non-indexed column
+--echo # of VARCHAR type
+--echo #
+
+create table t1 (a1 varchar(30), a2 varchar(30) collate utf8_bin);
+insert into t1 values
+ ('aa','zzz'), ('b','xxaa'), ('ccc','yyy'), ('ddd','xxb');
+analyze table t1 persistent for all;
+
+explain extended
+update t1 set a1 = 'u'
+ where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c');
+
+update t1 set a1 = 'u'
+ where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c');
+
+select * from t1;
+
+drop table t1;
+
+--echo # End of MariaDB 10.4 tests
diff --git a/mysql-test/suite/galera/r/MDEV-30955.result b/mysql-test/suite/galera/r/MDEV-30955.result
new file mode 100644
index 00000000000..2a090cb58bc
--- /dev/null
+++ b/mysql-test/suite/galera/r/MDEV-30955.result
@@ -0,0 +1,26 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t (a CHAR(1) KEY);
+START TRANSACTION;
+HANDLER t OPEN;
+disconnect node_1;
+connect node_1, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+DROP TABLE t;
+BACKUP STAGE START;
+START TRANSACTION;
+disconnect node_1;
+connect node_1, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+CREATE TABLE t2 (f1 INTEGER PRIMARY KEY);
+START TRANSACTION;
+INSERT INTO t1 VALUES(1);
+HANDLER t2 OPEN;
+connection node_2;
+INSERT INTO t1 VALUES(1);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+connection node_1;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+DROP TABLE t1,t2;
diff --git a/mysql-test/suite/galera/r/mdev-26175.result b/mysql-test/suite/galera/r/mdev-26175.result
new file mode 100644
index 00000000000..f84244fe916
--- /dev/null
+++ b/mysql-test/suite/galera/r/mdev-26175.result
@@ -0,0 +1,24 @@
+connection node_2;
+connection node_1;
+connection node_1;
+SET sql_mode="no_zero_date";
+SET GLOBAL wsrep_max_ws_rows=1;
+CREATE TABLE t2 (a INT);
+CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+CREATE TRIGGER tgr BEFORE INSERT ON t1 FOR EACH ROW INSERT INTO t2 VALUES (0);
+INSERT INTO t1 VALUES (0),(1);
+ERROR HY000: wsrep_max_ws_rows exceeded
+SELECT * FROM t1;
+a
+SELECT * FROM t2;
+a
+connection node_2;
+SELECT * FROM t1;
+a
+SELECT * FROM t2;
+a
+connection node_1;
+SET sql_mode=DEFAULT;
+SET GLOBAL wsrep_max_ws_rows=DEFAULT;
+DROP TRIGGER tgr;
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/galera/t/MDEV-30955.test b/mysql-test/suite/galera/t/MDEV-30955.test
new file mode 100644
index 00000000000..18577120e83
--- /dev/null
+++ b/mysql-test/suite/galera/t/MDEV-30955.test
@@ -0,0 +1,70 @@
+#
+# MDEV-30955
+# Assertion `thd->mdl_context.is_lock_owner(MDL_key::TABLE,
+# table->s->db.str, table->s->table_name.str, MDL_SHARED)'
+# failed in close_thread_table()
+#
+
+--source include/galera_cluster.inc
+
+#
+# Test 1: Assertion thd->mdl_context.is_lock_owner()
+# failed in close_thread_table()
+#
+CREATE TABLE t (a CHAR(1) KEY);
+START TRANSACTION;
+HANDLER t OPEN;
+
+#
+# If the bug is present, the transaction will be aborted
+# through Wsrep_client_service::bf_rollback() and will
+# release explicit locks too early. Later, during
+# THD::cleanup(), table t will be closed and the
+# THD is expected to be the owner of the MDL lock that
+# was just released.
+#
+--disconnect node_1
+
+--connect node_1, 127.0.0.1, root, , test, $NODE_MYPORT_1
+DROP TABLE t;
+
+
+#
+# Test 2: Similar issue reproduces also with BACKUP STAGE locks.
+# See comments in MDEV-25037
+#
+
+BACKUP STAGE START;
+START TRANSACTION;
+--disconnect node_1
+--connect node_1, 127.0.0.1, root, , test, $NODE_MYPORT_1
+
+
+#
+# Test 3: Assertion `!thd->mdl_context.has_locks()' failed
+# in do_command()
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+CREATE TABLE t2 (f1 INTEGER PRIMARY KEY);
+
+--let $bf_count = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.global_status WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'`
+
+START TRANSACTION;
+INSERT INTO t1 VALUES(1);
+HANDLER t2 OPEN;
+
+--connection node_2
+INSERT INTO t1 VALUES(1);
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+--let $wait_condition = SELECT VARIABLE_VALUE = $bf_count + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'
+--source include/wait_condition.inc
+
+--connection node_1
+--error ER_LOCK_DEADLOCK
+COMMIT;
+
+DROP TABLE t1,t2;
diff --git a/mysql-test/suite/galera/t/galera_sequences.test b/mysql-test/suite/galera/t/galera_sequences.test
index faa3b46d2a7..613823d83e9 100644
--- a/mysql-test/suite/galera/t/galera_sequences.test
+++ b/mysql-test/suite/galera/t/galera_sequences.test
@@ -1,4 +1,5 @@
--source include/galera_cluster.inc
+--source include/have_innodb.inc
#
# MDEV-19353 : Alter Sequence do not replicate to another nodes with in Galera Cluster
diff --git a/mysql-test/suite/galera/t/mdev-26175.test b/mysql-test/suite/galera/t/mdev-26175.test
new file mode 100644
index 00000000000..1a3f1153e03
--- /dev/null
+++ b/mysql-test/suite/galera/t/mdev-26175.test
@@ -0,0 +1,27 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# MDEV-26175 : Assertion `! thd->in_sub_stmt' failed in bool trans_rollback_stmt(THD*)
+#
+--connection node_1
+SET sql_mode="no_zero_date";
+SET GLOBAL wsrep_max_ws_rows=1;
+CREATE TABLE t2 (a INT);
+CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+CREATE TRIGGER tgr BEFORE INSERT ON t1 FOR EACH ROW INSERT INTO t2 VALUES (0);
+
+--error ER_ERROR_DURING_COMMIT
+INSERT INTO t1 VALUES (0),(1);
+SELECT * FROM t1;
+SELECT * FROM t2;
+
+--connection node_2
+SELECT * FROM t1;
+SELECT * FROM t2;
+
+--connection node_1
+SET sql_mode=DEFAULT;
+SET GLOBAL wsrep_max_ws_rows=DEFAULT;
+DROP TRIGGER tgr;
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/galera_sr/r/MDEV-30862.result b/mysql-test/suite/galera_sr/r/MDEV-30862.result
new file mode 100644
index 00000000000..43da77f24df
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/MDEV-30862.result
@@ -0,0 +1,11 @@
+connection node_2;
+connection node_1;
+SET autocommit=0;
+SET SESSION wsrep_trx_fragment_size=1;
+CREATE TABLE t2 SELECT seq FROM seq_1_to_50;
+ERROR 42000: CREATE TABLE AS SELECT is not supported with streaming replication
+CREATE TABLE t1 (f1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
+INSERT INTO t1 VALUES(DEFAULT);
+CREATE TABLE t2 SELECT * FROM t1;
+ERROR 42000: CREATE TABLE AS SELECT is not supported with streaming replication
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/MDEV-30862.test b/mysql-test/suite/galera_sr/t/MDEV-30862.test
new file mode 100644
index 00000000000..6be77b4d71b
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/MDEV-30862.test
@@ -0,0 +1,24 @@
+#
+# MDEV-30862 Assertion `mode_ == m_high_priority' failed in
+# void wsrep::client_state::after_applying()
+#
+
+--source include/galera_cluster.inc
+--source include/have_sequence.inc
+
+SET autocommit=0;
+SET SESSION wsrep_trx_fragment_size=1;
+--error ER_NOT_ALLOWED_COMMAND
+CREATE TABLE t2 SELECT seq FROM seq_1_to_50;
+
+
+#
+# Same test without using seq
+#
+CREATE TABLE t1 (f1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
+INSERT INTO t1 VALUES(DEFAULT);
+--error ER_NOT_ALLOWED_COMMAND
+CREATE TABLE t2 SELECT * FROM t1;
+
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/default_row_format_alter.result b/mysql-test/suite/innodb/r/default_row_format_alter.result
index 42cbab8a5f2..33936b59003 100644
--- a/mysql-test/suite/innodb/r/default_row_format_alter.result
+++ b/mysql-test/suite/innodb/r/default_row_format_alter.result
@@ -129,5 +129,25 @@ SELECT ROW_FORMAT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1';
ROW_FORMAT
Dynamic
DROP TABLE t1;
+#
+# MDEV-31025 Redundant table alter fails when fixed column
+# stored externally
+#
+set @old_sql_mode = @@sql_mode;
+SET @@sql_mode='';
+CREATE TABLE t1(pk INT,c CHAR(255),c2 CHAR(255),c3 CHAR(255),
+c4 char(255), c5 char(255), c6 char(255),
+c7 char(255), c8 char(255), primary key(pk)
+)Engine=InnoDB character set utf32 ROW_FORMAT=REDUNDANT;
+INSERT INTO t1(pk, c) VALUES (1, repeat('a', 255));
+ALTER TABLE t1 FORCE;
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+SELECT LENGTH(c) FROM t1;
+LENGTH(c)
+1020
+DROP TABLE t1;
+set @@sql_mode = @old_sql_mode;
# End of 10.4 tests
SET GLOBAL innodb_default_row_format = @row_format;
diff --git a/mysql-test/suite/innodb/r/sql_mode_pad_char_to_full_length.result b/mysql-test/suite/innodb/r/sql_mode_pad_char_to_full_length.result
new file mode 100644
index 00000000000..09c1cf57497
--- /dev/null
+++ b/mysql-test/suite/innodb/r/sql_mode_pad_char_to_full_length.result
@@ -0,0 +1,51 @@
+SET default_storage_engine=InnoDB;
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic
+#
+#
+# MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic
+#
+CREATE TABLE t1 (a INT,b CHAR(20));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(20) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+CREATE UNIQUE INDEX bi USING HASH ON t1 (b);
+INSERT INTO t1 VALUES (0,0);
+SET sql_mode='pad_char_to_full_length';
+DELETE FROM t1;
+DROP TABLE t1;
+SET sql_mode='';
+CREATE TABLE t1 (a INT,b CHAR(20));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(20) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+CREATE UNIQUE INDEX bi USING HASH ON t1 (b);
+SET sql_mode='pad_char_to_full_length';
+INSERT INTO t1 VALUES (0,0);
+DELETE FROM t1;
+DROP TABLE t1;
+SET sql_mode='';
+CREATE OR REPLACE TABLE t1 (a CHAR(20),b CHAR(20));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` char(20) DEFAULT NULL,
+ `b` char(20) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+CREATE UNIQUE INDEX bi USING HASH ON t1 (b);
+INSERT INTO t1 VALUES (0,0);
+SET sql_mode='pad_char_to_full_length';
+DELETE FROM t1;
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/suite/innodb/t/default_row_format_alter.test b/mysql-test/suite/innodb/t/default_row_format_alter.test
index f5dd246efb5..5f2170454f3 100644
--- a/mysql-test/suite/innodb/t/default_row_format_alter.test
+++ b/mysql-test/suite/innodb/t/default_row_format_alter.test
@@ -150,6 +150,23 @@ ALTER TABLE t1 DROP b;
SELECT ROW_FORMAT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1';
DROP TABLE t1;
+--echo #
+--echo # MDEV-31025 Redundant table alter fails when fixed column
+--echo # stored externally
+--echo #
+set @old_sql_mode = @@sql_mode;
+SET @@sql_mode='';
+CREATE TABLE t1(pk INT,c CHAR(255),c2 CHAR(255),c3 CHAR(255),
+ c4 char(255), c5 char(255), c6 char(255),
+ c7 char(255), c8 char(255), primary key(pk)
+ )Engine=InnoDB character set utf32 ROW_FORMAT=REDUNDANT;
+INSERT INTO t1(pk, c) VALUES (1, repeat('a', 255));
+ALTER TABLE t1 FORCE;
+CHECK TABLE t1;
+SELECT LENGTH(c) FROM t1;
+DROP TABLE t1;
+set @@sql_mode = @old_sql_mode;
+
--echo # End of 10.4 tests
SET GLOBAL innodb_default_row_format = @row_format;
diff --git a/mysql-test/suite/innodb/t/sql_mode_pad_char_to_full_length.test b/mysql-test/suite/innodb/t/sql_mode_pad_char_to_full_length.test
new file mode 100644
index 00000000000..ba286c744d9
--- /dev/null
+++ b/mysql-test/suite/innodb/t/sql_mode_pad_char_to_full_length.test
@@ -0,0 +1,18 @@
+--source include/have_innodb.inc
+
+SET default_storage_engine=InnoDB;
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic
+--echo #
+
+--source include/sql_mode_pad_char_to_full_length.inc
+
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/suite/mariabackup/aria_log_dir_path.result b/mysql-test/suite/mariabackup/aria_log_dir_path.result
new file mode 100644
index 00000000000..1a877321bbe
--- /dev/null
+++ b/mysql-test/suite/mariabackup/aria_log_dir_path.result
@@ -0,0 +1,41 @@
+#
+# MDEV-30968 mariadb-backup does not copy Aria logs if aria_log_dir_path is used
+#
+# Restart mariadbd with the test specific parameters
+# restart: --aria-log-file-size=8388608 --aria-log-purge-type=external --loose-aria-log-dir-path=MYSQLTEST_VARDIR/tmp/backup_aria_log_dir_path
+# Create and populate an Aria table (and Aria logs)
+CREATE TABLE t1 (id INT, txt LONGTEXT) ENGINE=Aria;
+BEGIN NOT ATOMIC
+FOR id IN 0..9 DO
+INSERT INTO test.t1 (id, txt) VALUES (id, REPEAT(id,1024*1024));
+END FOR;
+END;
+$$
+# Testing aria log files before --backup
+SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/;
+SHOW ENGINE aria logs;
+Type Name Status
+Aria aria_log.00000001 free
+Aria aria_log.00000002 in use
+# mariadb-backup --backup
+# mariadb-backup --prepare
+# shutdown server
+# remove datadir
+# remove aria-log-dir-path
+# mariadb-backup --copy-back
+# with parameters: --defaults-file=MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=MYSQLTEST_VARDIR/mysqld.1/data/ --target-dir=MYSQLTEST_VARDIR/tmp/backup --parallel=2 --throttle=1 --aria-log-dir-path=MYSQLTEST_VARDIR/tmp/backup_aria_log_dir_path
+# starting server
+# restart: --aria-log-file-size=8388608 --aria-log-purge-type=external --loose-aria-log-dir-path=MYSQLTEST_VARDIR/tmp/backup_aria_log_dir_path
+# Check that the table is there after --copy-back
+SELECT COUNT(*) from t1;
+COUNT(*)
+10
+DROP TABLE t1;
+# Testing aria log files after --copy-back
+SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/;
+SHOW ENGINE aria logs;
+Type Name Status
+Aria aria_log.00000001 free
+Aria aria_log.00000002 in use
+# Restarting mariadbd with default parameters
+# restart
diff --git a/mysql-test/suite/mariabackup/aria_log_dir_path.test b/mysql-test/suite/mariabackup/aria_log_dir_path.test
new file mode 100644
index 00000000000..0178cd4eae5
--- /dev/null
+++ b/mysql-test/suite/mariabackup/aria_log_dir_path.test
@@ -0,0 +1,105 @@
+--source include/have_maria.inc
+
+--echo #
+--echo # MDEV-30968 mariadb-backup does not copy Aria logs if aria_log_dir_path is used
+--echo #
+
+--let $datadir=`SELECT @@datadir`
+--let $targetdir=$MYSQLTEST_VARDIR/tmp/backup
+
+if ($ARIA_LOGDIR_MARIADB == '')
+{
+ --let $ARIA_LOGDIR_MARIADB=$MYSQLTEST_VARDIR/tmp/backup_aria_log_dir_path
+}
+
+if ($ARIA_LOGDIR_FS == '')
+{
+ --let $ARIA_LOGDIR_FS=$MYSQLTEST_VARDIR/tmp/backup_aria_log_dir_path
+}
+
+--let $server_parameters=--aria-log-file-size=8388608 --aria-log-purge-type=external --loose-aria-log-dir-path=$ARIA_LOGDIR_MARIADB
+
+
+--echo # Restart mariadbd with the test specific parameters
+--mkdir $ARIA_LOGDIR_FS
+--let $restart_parameters=$server_parameters
+--source include/restart_mysqld.inc
+
+
+--echo # Create and populate an Aria table (and Aria logs)
+CREATE TABLE t1 (id INT, txt LONGTEXT) ENGINE=Aria;
+DELIMITER $$;
+BEGIN NOT ATOMIC
+ FOR id IN 0..9 DO
+ INSERT INTO test.t1 (id, txt) VALUES (id, REPEAT(id,1024*1024));
+ END FOR;
+END;
+$$
+DELIMITER ;$$
+
+
+--echo # Testing aria log files before --backup
+SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/;
+--file_exists $ARIA_LOGDIR_FS/aria_log_control
+--file_exists $ARIA_LOGDIR_FS/aria_log.00000001
+--file_exists $ARIA_LOGDIR_FS/aria_log.00000002
+--error 1
+--file_exists $ARIA_LOGDIR_FS/aria_log.00000003
+--replace_regex /Size +[0-9]+ ; .+aria_log/aria_log/
+SHOW ENGINE aria logs;
+
+
+--echo # mariadb-backup --backup
+--disable_result_log
+--mkdir $targetdir
+--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir
+--enable_result_log
+
+
+--echo # mariadb-backup --prepare
+--disable_result_log
+--exec $XTRABACKUP --prepare --target-dir=$targetdir
+--enable_result_log
+
+
+--echo # shutdown server
+--disable_result_log
+--source include/shutdown_mysqld.inc
+--echo # remove datadir
+--rmdir $datadir
+--echo # remove aria-log-dir-path
+--rmdir $ARIA_LOGDIR_FS
+
+--echo # mariadb-backup --copy-back
+--let $mariadb_backup_parameters=--defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$datadir --target-dir=$targetdir --parallel=2 --throttle=1 --aria-log-dir-path=$ARIA_LOGDIR_MARIADB
+--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+--exec echo "# with parameters: $mariadb_backup_parameters"
+--exec $XTRABACKUP $mariadb_backup_parameters
+
+--echo # starting server
+--let $restart_parameters=$server_parameters
+--source include/start_mysqld.inc
+--enable_result_log
+--rmdir $targetdir
+
+
+--echo # Check that the table is there after --copy-back
+SELECT COUNT(*) from t1;
+DROP TABLE t1;
+
+
+--echo # Testing aria log files after --copy-back
+SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/;
+--file_exists $ARIA_LOGDIR_FS/aria_log_control
+--file_exists $ARIA_LOGDIR_FS/aria_log.00000001
+--file_exists $ARIA_LOGDIR_FS/aria_log.00000002
+--error 1
+--file_exists $ARIA_LOGDIR_FS/aria_log.00000003
+--replace_regex /Size +[0-9]+ ; .+aria_log/aria_log/
+SHOW ENGINE aria logs;
+
+
+--echo # Restarting mariadbd with default parameters
+--let $restart_parameters=
+--source include/restart_mysqld.inc
+--rmdir $ARIA_LOGDIR_FS
diff --git a/mysql-test/suite/mariabackup/aria_log_dir_path_rel.result b/mysql-test/suite/mariabackup/aria_log_dir_path_rel.result
new file mode 100644
index 00000000000..7fef26096e0
--- /dev/null
+++ b/mysql-test/suite/mariabackup/aria_log_dir_path_rel.result
@@ -0,0 +1,41 @@
+#
+# MDEV-30968 mariadb-backup does not copy Aria logs if aria_log_dir_path is used
+#
+# Restart mariadbd with the test specific parameters
+# restart: --aria-log-file-size=8388608 --aria-log-purge-type=external --loose-aria-log-dir-path=../../tmp/backup_aria_log_dir_path_rel
+# Create and populate an Aria table (and Aria logs)
+CREATE TABLE t1 (id INT, txt LONGTEXT) ENGINE=Aria;
+BEGIN NOT ATOMIC
+FOR id IN 0..9 DO
+INSERT INTO test.t1 (id, txt) VALUES (id, REPEAT(id,1024*1024));
+END FOR;
+END;
+$$
+# Testing aria log files before --backup
+SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/;
+SHOW ENGINE aria logs;
+Type Name Status
+Aria aria_log.00000001 free
+Aria aria_log.00000002 in use
+# mariadb-backup --backup
+# mariadb-backup --prepare
+# shutdown server
+# remove datadir
+# remove aria-log-dir-path
+# mariadb-backup --copy-back
+# with parameters: --defaults-file=MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=MYSQLTEST_VARDIR/mysqld.1/data/ --target-dir=MYSQLTEST_VARDIR/tmp/backup --parallel=2 --throttle=1 --aria-log-dir-path=../../tmp/backup_aria_log_dir_path_rel
+# starting server
+# restart: --aria-log-file-size=8388608 --aria-log-purge-type=external --loose-aria-log-dir-path=../../tmp/backup_aria_log_dir_path_rel
+# Check that the table is there after --copy-back
+SELECT COUNT(*) from t1;
+COUNT(*)
+10
+DROP TABLE t1;
+# Testing aria log files after --copy-back
+SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/;
+SHOW ENGINE aria logs;
+Type Name Status
+Aria aria_log.00000001 free
+Aria aria_log.00000002 in use
+# Restarting mariadbd with default parameters
+# restart
diff --git a/mysql-test/suite/mariabackup/aria_log_dir_path_rel.test b/mysql-test/suite/mariabackup/aria_log_dir_path_rel.test
new file mode 100644
index 00000000000..c8169959929
--- /dev/null
+++ b/mysql-test/suite/mariabackup/aria_log_dir_path_rel.test
@@ -0,0 +1,4 @@
+--let $ARIA_LOGDIR_MARIADB=../../tmp/backup_aria_log_dir_path_rel
+--let $ARIA_LOGDIR_FS=$MYSQLTEST_VARDIR/tmp/backup_aria_log_dir_path_rel
+
+--source aria_log_dir_path.test
diff --git a/scripts/wsrep_sst_mariabackup.sh b/scripts/wsrep_sst_mariabackup.sh
index 6ec975f5a59..d6334052f24 100644
--- a/scripts/wsrep_sst_mariabackup.sh
+++ b/scripts/wsrep_sst_mariabackup.sh
@@ -340,6 +340,9 @@ get_transfer()
"Use workaround for socat $SOCAT_VERSION bug"
fi
fi
+ if check_for_version "$SOCAT_VERSION" '1.7.4'; then
+ tcmd="$tcmd,no-sni=1"
+ fi
fi
if [ "${sockopt#*,dhparam=}" = "$sockopt" ]; then
diff --git a/sql/handler.cc b/sql/handler.cc
index e0dd51376ad..926ef2f4f54 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -6837,7 +6837,13 @@ static int wsrep_after_row(THD *thd)
thd->wsrep_affected_rows > wsrep_max_ws_rows &&
wsrep_thd_is_local(thd))
{
- trans_rollback_stmt(thd) || trans_rollback(thd);
+ /*
+      If we are inside a stored function or trigger we should not commit or
+      roll back the current statement transaction. See the comment in the
+      ha_commit_trans() call for more information.
+ */
+ if (!thd->in_sub_stmt)
+ trans_rollback_stmt(thd) || trans_rollback(thd);
my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0));
DBUG_RETURN(ER_ERROR_DURING_COMMIT);
}
diff --git a/sql/log.cc b/sql/log.cc
index 7b512e1a479..76d93363dd7 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -413,7 +413,7 @@ private:
Rows_log_event *m_pending;
/*
- Bit flags for what has been writting to cache. Used to
+    Bit flags for what has been written to cache. Used to
discard logs without any data changes.
see enum_logged_status;
*/
diff --git a/sql/log_event.h b/sql/log_event.h
index f7d34138d1c..5dcf0315f68 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -716,7 +716,7 @@ enum Log_event_type
/*
- Bit flags for what has been writting to cache. Used to
+  Bit flags for what has been written to cache. Used to
discard logs with table map events but not row events and
nothing else important. This is stored by cache.
*/
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index dc24ad52e9a..a54575a2004 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -4433,7 +4433,10 @@ static void init_ssl()
{
sql_print_error("Failed to setup SSL");
sql_print_error("SSL error: %s", sslGetErrString(error));
- unireg_abort(1);
+ if (!opt_bootstrap)
+ unireg_abort(1);
+ opt_use_ssl = 0;
+ have_ssl= SHOW_OPTION_DISABLED;
}
else
ssl_acceptor_stats.init();
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 82b19e23fd4..7b6f373eea4 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -3554,7 +3554,10 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
}
else
{
+ enum_check_fields save_count_cuted_fields= thd->count_cuted_fields;
+ thd->count_cuted_fields= CHECK_FIELD_IGNORE;
rows= records_in_column_ranges(&param, idx, key);
+ thd->count_cuted_fields= save_count_cuted_fields;
if (rows != DBL_MAX)
{
key->field->cond_selectivity= rows/table_records;
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 30a464d06e9..cd6b43da21d 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -787,6 +787,9 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
cursor->outer_join|= JOIN_TYPE_OUTER;
}
}
+  // Prevent name resolution of a possible ORDER BY clause against outer contexts
+ if (unit->fake_select_lex)
+ unit->fake_select_lex->context.outer_context= 0;
if (unlikely(thd->trace_started()))
{
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 62d33d2a007..4c7313265e8 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1306,7 +1306,7 @@ bool do_command(THD *thd)
in wsrep_before_command().
*/
WSREP_LOG_THD(thd, "enter found BF aborted");
- DBUG_ASSERT(!thd->mdl_context.has_locks());
+ DBUG_ASSERT(!thd->mdl_context.has_transactional_locks());
DBUG_ASSERT(!thd->get_stmt_da()->is_set());
/* We let COM_QUIT and COM_STMT_CLOSE to execute even if wsrep aborted. */
if (command == COM_STMT_EXECUTE)
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 9b47ebc48f4..18e3d214e66 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -28323,7 +28323,7 @@ void st_select_lex::print_item_list(THD *thd, String *str,
outer_select() can not be used here because it is for name resolution
and will return NULL at any end of name resolution chain (view/derived)
*/
- bool top_level= (get_master()->get_master() == 0);
+ bool top_level= (get_master() == &thd->lex->unit);
List_iterator_fast<Item> it(item_list);
Item *item;
while ((item= it++))
@@ -28430,7 +28430,7 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
return;
}
- bool top_level= (get_master()->get_master() == 0);
+ bool top_level= (get_master() == &thd->lex->unit);
enum explainable_cmd_type sel_type= SELECT_CMD;
if (top_level)
sel_type= get_explainable_cmd_type(thd);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 15be1c66f2a..be46a7df732 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -12187,6 +12187,19 @@ bool Sql_cmd_create_table_like::execute(THD *thd)
}
#endif
+#ifdef WITH_WSREP
+ if (select_lex->item_list.elements && // With SELECT
+ WSREP(thd) && thd->variables.wsrep_trx_fragment_size > 0)
+ {
+ my_message(
+ ER_NOT_ALLOWED_COMMAND,
+ "CREATE TABLE AS SELECT is not supported with streaming replication",
+ MYF(0));
+ res= 1;
+ goto end_with_restore_list;
+ }
+#endif /* WITH_WSREP */
+
if (select_lex->item_list.elements || select_lex->tvc) // With select or TVC
{
select_result *result;
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index d506eb489b3..a10119e2b23 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -346,9 +346,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
*/
%ifdef MARIADB
-%expect 82
+%expect 64
%else
-%expect 83
+%expect 65
%endif
/*
@@ -1166,7 +1166,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%left PREC_BELOW_NOT
-%nonassoc LOW_PRIORITY_NOT
+/* The precedence of boolean NOT is in fact here. See the comment below. */
+
%left '=' EQUAL_SYM GE '>' LE '<' NE
%nonassoc IS
%right BETWEEN_SYM
@@ -1178,6 +1179,24 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%left '*' '/' '%' DIV_SYM MOD_SYM
%left '^'
%left MYSQL_CONCAT_SYM
+/*
+ Boolean negation has a special branch in "expr" starting with NOT_SYM.
+ The precedence of logical negation is determined by the grammar itself
+ (without using Bison terminal symbol precedence) in this order
+ - Boolean factor (i.e. logical AND)
+ - Boolean NOT
+ - Boolean test (such as '=', IS NULL, IS TRUE)
+
+ But we also need a precedence for NOT_SYM in other contexts,
+ to shift (without reduce) in these cases:
+ predicate <here> NOT IN ...
+ predicate <here> NOT BETWEEN ...
+ predicate <here> NOT LIKE ...
+ predicate <here> NOT REGEXP ...
+ If the precedence of NOT_SYM was low, it would reduce immediately
+ after scanning "predicate" and then produce a syntax error on "NOT".
+*/
+%nonassoc NOT_SYM
%nonassoc NEG '~' NOT2_SYM BINARY
%nonassoc COLLATE_SYM
%nonassoc SUBQUERY_AS_EXPR
@@ -1447,6 +1466,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
literal insert_ident order_ident temporal_literal
simple_ident expr sum_expr in_sum_expr
variable variable_aux
+ boolean_test
predicate bit_expr parenthesized_expr
table_wild simple_expr column_default_non_parenthesized_expr udf_expr
primary_expr string_factor_expr mysql_concatenation_expr
@@ -9366,79 +9386,83 @@ expr:
MYSQL_YYABORT;
}
}
- | NOT_SYM expr %prec LOW_PRIORITY_NOT
+ | NOT_SYM expr
{
$$= negate_expression(thd, $2);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS TRUE_SYM %prec IS
+ | boolean_test %prec PREC_BELOW_NOT
+ ;
+
+boolean_test:
+ boolean_test IS TRUE_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_istrue(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS not TRUE_SYM %prec IS
+ | boolean_test IS not TRUE_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isnottrue(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS FALSE_SYM %prec IS
+ | boolean_test IS FALSE_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isfalse(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS not FALSE_SYM %prec IS
+ | boolean_test IS not FALSE_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isnotfalse(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS UNKNOWN_SYM %prec IS
+ | boolean_test IS UNKNOWN_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isnull(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS not UNKNOWN_SYM %prec IS
+ | boolean_test IS not UNKNOWN_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isnotnull(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS NULL_SYM %prec PREC_BELOW_NOT
+ | boolean_test IS NULL_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isnull(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS not NULL_SYM %prec IS
+ | boolean_test IS not NULL_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isnotnull(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr EQUAL_SYM predicate %prec EQUAL_SYM
+ | boolean_test EQUAL_SYM predicate %prec EQUAL_SYM
{
$$= new (thd->mem_root) Item_func_equal(thd, $1, $3);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr comp_op predicate %prec '='
+ | boolean_test comp_op predicate %prec '='
{
$$= (*$2)(0)->create(thd, $1, $3);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr comp_op all_or_any '(' subselect ')' %prec '='
+ | boolean_test comp_op all_or_any '(' subselect ')' %prec '='
{
$$= all_any_subquery_creator(thd, $1, $2, $3, $5);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | predicate
+ | predicate %prec BETWEEN_SYM
;
predicate:
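
With the new boolean_test rule and the %nonassoc NOT_SYM precedence, a NOT that follows a completed predicate is shifted (NOT IN, NOT BETWEEN, NOT LIKE, NOT REGEXP), while plain logical negation keeps its low effective precedence through the dedicated NOT_SYM branch of expr. A minimal sketch of the resulting parses, not part of the patch and mirroring the parser.test additions above:

SELECT 1=2 NOT BETWEEN 3 AND 4;    # parsed as (1=2) NOT BETWEEN 3 AND 4, returns 1
SELECT (1=2) NOT BETWEEN 3 AND 4;  # same item tree with the grouping made explicit
SELECT 1=2 NOT IN (3,4);           # parsed as (1=2) NOT IN (3,4), returns 1
SELECT NOT 1=2;                    # logical NOT still applies last: NOT (1=2), returns 1
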
diff --git a/sql/wsrep_client_service.cc b/sql/wsrep_client_service.cc
index e77f307f028..dc7972ed108 100644
--- a/sql/wsrep_client_service.cc
+++ b/sql/wsrep_client_service.cc
@@ -376,8 +376,6 @@ int Wsrep_client_service::bf_rollback()
m_thd->global_read_lock.unlock_global_read_lock(m_thd);
}
m_thd->release_transactional_locks();
- mysql_ull_cleanup(m_thd);
- m_thd->mdl_context.release_explicit_locks();
}
DBUG_RETURN(ret);
diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc
index d9988914c4d..53ef20f3e78 100644
--- a/sql/wsrep_high_priority_service.cc
+++ b/sql/wsrep_high_priority_service.cc
@@ -391,8 +391,6 @@ int Wsrep_high_priority_service::rollback(const wsrep::ws_handle& ws_handle,
m_thd->killed);
m_thd->release_transactional_locks();
- mysql_ull_cleanup(m_thd);
- m_thd->mdl_context.release_explicit_locks();
free_root(m_thd->mem_root, MYF(MY_KEEP_PREALLOC));
diff --git a/storage/connect/catalog.h b/storage/connect/catalog.h
index 2649a50cf76..a46615f5d6e 100644
--- a/storage/connect/catalog.h
+++ b/storage/connect/catalog.h
@@ -39,9 +39,9 @@ typedef struct _colinfo {
PCSZ Name;
int Type;
int Offset;
- int Length;
+ unsigned Length;
int Key;
- int Precision;
+ unsigned Precision;
int Scale;
int Opt;
int Freq;
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index cce05706cc9..775782bace2 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -1618,10 +1618,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
pcf->Scale= 0;
pcf->Opt= (fop) ? (int)fop->opt : 0;
- if (fp->field_length >= 0)
- pcf->Length= fp->field_length;
- else
- pcf->Length= 256; // BLOB?
+ pcf->Length= fp->field_length;
pcf->Precision= pcf->Length;
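
The catalog.h hunk above widens _colinfo::Length and Precision from int to unsigned, and the ha_connect.cc hunk drops the "if (fp->field_length >= 0) ... else 256" branch. The dropped branch was dead code: Field::field_length is an unsigned value, so the comparison could never be false. A tiny stand-alone illustration (hypothetical variable, not code from this commit):

#include <stdio.h>

int main(void)
{
  unsigned field_length = 0;
  if (field_length >= 0)          /* -Wtype-limits: comparison is always true */
    printf("always taken\n");
  return 0;
}
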
diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp
index 144d31735d6..786a53db5a2 100644
--- a/storage/connect/reldef.cpp
+++ b/storage/connect/reldef.cpp
@@ -91,11 +91,11 @@ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char* tab, char* db, bool info)
/* directories are used (to make this even remotely secure). */
/*********************************************************************/
if (check_valid_path(module, strlen(module))) {
- strcpy(g->Message, "Module cannot contain a path");
+ safe_strcpy(g->Message, sizeof(g->Message), "Module cannot contain a path");
return NULL;
}
else if (strlen(subtype)+1+3 >= sizeof(getname)) {
- strcpy(g->Message, "Subtype string too long");
+ safe_strcpy(g->Message, sizeof(g->Message), "Subtype string too long");
return NULL;
}
else
@@ -118,7 +118,8 @@ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char* tab, char* db, bool info)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)buf, sizeof(buf), NULL);
- strcat(strcat(g->Message, ": "), buf);
+ safe_strcat(g->Message, sizeof(g->Message), ": ");
+ safe_strcat(g->Message, sizeof(g->Message), buf);
return NULL;
} // endif hDll
@@ -281,7 +282,7 @@ char *RELDEF::GetStringCatInfo(PGLOBAL g, PCSZ what, PCSZ sdef)
if (IsFileType(GetTypeID(ftype))) {
name= Hc->GetPartName();
sval= (char*)PlugSubAlloc(g, NULL, strlen(name) + 12);
- strcat(strcpy(sval, name), ".");
+ snprintf(sval, strlen(name) + 12, "%s.", name);
n= strlen(sval);
// Fold ftype to lower case
@@ -622,12 +623,11 @@ PTABDEF OEMDEF::GetXdef(PGLOBAL g)
/* directories are used (to make this even remotely secure). */
/*********************************************************************/
if (check_valid_path(Module, strlen(Module))) {
- strcpy(g->Message, "Module cannot contain a path");
+ safe_strcpy(g->Message, sizeof(g->Message), "Module cannot contain a path");
return NULL;
} else
// PlugSetPath(soname, Module, GetPluginDir()); // Crashes on Fedora
- strncat(strcpy(soname, GetPluginDir()), Module,
- sizeof(soname) - strlen(soname) - 1);
+ snprintf(soname, sizeof(soname), "%s%s", GetPluginDir(), Module);
#if defined(_WIN32)
// Is the DLL already loaded?
@@ -641,7 +641,8 @@ PTABDEF OEMDEF::GetXdef(PGLOBAL g)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)buf, sizeof(buf), NULL);
- strcat(strcat(g->Message, ": "), buf);
+ safe_strcat(g->Message, sizeof(g->Message), ": ");
+ safe_strcat(g->Message, sizeof(g->Message), buf);
return NULL;
} // endif hDll
@@ -661,7 +662,8 @@ PTABDEF OEMDEF::GetXdef(PGLOBAL g)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)buf, sizeof(buf), NULL);
- strcat(strcat(g->Message, ": "), buf);
+ safe_strcat(g->Message, sizeof(g->Message), ": ");
+ safe_strcat(g->Message, sizeof(g->Message), buf);
FreeLibrary((HMODULE)Hdll);
return NULL;
} // endif getdef
@@ -810,7 +812,7 @@ PTDB OEMDEF::GetTable(PGLOBAL g, MODE mode)
else
txfp = new(g) ZLBFAM(defp);
#else // !GZ_SUPPORT
- strcpy(g->Message, "Compress not supported");
+ safe_strcpy(g->Message, sizeof(g->Message), "Compress not supported");
return NULL;
#endif // !GZ_SUPPORT
} else if (rfm == RECFM_VAR) {
@@ -833,7 +835,7 @@ PTDB OEMDEF::GetTable(PGLOBAL g, MODE mode)
else
txfp = new(g) VCTFAM((PVCTDEF)defp);
#else // !VCT_SUPPORT
- strcpy(g->Message, "VCT no more supported");
+ safe_strcpy(g->Message, sizeof(g->Message), "VCT no more supported");
return NULL;
#endif // !VCT_SUPPORT
} // endif's
@@ -924,7 +926,7 @@ int COLDEF::Define(PGLOBAL g, void *, PCOLINFO cfp, int poff)
return -1;
} // endswitch
- strcpy(F.Type, GetFormatType(Buf_Type));
+ safe_strcpy(F.Type, sizeof(F.Type), GetFormatType(Buf_Type));
F.Length = cfp->Length;
F.Prec = cfp->Scale;
Offset = (cfp->Offset < 0) ? poff : cfp->Offset;
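
The CONNECT hunks above and below replace unbounded strcpy()/strcat() calls on fixed buffers such as g->Message with safe_strcpy()/safe_strcat() from m_string.h (which tabbson.cpp, tabdos.cpp and tabfmt.cpp now include). A minimal sketch of the truncating semantics these helpers are assumed to provide, with deliberately hypothetical names so it is not mistaken for the real m_string.h code:

/* Illustrative only: bounded copy/append that always NUL-terminates
   and truncates instead of overrunning dst. */
#include <string.h>

static void sketch_safe_strcpy(char *dst, size_t dst_size, const char *src)
{
  if (dst_size == 0)
    return;
  strncpy(dst, src, dst_size - 1);   /* copy at most dst_size - 1 bytes */
  dst[dst_size - 1] = '\0';          /* never left unterminated */
}

static void sketch_safe_strcat(char *dst, size_t dst_size, const char *src)
{
  size_t used = strlen(dst);         /* dst assumed already NUL-terminated */
  if (used < dst_size)
    sketch_safe_strcpy(dst + used, dst_size - used, src);
}

This is also why chained calls such as strcat(strcat(g->Message, ": "), buf) are split into two bounded appends: each append needs the destination size, so the chain no longer composes.
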
diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp
index 4cbb0e44e19..dfaf2284a72 100644
--- a/storage/connect/tabbson.cpp
+++ b/storage/connect/tabbson.cpp
@@ -39,6 +39,7 @@
#include "checklvl.h"
#include "resource.h"
#include "mycat.h" // for FNC_COL
+#include "m_string.h"
/***********************************************************************/
/* This should be an option. */
@@ -80,7 +81,7 @@ PQRYRES BSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info)
} // endif info
if (GetIntegerTableOption(g, topt, "Multiple", 0)) {
- strcpy(g->Message, "Cannot find column definition for multiple table");
+ safe_strcpy(g->Message, sizeof(g->Message), "Cannot find column definition for multiple table");
return NULL;
} // endif Multiple
@@ -206,7 +207,7 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
tdp->Uri = (dsn && *dsn ? dsn : NULL);
if (!tdp->Fn && !tdp->Uri) {
- strcpy(g->Message, MSG(MISSING_FNAME));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(MISSING_FNAME));
return 0;
} else
topt->subtype = NULL;
@@ -318,7 +319,7 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
switch (tjnp->ReadDB(g)) {
case RC_EF:
- strcpy(g->Message, "Void json table");
+ safe_strcpy(g->Message, sizeof(g->Message), "Void json table");
case RC_FX:
goto err;
default:
@@ -328,7 +329,7 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
} // endif pretty
if (!(row = (jsp) ? bp->GetObject(jsp) : NULL)) {
- strcpy(g->Message, "Can only retrieve columns from object rows");
+ safe_strcpy(g->Message, sizeof(g->Message), "Can only retrieve columns from object rows");
goto err;
} // endif row
@@ -405,7 +406,7 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j)
if (jvp && !bp->IsJson(jvp)) {
if (JsonAllPath() && !fmt[bf])
- strcat(fmt, colname);
+ safe_strcat(fmt, sizeof(fmt), colname);
jcol.Type = (JTYP)jvp->Type;
@@ -439,7 +440,7 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j)
jcol.Cbn = true;
} else if (j < lvl && !Stringified(strfy, colname)) {
if (!fmt[bf])
- strcat(fmt, colname);
+ safe_strcat(fmt, sizeof(fmt), colname);
p = fmt + strlen(fmt);
jsp = jvp;
@@ -510,11 +511,11 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j)
} else if (lvl >= 0) {
if (Stringified(strfy, colname)) {
if (!fmt[bf])
- strcat(fmt, colname);
+ safe_strcat(fmt, sizeof(fmt), colname);
- strcat(fmt, ".*");
+ safe_strcat(fmt, sizeof(fmt), ".*");
} else if (JsonAllPath() && !fmt[bf])
- strcat(fmt, colname);
+ safe_strcat(fmt, sizeof(fmt), colname);
jcol.Type = TYPE_STRG;
jcol.Len = sz;
@@ -961,7 +962,7 @@ PVAL BCUTIL::ExpandArray(PGLOBAL g, PBVAL arp, int n)
} // endif ars
if (!(bvp = GetArrayValue(arp, (nodes[n].Rx = nodes[n].Nx)))) {
- strcpy(g->Message, "Logical error expanding array");
+ safe_strcpy(g->Message, sizeof(g->Message), "Logical error expanding array");
throw 666;
} // endif jvp
@@ -1146,7 +1147,7 @@ PBVAL BCUTIL::GetRow(PGLOBAL g)
} else if (row->Type == TYPE_JAR) {
AddArrayValue(row, (nwr = NewVal(type)));
} else {
- strcpy(g->Message, "Wrong type when writing new row");
+ safe_strcpy(g->Message, sizeof(g->Message), "Wrong type when writing new row");
nwr = NULL;
} // endif's
@@ -1255,7 +1256,7 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m)
// Allocate the parse work memory
G = PlugInit(NULL, (size_t)Lrecl * (Pretty < 0 ? 3 : 5));
} else {
- strcpy(g->Message, "LRECL is not defined");
+ safe_strcpy(g->Message, sizeof(g->Message), "LRECL is not defined");
return NULL;
} // endif Lrecl
@@ -1295,7 +1296,7 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m)
} else if (m == MODE_INSERT) {
txfp = new(g) ZIPFAM(this);
} else {
- strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP");
return NULL;
} // endif's m
#else // !ZIP_SUPPORT
@@ -1325,10 +1326,10 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m)
if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) {
txfp = new(g) UNZFAM(this);
} else if (m == MODE_INSERT) {
- strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0");
+ safe_strcpy(g->Message, sizeof(g->Message), "INSERT supported only for zipped JSON when pretty=0");
return NULL;
} else {
- strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP");
return NULL;
} // endif's m
#else // !ZIP_SUPPORT
@@ -1661,7 +1662,7 @@ bool TDBBSN::PrepareWriting(PGLOBAL g)
strcat(s, ",");
if ((signed)strlen(s) > Lrecl) {
- strncpy(To_Line, s, Lrecl);
+ safe_strcpy(To_Line, Lrecl, s);
snprintf(g->Message, sizeof(g->Message), "Line truncated (lrecl=%d)", Lrecl);
return PushWarning(g, this);
} else
@@ -1764,7 +1765,7 @@ bool BSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b)
Xpd = true; // Expandable object
Nodes[i].Op = OP_EXP;
} else if (b) {
- strcpy(g->Message, "Cannot expand more than one branch");
+ safe_strcpy(g->Message, sizeof(g->Message), "Cannot expand more than one branch");
return true;
} // endif Xcol
@@ -1975,7 +1976,7 @@ bool BSONCOL::ParseJpath(PGLOBAL g)
if (SetArrayOptions(g, p, i, Nodes[i - 1].Key))
return true;
else if (Xpd && Tbp->Mode == MODE_DELETE) {
- strcpy(g->Message, "Cannot delete expanded columns");
+ safe_strcpy(g->Message, sizeof(g->Message), "Cannot delete expanded columns");
return true;
} // endif Xpd
@@ -2098,7 +2099,7 @@ void BSONCOL::ReadColumn(PGLOBAL g)
void BSONCOL::WriteColumn(PGLOBAL g)
{
if (Xpd && Tbp->Pretty < 2) {
- strcpy(g->Message, "Cannot write expanded column when Pretty is not 2");
+ safe_strcpy(g->Message, sizeof(g->Message), "Cannot write expanded column when Pretty is not 2");
throw 666;
} // endif Xpd
@@ -2128,7 +2129,7 @@ void BSONCOL::WriteColumn(PGLOBAL g)
char *s = Value->GetCharValue();
if (!(jsp = Cp->ParseJson(g, s, strlen(s)))) {
- strcpy(g->Message, s);
+ safe_strcpy(g->Message, sizeof(g->Message), s);
throw 666;
} // endif jsp
@@ -2314,7 +2315,7 @@ int TDBBSON::MakeDocument(PGLOBAL g)
if (!a && *p && *p != '[' && !IsNum(p)) {
// obj is a key
if (jsp->Type != TYPE_JOB) {
- strcpy(g->Message, "Table path does not match the json file");
+ safe_strcpy(g->Message, sizeof(g->Message), "Table path does not match the json file");
return RC_FX;
} // endif Type
@@ -2340,7 +2341,7 @@ int TDBBSON::MakeDocument(PGLOBAL g)
} // endif p
if (jsp->Type != TYPE_JAR) {
- strcpy(g->Message, "Table path does not match the json file");
+ safe_strcpy(g->Message, sizeof(g->Message), "Table path does not match the json file");
return RC_FX;
} // endif Type
@@ -2434,7 +2435,7 @@ void TDBBSON::ResetSize(void)
int TDBBSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool)
{
if (pxdf) {
- strcpy(g->Message, "JSON not indexable when pretty = 2");
+ safe_strcpy(g->Message, sizeof(g->Message), "JSON not indexable when pretty = 2");
return RC_FX;
} else
return RC_OK;
diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp
index 62eecb5e69e..0fdc182f6df 100644
--- a/storage/connect/tabdos.cpp
+++ b/storage/connect/tabdos.cpp
@@ -62,6 +62,7 @@
#include "tabmul.h"
#include "array.h"
#include "blkfil.h"
+#include "m_string.h"
/***********************************************************************/
/* DB static variables. */
@@ -258,7 +259,7 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf)
sep = GetBoolCatInfo("SepIndex", false);
if (!sep && pxdf) {
- strcpy(g->Message, MSG(NO_RECOV_SPACE));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(NO_RECOV_SPACE));
return true;
} // endif sep
@@ -293,7 +294,8 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf)
for (; pxdf; pxdf = pxdf->GetNext()) {
_splitpath(Ofn, drive, direc, fname, NULL);
- strcat(strcat(fname, "_"), pxdf->GetName());
+ safe_strcat(fname, sizeof(fname), "_");
+ safe_strcat(fname, sizeof(fname), pxdf->GetName());
_makepath(filename, drive, direc, fname, ftype);
PlugSetPath(filename, filename, GetPath());
#if defined(_WIN32)
@@ -312,7 +314,7 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf)
} else { // !sep
// Drop all indexes, delete the common file
PlugSetPath(filename, Ofn, GetPath());
- strcat(PlugRemoveType(filename, filename), ftype);
+ safe_strcat(PlugRemoveType(filename, filename), sizeof(filename), ftype);
#if defined(_WIN32)
if (!DeleteFile(filename))
rc = (GetLastError() != ERROR_FILE_NOT_FOUND);
@@ -365,7 +367,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) {
txfp = new(g) UZDFAM(this);
} else {
- strcpy(g->Message, "Zipped DBF tables are read only");
+ safe_strcpy(g->Message, sizeof(g->Message), "Zipped DBF tables are read only");
return NULL;
} // endif's mode
@@ -386,7 +388,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
} else if (mode == MODE_INSERT) {
txfp = new(g) ZIPFAM(this);
} else {
- strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP");
return NULL;
} // endif's mode
@@ -397,7 +399,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
} else if (mode == MODE_INSERT) {
txfp = new(g) ZPXFAM(this);
} else {
- strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP");
return NULL;
} // endif's mode
@@ -655,7 +657,7 @@ int TDBDOS::MakeBlockValues(PGLOBAL g)
if ((nrec = defp->GetElemt()) < 2) {
if (!To_Def->Partitioned()) {
// This may be wrong to do in some cases
- strcpy(g->Message, MSG(TABLE_NOT_OPT));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(TABLE_NOT_OPT));
return RC_INFO; // Not to be optimized
} else
return RC_OK;
@@ -675,7 +677,7 @@ int TDBDOS::MakeBlockValues(PGLOBAL g)
if ((block = (int)((MaxSize + (int)nrec - 1) / (int)nrec)) < 2) {
// This may be wrong to do in some cases
defp->RemoveOptValues(g);
- strcpy(g->Message, MSG(TABLE_NOT_OPT));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(TABLE_NOT_OPT));
return RC_INFO; // Not to be optimized
} // endif block
@@ -758,7 +760,7 @@ int TDBDOS::MakeBlockValues(PGLOBAL g)
// No optimised columns. Still useful for blocked variable tables.
if (!colp && defp->Recfm != RECFM_VAR) {
- strcpy(g->Message, "No optimised columns");
+ safe_strcpy(g->Message, sizeof(g->Message), "No optimised columns");
return RC_INFO;
} // endif colp
@@ -788,7 +790,8 @@ int TDBDOS::MakeBlockValues(PGLOBAL g)
/*********************************************************************/
char *p = (char *)PlugSubAlloc(g, NULL, 24 + strlen(Name));
- dup->Step = strcat(strcpy(p, MSG(OPTIMIZING)), Name);
+ snprintf(p, 24 + strlen(Name), "%s%s", MSG(OPTIMIZING), Name);
+ dup->Step = p;
dup->ProgMax = GetProgMax(g);
dup->ProgCur = 0;
#endif // SOCKET_MODE || THREAD
@@ -805,7 +808,7 @@ int TDBDOS::MakeBlockValues(PGLOBAL g)
} else {
if (++curnum >= nrec) {
if (++curblk >= block) {
- strcpy(g->Message, MSG(BAD_BLK_ESTIM));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(BAD_BLK_ESTIM));
goto err;
} else
curnum = 0;
@@ -833,7 +836,7 @@ int TDBDOS::MakeBlockValues(PGLOBAL g)
#if defined(PROG_INFO)
if (!dup->Step) {
- strcpy(g->Message, MSG(OPT_CANCELLED));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(OPT_CANCELLED));
goto err;
} else
dup->ProgCur = GetProgCur();
@@ -913,7 +916,8 @@ bool TDBDOS::SaveBlockValues(PGLOBAL g)
if (!(opfile = fopen(filename, "wb"))) {
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR),
"wb", (int)errno, filename);
- strcat(strcat(g->Message, ": "), strerror(errno));
+ safe_strcat(g->Message, sizeof(g->Message), ": ");
+ safe_strcat(g->Message, sizeof(g->Message), strerror(errno));
if (trace(1))
htrc("%s\n", g->Message);
@@ -1230,7 +1234,8 @@ bool TDBDOS::GetDistinctColumnValues(PGLOBAL g, int nrec)
/* Initialize progress information */
/*********************************************************************/
p = (char *)PlugSubAlloc(g, NULL, 48 + strlen(Name));
- dup->Step = strcat(strcpy(p, MSG(GET_DIST_VALS)), Name);
+ snprintf(p, 48 + strlen(Name), "%s%s", MSG(GET_DIST_VALS), Name);
+ dup->Step = p;
dup->ProgMax = GetProgMax(g);
dup->ProgCur = 0;
@@ -1242,12 +1247,12 @@ bool TDBDOS::GetDistinctColumnValues(PGLOBAL g, int nrec)
#if defined(SOCKET_MODE)
if (SendProgress(dup)) {
- strcpy(g->Message, MSG(OPT_CANCELLED));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(OPT_CANCELLED));
return true;
} else
#elif defined(THREAD)
if (!dup->Step) {
- strcpy(g->Message, MSG(OPT_CANCELLED));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(OPT_CANCELLED));
return true;
} else
#endif // THREAD
@@ -1528,7 +1533,7 @@ PBF TDBDOS::CheckBlockFilari(PGLOBAL g, PXOB *arg, int op, bool *cnv)
} else if (n == 8 || n == 14) {
if (n == 8 && ctype != TYPE_LIST) {
// Should never happen
- strcpy(g->Message, "Block opt: bad constant");
+ safe_strcpy(g->Message, sizeof(g->Message), "Block opt: bad constant");
throw 99;
} // endif Conv
@@ -1686,7 +1691,7 @@ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add)
// Are we are called from CreateTable or CreateIndex?
if (pxdf) {
if (!add && dfp->GetIndx()) {
- strcpy(g->Message, MSG(INDX_EXIST_YET));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(INDX_EXIST_YET));
return RC_FX;
} // endif To_Indx
@@ -1798,7 +1803,7 @@ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add)
htrc("Exception %d: %s\n", n, g->Message);
rc = RC_FX;
} catch (const char *msg) {
- strcpy(g->Message, msg);
+ safe_strcpy(g->Message, sizeof(g->Message), msg);
rc = RC_FX;
} // end catch
@@ -1832,7 +1837,7 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted)
PKPDEF kdp;
if (!xdp && !(xdp = To_Xdp)) {
- strcpy(g->Message, "NULL dynamic index");
+ safe_strcpy(g->Message, sizeof(g->Message), "NULL dynamic index");
return true;
} else
dynamic = To_Filter && xdp->IsUnique() && xdp->IsDynamic();
@@ -1921,7 +1926,7 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted)
htrc("Exception %d: %s\n", n, g->Message);
brc = true;
} catch (const char *msg) {
- strcpy(g->Message, msg);
+ safe_strcpy(g->Message, sizeof(g->Message), msg);
brc = true;
} // end catch
@@ -2682,38 +2687,38 @@ void DOSCOL::WriteColumn(PGLOBAL g)
if (Ldz || Nod || Dcm >= 0) {
switch (Buf_Type) {
case TYPE_SHORT:
- strcpy(fmt, (Ldz) ? "%0*hd" : "%*.hd");
+ safe_strcpy(fmt, sizeof(fmt), (Ldz) ? "%0*hd" : "%*.hd");
i = 0;
if (Nod)
for (; i < Dcm; i++)
- strcat(fmt, "0");
+ safe_strcat(fmt, sizeof(fmt), "0");
len = sprintf(Buf, fmt, field - i, Value->GetShortValue());
break;
case TYPE_INT:
- strcpy(fmt, (Ldz) ? "%0*d" : "%*.d");
+ safe_strcpy(fmt, sizeof(fmt), (Ldz) ? "%0*d" : "%*.d");
i = 0;
if (Nod)
for (; i < Dcm; i++)
- strcat(fmt, "0");
+ safe_strcat(fmt,sizeof(fmt), "0");
+ safe_strcat(fmt, sizeof(fmt), "0");
len = sprintf(Buf, fmt, field - i, Value->GetIntValue());
break;
case TYPE_TINY:
- strcpy(fmt, (Ldz) ? "%0*d" : "%*.d");
+ safe_strcpy(fmt, sizeof(fmt), (Ldz) ? "%0*d" : "%*.d");
i = 0;
if (Nod)
for (; i < Dcm; i++)
- strcat(fmt, "0");
+ safe_strcat(fmt, sizeof(fmt), "0");
len = sprintf(Buf, fmt, field - i, Value->GetTinyValue());
break;
case TYPE_DOUBLE:
case TYPE_DECIM:
- strcpy(fmt, (Ldz) ? "%0*.*lf" : "%*.*lf");
+ safe_strcpy(fmt, sizeof(fmt), (Ldz) ? "%0*.*lf" : "%*.*lf");
len = field + ((Nod && Dcm) ? 1 : 0);
snprintf(Buf, len + 1, fmt, len, Dcm, Value->GetFloatValue());
len = strlen(Buf);
diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp
index 96a9f70e4a3..f558cb04f4d 100644
--- a/storage/connect/tabext.cpp
+++ b/storage/connect/tabext.cpp
@@ -65,7 +65,7 @@ int CONDFIL::Init(PGLOBAL g, PHC hc)
while (alt) {
if (!(p = strchr(alt, '='))) {
- strcpy(g->Message, "Invalid alias list");
+ safe_strcpy(g->Message, sizeof(g->Message), "Invalid alias list");
rc = RC_FX;
break;
} // endif !p
@@ -126,7 +126,7 @@ EXTDEF::EXTDEF(void)
bool EXTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
{
if (g->Createas) {
- strcpy(g->Message,
+ safe_strcpy(g->Message, sizeof(g->Message),
"Multiple-table UPDATE/DELETE commands are not supported");
return true;
} // endif multi
@@ -349,7 +349,7 @@ bool TDBEXT::MakeSrcdef(PGLOBAL g)
int n_placeholders = count_placeholders(Srcdef);
if (n_placeholders < 0)
{
- strcpy(g->Message, "MakeSQL: Wrong place holders specification");
+ safe_strcpy(g->Message, sizeof(g->Message), "MakeSQL: Wrong place holders specification");
return true;
}
@@ -372,7 +372,7 @@ bool TDBEXT::MakeSrcdef(PGLOBAL g)
Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1) + strlen(fil2));
Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil2, fil1));
} else {
- strcpy(g->Message, "MakeSQL: Wrong place holders specification");
+ safe_strcpy(g->Message, sizeof(g->Message), "MakeSQL: Wrong place holders specification");
return true;
} // endif's ph
@@ -466,7 +466,7 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
if (Quote) {
// Tabname can have both database and table identifiers, we need to parse
- if (res= strstr(buf, "."))
+ if ((res= strstr(buf, ".")))
{
// Parse schema
my_len= res - buf + 1;
@@ -513,7 +513,7 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
len += ((Mode == MODE_READX) ? 256 : 1);
if (Query->IsTruncated()) {
- strcpy(g->Message, "MakeSQL: Out of memory");
+ safe_strcpy(g->Message, sizeof(g->Message), "MakeSQL: Out of memory");
return true;
} else
Query->Resize(len);
@@ -574,6 +574,7 @@ bool TDBEXT::MakeCommand(PGLOBAL g)
bool qtd = Quoted > 0;
char q = qtd ? *Quote : ' ';
int i = 0, k = 0;
+ size_t stmt_sz = 0;
// Make a lower case copy of the originale query and change
// back ticks to the data source identifier quoting character
@@ -585,26 +586,30 @@ bool TDBEXT::MakeCommand(PGLOBAL g)
p[7] = 0; // Remove where clause
Qrystr[(p - qrystr) + 7] = 0;
body = To_CondFil->Body;
- stmt = (char*)PlugSubAlloc(g, NULL, strlen(qrystr)
- + strlen(body) + 64);
+ stmt_sz = strlen(qrystr) + strlen(body) + 64;
} else
- stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64);
+ stmt_sz = strlen(Qrystr) + 64;
+ stmt = (char*)PlugSubAlloc(g, NULL, stmt_sz);
// Check whether the table name is equal to a keyword
// If so, it must be quoted in the original query
- strlwr(strcat(strcat(strcpy(name, " "), Name), " "));
+ snprintf(name, sizeof(name), " %s ", Name);
+ strlwr(name);
if (strstr(" update delete low_priority ignore quick from ", name)) {
if (Quote) {
- strlwr(strcat(strcat(strcpy(name, Quote), Name), Quote));
+ snprintf(name, sizeof(name), "%s%s%s", Quote, Name, Quote);
+ strlwr(name);
k += 2;
} else {
- strcpy(g->Message, "Quoted must be specified");
+ safe_strcpy(g->Message, sizeof(g->Message), "Quoted must be specified");
return true;
} // endif Quote
- } else
- strlwr(strcpy(name, Name)); // Not a keyword
+ } else {
+ safe_strcpy(name, sizeof(name), Name); // Not a keyword
+ strlwr(name);
+ }
if ((p = strstr(qrystr, name))) {
for (i = 0; i < p - qrystr; i++)
@@ -618,21 +623,29 @@ bool TDBEXT::MakeCommand(PGLOBAL g)
schmp = Schema;
if (qtd && *(p - 1) == ' ') {
- if (schmp)
- strcat(strcat(stmt, schmp), ".");
+ if (schmp) {
+ safe_strcat(stmt, stmt_sz, schmp);
+ safe_strcat(stmt, stmt_sz, ".");
+ }
- strcat(strcat(strcat(stmt, Quote), TableName), Quote);
+ safe_strcat(stmt, stmt_sz, Quote);
+ safe_strcat(stmt, stmt_sz, TableName);
+ safe_strcat(stmt, stmt_sz, Quote);
} else {
if (schmp) {
if (qtd && *(p - 1) != ' ') {
stmt[i - 1] = 0;
- strcat(strcat(strcat(stmt, schmp), "."), Quote);
- } else
- strcat(strcat(stmt, schmp), ".");
+ safe_strcat(stmt, stmt_sz, schmp);
+ safe_strcat(stmt, stmt_sz, ".");
+ safe_strcat(stmt, stmt_sz, Quote);
+ } else {
+ safe_strcat(stmt, stmt_sz, schmp);
+ safe_strcat(stmt, stmt_sz, ".");
+ }
} // endif schmp
- strcat(stmt, TableName);
+ safe_strcat(stmt, stmt_sz, TableName);
} // endif's
i = (int)strlen(stmt);
@@ -644,7 +657,7 @@ bool TDBEXT::MakeCommand(PGLOBAL g)
RemoveConst(g, stmt);
if (body)
- strcat(stmt, body);
+ safe_strcat(stmt, stmt_sz, body);
} else {
snprintf(g->Message, sizeof(g->Message), "Cannot use this %s command",
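
In MakeCommand() the statement buffer is now sized once up front (stmt_sz), allocated in a single PlugSubAlloc() call, and every later append is bounded by that size; the quoted and unquoted variants of the table name are built with snprintf() instead of nested strcpy()/strcat(). A small stand-alone sketch of the same pattern, with hypothetical names and malloc() standing in for PlugSubAlloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_stmt(const char *qrystr, const char *body)
{
  /* worst-case size first, mirroring stmt_sz = strlen(qrystr) + strlen(body) + 64 */
  size_t stmt_sz = strlen(qrystr) + (body ? strlen(body) : 0) + 64;
  char *stmt = (char *) malloc(stmt_sz);
  if (!stmt)
    return NULL;
  snprintf(stmt, stmt_sz, "%s", qrystr);                              /* bounded copy */
  if (body)
    snprintf(stmt + strlen(stmt), stmt_sz - strlen(stmt), "%s", body); /* bounded append */
  return stmt;
}
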
diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp
index 7edffc638fa..037a465af13 100644
--- a/storage/connect/tabfmt.cpp
+++ b/storage/connect/tabfmt.cpp
@@ -62,6 +62,7 @@
#define NO_FUNC
#include "plgcnx.h" // For DB types
#include "resource.h"
+#include "m_string.h"
/***********************************************************************/
/* This should be an option. */
@@ -137,7 +138,7 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info)
? strchr(tdp->Entry, '*') || strchr(tdp->Entry, '?')
: GetBooleanTableOption(g, topt, "Mulentries", false);
#else // !ZIP_SUPPORT
- strcpy(g->Message, "ZIP not supported by this version");
+ safe_strcpy(g->Message, sizeof(g->Message), "ZIP not supported by this version");
return NULL;
#endif // !ZIP_SUPPORT
} // endif // Zipped
@@ -145,7 +146,7 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info)
fn = tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL);
if (!tdp->Fn) {
- strcpy(g->Message, MSG(MISSING_FNAME));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(MISSING_FNAME));
return NULL;
} // endif Fn
@@ -472,7 +473,7 @@ bool CSVDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
if (Catfunc == FNC_NO)
for (PCOLDEF cdp = To_Cols; cdp; cdp = cdp->GetNext())
if (cdp->GetOffset() < 1 && !cdp->IsSpecial()) {
- strcpy(g->Message, MSG(BAD_OFFSET_VAL));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(BAD_OFFSET_VAL));
return true;
} // endif Offset
@@ -528,11 +529,11 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode)
} else if (mode == MODE_INSERT) {
txfp = new(g) ZIPFAM(this);
} else {
- strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP");
return NULL;
} // endif's mode
#else // !ZIP_SUPPORT
- strcpy(g->Message, "ZIP not supported");
+ safe_strcpy(g->Message, sizeof(g->Message), "ZIP not supported");
return NULL;
#endif // !ZIP_SUPPORT
} else if (map) {
@@ -546,7 +547,7 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode)
txfp = new(g) ZLBFAM(this);
#else // !GZ_SUPPORT
- strcpy(g->Message, "Compress not supported");
+ safe_strcpy(g->Message, sizeof(g->Message), "Compress not supported");
return NULL;
#endif // !GZ_SUPPORT
} else
@@ -878,7 +879,7 @@ bool TDBCSV::SkipHeader(PGLOBAL g)
if (q)
To_Line[strlen(To_Line)] = Qot;
- strcat(To_Line, cdp->GetName());
+ safe_strcat(To_Line, Lrecl, cdp->GetName());
if (q)
To_Line[strlen(To_Line)] = Qot;
@@ -1048,14 +1049,16 @@ bool TDBCSV::PrepareWriting(PGLOBAL g)
for (i = 0; i < Fields; i++) {
if (i)
- strcat(To_Line, sep);
+ safe_strcat(To_Line, Lrecl, sep);
if (Field[i]) {
if (!strlen(Field[i])) {
// Generally null fields are not quoted
- if (Quoted > 2)
+ if (Quoted > 2) {
// Except if explicitly required
- strcat(strcat(To_Line, qot), qot);
+ safe_strcat(To_Line, Lrecl, qot);
+ safe_strcat(To_Line, Lrecl, qot);
+ }
} else if (Qot && (strchr(Field[i], Sep) || *Field[i] == Qot
|| Quoted > 1 || (Quoted == 1 && !Fldtyp[i]))) {
@@ -1074,12 +1077,15 @@ bool TDBCSV::PrepareWriting(PGLOBAL g)
To_Line[k++] = Qot;
To_Line[k] = '\0';
- } else
- strcat(strcat(strcat(To_Line, qot), Field[i]), qot);
+ } else {
+ safe_strcat(To_Line, Lrecl, qot);
+ safe_strcat(To_Line, Lrecl, Field[i]);
+ safe_strcat(To_Line, Lrecl, qot);
+ }
}
else
- strcat(To_Line, Field[i]);
+ safe_strcat(To_Line, Lrecl, Field[i]);
}
} // endfor i
@@ -1156,7 +1162,7 @@ int TDBCSV::CheckWrite(PGLOBAL g)
} // endif
}
if ((nlen += n) > maxlen) {
- strcpy(g->Message, MSG(LINE_TOO_LONG));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(LINE_TOO_LONG));
return -1;
} // endif nlen
@@ -1266,7 +1272,7 @@ bool TDBFMT::OpenDB(PGLOBAL g)
} // endif n
FldFormat[i] = (PSZ)PlugSubAlloc(g, NULL, n + 5);
- strcpy(FldFormat[i], pfm);
+ safe_strcpy(FldFormat[i], n + 5, pfm);
if (!strcmp(pfm + n, "%m")) {
// This is a field that can be missing. Flag it so it can
@@ -1276,7 +1282,7 @@ bool TDBFMT::OpenDB(PGLOBAL g)
} else if (i+1 < Fields && strcmp(pfm + n, "%n")) {
// There are trailing characters after the field contents
// add a marker for the next field start position.
- strcat(FldFormat[i], "%n");
+ safe_strcat(FldFormat[i], n + 5, "%n");
FmtTest[i] = 1;
} // endif's
diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp
index 46fb7695a51..0242832b02f 100644
--- a/storage/connect/tabjdbc.cpp
+++ b/storage/connect/tabjdbc.cpp
@@ -277,7 +277,7 @@ PTDB JDBCDEF::GetTable(PGLOBAL g, MODE m)
if (Multiple == 1)
tdbp = new(g)TDBMUL(tdbp);
else if (Multiple == 2)
- strcpy(g->Message, "NO_JDBC_MUL");
+ safe_strcpy(g->Message, sizeof(g->Message), "NO_JDBC_MUL");
} // endswitch Catfunc
@@ -386,7 +386,7 @@ bool TDBJDBC::MakeInsert(PGLOBAL g)
for (colp = Columns; colp; colp = colp->GetNext())
if (colp->IsSpecial()) {
- strcpy(g->Message, "No JDBC special columns");
+ safe_strcpy(g->Message, sizeof(g->Message), "No JDBC special columns");
return true;
} else {
// Column name can be encoded in UTF-8
@@ -460,7 +460,7 @@ bool TDBJDBC::MakeInsert(PGLOBAL g)
} // endfor colp
if ((Query->Append(") VALUES ("))) {
- strcpy(g->Message, "MakeInsert: Out of memory");
+ safe_strcpy(g->Message, sizeof(g->Message), "MakeInsert: Out of memory");
return true;
} else // in case prepared statement fails
pos = Query->GetLength();
@@ -470,7 +470,7 @@ bool TDBJDBC::MakeInsert(PGLOBAL g)
Query->Append("?,");
if (Query->IsTruncated()) {
- strcpy(g->Message, "MakeInsert: Out of memory");
+ safe_strcpy(g->Message, sizeof(g->Message), "MakeInsert: Out of memory");
return true;
} else
Query->RepLast(')');
@@ -532,12 +532,15 @@ int TDBJDBC::Cardinality(PGLOBAL g)
// Table name can be encoded in UTF-8
Decode(TableName, tbn, sizeof(tbn));
- strcpy(qry, "SELECT COUNT(*) FROM ");
+ safe_strcpy(qry, sizeof(qry), "SELECT COUNT(*) FROM ");
- if (Quote)
- strcat(strcat(strcat(qry, Quote), tbn), Quote);
+ if (Quote) {
+ safe_strcat(qry, sizeof(qry), Quote);
+ safe_strcat(qry, sizeof(qry), tbn);
+ safe_strcat(qry, sizeof(qry), Quote);
+ }
else
- strcat(qry, tbn);
+ safe_strcat(qry, sizeof(qry), tbn);
// Allocate a Count(*) column (must not use the default constructor)
Cnp = new(g)JDBCCOL;
@@ -656,7 +659,7 @@ bool TDBJDBC::OpenDB(PGLOBAL g)
if ((Qrp = Jcp->AllocateResult(g, this)))
Memory = 2; // Must be filled
else {
- strcpy(g->Message, "Result set memory allocation failed");
+ safe_strcpy(g->Message, sizeof(g->Message), "Result set memory allocation failed");
return true;
} // endif n
@@ -683,7 +686,7 @@ bool TDBJDBC::OpenDB(PGLOBAL g)
#if 0
if (!(rc = MakeInsert(g))) {
if (Nparm != Jcp->PrepareSQL(Query->GetStr())) {
- strcpy(g->Message, MSG(PARM_CNT_MISS));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(PARM_CNT_MISS));
rc = true;
} else
rc = BindParameters(g);
@@ -735,12 +738,12 @@ bool TDBJDBC::SetRecpos(PGLOBAL g, int recpos)
CurNum = recpos;
Fpos = recpos;
} else {
- strcpy(g->Message, "Scrolling out of row set NIY");
+ safe_strcpy(g->Message, sizeof(g->Message), "Scrolling out of row set NIY");
return true;
} // endif recpos
} else {
- strcpy(g->Message, "This action requires a scrollable cursor");
+ safe_strcpy(g->Message, sizeof(g->Message), "This action requires a scrollable cursor");
return true;
} // endif's
@@ -786,7 +789,7 @@ bool TDBJDBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr)
if (To_CondFil)
if (Query->Append(" AND ") || Query->Append(To_CondFil->Body)) {
- strcpy(g->Message, "Readkey: Out of memory");
+ safe_strcpy(g->Message, sizeof(g->Message), "Readkey: Out of memory");
return true;
} // endif Append
@@ -919,7 +922,7 @@ int TDBJDBC::WriteDB(PGLOBAL g)
} // endfor colp
if (unlikely(Query->IsTruncated())) {
- strcpy(g->Message, "WriteDB: Out of memory");
+ safe_strcpy(g->Message, sizeof(g->Message), "WriteDB: Out of memory");
return RC_FX;
} // endif Query
@@ -1112,13 +1115,13 @@ PCMD TDBXJDC::MakeCMD(PGLOBAL g)
(To_CondFil->Op == OP_EQ || To_CondFil->Op == OP_IN)) {
xcmd = To_CondFil->Cmds;
} else
- strcpy(g->Message, "Invalid command specification filter");
+ safe_strcpy(g->Message, sizeof(g->Message), "Invalid command specification filter");
} else
- strcpy(g->Message, "No command column in select list");
+ safe_strcpy(g->Message, sizeof(g->Message), "No command column in select list");
} else if (!Srcdef)
- strcpy(g->Message, "No Srcdef default command");
+ safe_strcpy(g->Message, sizeof(g->Message), "No Srcdef default command");
else
xcmd = new(g) CMD(g, Srcdef);
@@ -1149,7 +1152,7 @@ bool TDBXJDC::OpenDB(PGLOBAL g)
this, Tdb_No, Use, Mode);
if (Use == USE_OPEN) {
- strcpy(g->Message, "Multiple execution is not allowed");
+ safe_strcpy(g->Message, sizeof(g->Message), "Multiple execution is not allowed");
return true;
} // endif use
@@ -1171,7 +1174,7 @@ bool TDBXJDC::OpenDB(PGLOBAL g)
Use = USE_OPEN; // Do it now in case we are recursively called
if (Mode != MODE_READ && Mode != MODE_READX) {
- strcpy(g->Message, "No INSERT/DELETE/UPDATE of XJDBC tables");
+ safe_strcpy(g->Message, sizeof(g->Message), "No INSERT/DELETE/UPDATE of XJDBC tables");
return true;
} // endif Mode
@@ -1224,7 +1227,7 @@ int TDBXJDC::ReadDB(PGLOBAL g)
/***********************************************************************/
int TDBXJDC::WriteDB(PGLOBAL g)
{
- strcpy(g->Message, "Execsrc tables are read only");
+ safe_strcpy(g->Message, sizeof(g->Message), "Execsrc tables are read only");
return RC_FX;
} // end of DeleteDB
@@ -1233,7 +1236,7 @@ int TDBXJDC::WriteDB(PGLOBAL g)
/***********************************************************************/
int TDBXJDC::DeleteDB(PGLOBAL g, int irc)
{
- strcpy(g->Message, "NO_XJDBC_DELETE");
+ safe_strcpy(g->Message, sizeof(g->Message), "NO_XJDBC_DELETE");
return RC_FX;
} // end of DeleteDB
diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp
index ec76d82d917..02bdf4ea9b7 100644
--- a/storage/connect/tabjson.cpp
+++ b/storage/connect/tabjson.cpp
@@ -85,7 +85,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info)
} // endif info
if (GetIntegerTableOption(g, topt, "Multiple", 0)) {
- strcpy(g->Message, "Cannot find column definition for multiple table");
+ safe_strcpy(g->Message, sizeof(g->Message), "Cannot find column definition for multiple table");
return NULL;
} // endif Multiple
@@ -212,7 +212,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
tdp->Uri = (dsn && *dsn ? dsn : NULL);
if (!tdp->Fn && !tdp->Uri) {
- strcpy(g->Message, MSG(MISSING_FNAME));
+ safe_strcpy(g->Message, sizeof(g->Message), MSG(MISSING_FNAME));
return 0;
} else
topt->subtype = NULL;
@@ -320,7 +320,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
switch (tjnp->ReadDB(g)) {
case RC_EF:
- strcpy(g->Message, "Void json table");
+ safe_strcpy(g->Message, sizeof(g->Message), "Void json table");
case RC_FX:
goto err;
default:
@@ -333,7 +333,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
} // endif pretty
if (!(row = (jsp) ? jsp->GetObject() : NULL)) {
- strcpy(g->Message, "Can only retrieve columns from object rows");
+ safe_strcpy(g->Message, sizeof(g->Message), "Can only retrieve columns from object rows");
goto err;
} // endif row
@@ -417,7 +417,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
if (jvp && jvp->DataType != TYPE_JSON) {
if (JsonAllPath() && !fmt[bf])
- strcat(fmt, colname);
+ safe_strcat(fmt, sizeof(fmt), colname);
jcol.Type = jvp->DataType;
@@ -450,7 +450,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
jcol.Cbn = true;
} else if (j < lvl && !Stringified(strfy, colname)) {
if (!fmt[bf])
- strcat(fmt, colname);
+ safe_strcat(fmt, sizeof(fmt), colname);
p = fmt + strlen(fmt);
jsp = jvp->GetJson();
@@ -520,11 +520,11 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
} else if (lvl >= 0) {
if (Stringified(strfy, colname)) {
if (!fmt[bf])
- strcat(fmt, colname);
+ safe_strcat(fmt, sizeof(fmt), colname);
- strcat(fmt, ".*");
+ safe_strcat(fmt, sizeof(fmt), ".*");
} else if (JsonAllPath() && !fmt[bf])
- strcat(fmt, colname);
+ safe_strcat(fmt, sizeof(fmt), colname);
jcol.Type = TYPE_STRG;
jcol.Len = sz;
@@ -735,7 +735,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
} else if (m == MODE_INSERT) {
txfp = new(g) ZIPFAM(this);
} else {
- strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP");
return NULL;
} // endif's m
#else // !ZIP_SUPPORT
@@ -775,7 +775,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
#endif // 0
((TDBJSN*)tdbp)->G = PlugInit(NULL, (size_t)Lrecl * (Pretty >= 0 ? 12 : 4));
} else {
- strcpy(g->Message, "LRECL is not defined");
+ safe_strcpy(g->Message, sizeof(g->Message), "LRECL is not defined");
return NULL;
} // endif Lrecl
@@ -785,10 +785,10 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) {
txfp = new(g) UNZFAM(this);
} else if (m == MODE_INSERT) {
- strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0");
+ safe_strcpy(g->Message, sizeof(g->Message), "INSERT supported only for zipped JSON when pretty=0");
return NULL;
} else {
- strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP");
return NULL;
} // endif's m
#else // !ZIP_SUPPORT
@@ -1145,7 +1145,7 @@ int TDBJSN::ReadDB(PGLOBAL g) {
M = 1;
rc = RC_OK;
} else if (Pretty != 1 || strcmp(To_Line, "]")) {
- strcpy(g->Message, G->Message);
+ safe_strcpy(g->Message, sizeof(g->Message), G->Message);
rc = RC_FX;
} else
rc = RC_EF;
@@ -1258,7 +1258,7 @@ bool TDBJSN::PrepareWriting(PGLOBAL g)
strcat(s, ",");
if ((signed)strlen(s) > Lrecl) {
- strncpy(To_Line, s, Lrecl);
+ safe_strcpy(To_Line, Lrecl, s);
snprintf(g->Message, sizeof(g->Message), "Line truncated (lrecl=%d)", Lrecl);
return PushWarning(g, this);
} else
@@ -1360,7 +1360,7 @@ bool JSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b)
Xpd = true; // Expandable object
Nodes[i].Op = OP_EXP;
} else if (b) {
- strcpy(g->Message, "Cannot expand more than one branch");
+ safe_strcpy(g->Message, sizeof(g->Message), "Cannot expand more than one branch");
return true;
} // endif Xcol
@@ -1571,7 +1571,7 @@ bool JSONCOL::ParseJpath(PGLOBAL g)
if (SetArrayOptions(g, p, i, Nodes[i - 1].Key))
return true;
else if (Xpd && Tjp->Mode == MODE_DELETE) {
- strcpy(g->Message, "Cannot delete expanded columns");
+ safe_strcpy(g->Message, sizeof(g->Message), "Cannot delete expanded columns");
return true;
} // endif Xpd
@@ -1675,7 +1675,7 @@ PSZ JSONCOL::GetJpath(PGLOBAL g, bool proj)
PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp, int n)
{
if (Value->IsTypeNum()) {
- strcpy(g->Message, "Cannot make Json for a numeric column");
+ safe_strcpy(g->Message, sizeof(g->Message), "Cannot make Json for a numeric column");
if (!Warned) {
PushWarning(g, Tjp);
@@ -1690,10 +1690,10 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp, int n)
ulong len = Tjp->Lrecl ? Tjp->Lrecl : 500;
PBSON bsp = JbinAlloc(g, NULL, len, jsp);
- strcat(bsp->Msg, " column");
+ safe_strcat(bsp->Msg, sizeof(bsp->Msg), " column");
((BINVAL*)Value)->SetBinValue(bsp, sizeof(BSON));
} else {
- strcpy(g->Message, "Column size too small");
+ safe_strcpy(g->Message, sizeof(g->Message), "Column size too small");
Value->SetValue_char(NULL, 0);
} // endif Clen
#endif // 0
@@ -1935,7 +1935,7 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n)
} // endif ars
if (!(jvp = arp->GetArrayValue((Nodes[n].Rx = Nodes[n].Nx)))) {
- strcpy(g->Message, "Logical error expanding array");
+ safe_strcpy(g->Message, sizeof(g->Message), "Logical error expanding array");
throw 666;
} // endif jvp
@@ -2123,7 +2123,7 @@ PJSON JSONCOL::GetRow(PGLOBAL g)
((PJAR)row)->AddArrayValue(G, new(G) JVALUE(nwr));
((PJAR)row)->InitArray(G);
} else {
- strcpy(g->Message, "Wrong type when writing new row");
+ safe_strcpy(g->Message, sizeof(g->Message), "Wrong type when writing new row");
nwr = NULL;
} // endif's
@@ -2144,7 +2144,7 @@ PJSON JSONCOL::GetRow(PGLOBAL g)
void JSONCOL::WriteColumn(PGLOBAL g)
{
if (Xpd && Tjp->Pretty < 2) {
- strcpy(g->Message, "Cannot write expanded column when Pretty is not 2");
+ safe_strcpy(g->Message, sizeof(g->Message), "Cannot write expanded column when Pretty is not 2");
throw 666;
} // endif Xpd
@@ -2180,7 +2180,7 @@ void JSONCOL::WriteColumn(PGLOBAL g)
if (s && *s) {
if (!(jsp = ParseJson(G, s, strlen(s)))) {
- strcpy(g->Message, s);
+ safe_strcpy(g->Message, sizeof(g->Message), s);
throw 666;
} // endif jsp
@@ -2363,7 +2363,7 @@ int TDBJSON::MakeDocument(PGLOBAL g)
if (!a && *p && *p != '[' && !IsNum(p)) {
// obj is a key
if (jsp->GetType() != TYPE_JOB) {
- strcpy(g->Message, "Table path does not match the json file");
+ safe_strcpy(g->Message, sizeof(g->Message), "Table path does not match the json file");
return RC_FX;
} // endif Type
@@ -2389,7 +2389,7 @@ int TDBJSON::MakeDocument(PGLOBAL g)
} // endif p
if (jsp->GetType() != TYPE_JAR) {
- strcpy(g->Message, "Table path does not match the json file");
+ safe_strcpy(g->Message, sizeof(g->Message), "Table path does not match the json file");
return RC_FX;
} // endif Type
@@ -2484,7 +2484,7 @@ void TDBJSON::ResetSize(void)
int TDBJSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool)
{
if (pxdf) {
- strcpy(g->Message, "JSON not indexable when pretty = 2");
+ safe_strcpy(g->Message, sizeof(g->Message), "JSON not indexable when pretty = 2");
return RC_FX;
} else
return RC_OK;
diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp
index 498ec71a87f..7265b2ed0ca 100644
--- a/storage/connect/value.cpp
+++ b/storage/connect/value.cpp
@@ -163,9 +163,9 @@ PCSZ GetTypeName(int type)
/***********************************************************************/
/* GetTypeSize: returns the PlugDB internal type size. */
/***********************************************************************/
-int GetTypeSize(int type, int len)
- {
- switch (type) {
+unsigned GetTypeSize(int type, unsigned len)
+{
+ switch (type) {
case TYPE_DECIM:
case TYPE_BIN:
case TYPE_STRING: len = len * sizeof(char); break;
@@ -176,7 +176,7 @@ int GetTypeSize(int type, int len)
case TYPE_DOUBLE: len = sizeof(double); break;
case TYPE_TINY: len = sizeof(char); break;
case TYPE_PCHAR: len = sizeof(char*); break;
- default: len = -1;
+ default: len = 0;
} // endswitch type
return len;
diff --git a/storage/connect/value.h b/storage/connect/value.h
index a0d947347c3..7eb0dec29f2 100644
--- a/storage/connect/value.h
+++ b/storage/connect/value.h
@@ -41,7 +41,7 @@ typedef struct _datpar *PDTP; // For DTVAL
/***********************************************************************/
// Exported functions
DllExport PCSZ GetTypeName(int);
-DllExport int GetTypeSize(int, int);
+DllExport unsigned GetTypeSize(int, unsigned);
#ifdef ODBC_SUPPORT
/* This function is exported for use in OEM table type DLLs */
DllExport int TranslateSQLType(int stp, int prec,
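
GetTypeSize() now takes and returns unsigned, and its sentinel for an unrecognized type changes from -1 to 0. Any caller that treated a negative result as "unknown" (an assumption; no such caller is shown in this diff) would now test for 0 instead, for example:

/* Hypothetical caller, only to illustrate the 0-instead-of--1 sentinel. */
#include <stdio.h>

unsigned GetTypeSize(int type, unsigned len);   /* declared in storage/connect/value.h */

int alloc_size_for(int type, unsigned len)
{
  unsigned n = GetTypeSize(type, len);
  if (n == 0)                                   /* was: a negative return value */
  {
    fprintf(stderr, "unknown CONNECT type %d\n", type);
    return -1;
  }
  return (int) n;
}
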
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 6fd9686304c..70d2178a5af 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -4464,7 +4464,7 @@ n_field_mismatch:
len -= BTR_EXTERN_FIELD_REF_SIZE;
ulint extern_len = mach_read_from_4(
data + len + BTR_EXTERN_LEN + 4);
- if (fixed_size == extern_len) {
+ if (fixed_size == extern_len + len) {
goto next_field;
}
}
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index d0931de9614..a4eb9f66fb6 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -852,7 +852,7 @@ fts_drop_index(
dberr_t err = DB_SUCCESS;
ut_a(indexes);
-
+ ut_d(dict_sys.assert_locked());
if ((ib_vector_size(indexes) == 1
&& (index == static_cast<dict_index_t*>(
ib_vector_getp(table->fts->indexes, 0)))
@@ -865,7 +865,9 @@ fts_drop_index(
current_doc_id = table->fts->cache->next_doc_id;
first_doc_id = table->fts->cache->first_doc_id;
+ rw_lock_x_lock(&table->fts->cache->init_lock);
fts_cache_clear(table->fts->cache);
+ rw_lock_x_unlock(&table->fts->cache->init_lock);
fts_cache_destroy(table->fts->cache);
table->fts->cache = fts_cache_create(table);
table->fts->cache->next_doc_id = current_doc_id;
@@ -4180,9 +4182,15 @@ fts_sync_commit(
/* We need to do this within the deleted lock since fts_delete() can
attempt to add a deleted doc id to the cache deleted id array. */
+ mutex_enter(&dict_sys.mutex);
+ sync->table->fts->dict_locked = true;
+ rw_lock_x_lock(&cache->init_lock);
fts_cache_clear(cache);
DEBUG_SYNC_C("fts_deleted_doc_ids_clear");
fts_cache_init(cache);
+ rw_lock_x_unlock(&cache->init_lock);
+ sync->table->fts->dict_locked = false;
+ mutex_exit(&dict_sys.mutex);
rw_lock_x_unlock(&cache->lock);
if (UNIV_LIKELY(error == DB_SUCCESS)) {
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index fc97aabfa13..cdb4967b663 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -3498,6 +3498,10 @@ ibuf_insert(
ulint zip_size,
que_thr_t* thr)
{
+ if (!index->is_committed()) {
+ return false;
+ }
+
dberr_t err;
ulint entry_size;
ibool no_counter;
diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h
index a22dc3562b5..5a8a8dd7fd9 100644
--- a/storage/innobase/include/os0file.h
+++ b/storage/innobase/include/os0file.h
@@ -1227,7 +1227,11 @@ is_absolute_path(
}
#ifdef _WIN32
- if (path[1] == ':' && path[2] == OS_PATH_SEPARATOR) {
+ // This will conflict during a 10.5->10.6 merge.
+ // Choose the 10.6 version as is.
+ if (path[1] == ':' &&
+ (path[2] == OS_PATH_SEPARATOR ||
+ path[2] == OS_PATH_SEPARATOR_ALT)) {
return(true);
}
#endif /* _WIN32 */
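
On Windows, is_absolute_path() now accepts either directory separator after the drive letter. A sketch of the accepted prefixes, assuming OS_PATH_SEPARATOR is '\\' and OS_PATH_SEPARATOR_ALT is '/' (an assumption about those macros, not taken from this diff):

#include <stdbool.h>

static bool win_drive_absolute(const char *path)
{
  return path[0] != '\0' && path[1] == ':' &&
         (path[2] == '\\' || path[2] == '/');   /* both C:\dir and C:/dir */
}
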
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index 80d51754d5c..5f41205821d 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -684,11 +684,6 @@ error:
row_field, field, col->len,
old_table->space->zip_size(),
conv_heap);
- } else {
- /* Field length mismatch should not
- happen when rebuilding redundant row
- format table. */
- ut_ad(index->table->not_redundant());
}
}
}
diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake
index 46fd1562eac..cfcb0e351b7 100644
--- a/storage/rocksdb/build_rocksdb.cmake
+++ b/storage/rocksdb/build_rocksdb.cmake
@@ -134,8 +134,8 @@ option(WITH_FALLOCATE "build with fallocate" ON)
if(WITH_FALLOCATE AND UNIX)
include(CheckCSourceCompiles)
CHECK_C_SOURCE_COMPILES("
+#define _GNU_SOURCE
#include <fcntl.h>
-#include <linux/falloc.h>
int main() {
int fd = open(\"/dev/null\", 0);
fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, 1024);
diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_29644.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_29644.result
new file mode 100644
index 00000000000..b52cecc5bb7
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_29644.result
@@ -0,0 +1,41 @@
+#
+# MDEV-29644 a potential bug of null pointer dereference in spider_db_mbase::print_warnings()
+#
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+connection child2_1;
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+CREATE TABLE tbl_a (
+a CHAR(5)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+SET GLOBAL sql_mode='';
+connection master_1;
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+CREATE TABLE tbl_a (
+a CHAR(255)
+) ENGINE=Spider DEFAULT CHARSET=utf8 COMMENT='table "tbl_a", srv "s_2_1"';
+SET sql_mode='';
+INSERT INTO tbl_a VALUES ("this will be truncated");
+NOT FOUND /\[WARN SPIDER RESULT\].* Warning 1265 Data truncated for column 'a' at row 1.*/ in mysqld.1.1.err
+SET GLOBAL spider_log_result_errors=4;
+INSERT INTO tbl_a VALUES ("this will be truncated");
+FOUND 1 /\[WARN SPIDER RESULT\].* Warning 1265 Data truncated for column 'a' at row 1.*/ in mysqld.1.1.err
+connection master_1;
+SET GLOBAL spider_log_result_errors=DEFAULT;
+SET sql_mode=DEFAULT;
+DROP DATABASE IF EXISTS auto_test_local;
+connection child2_1;
+SET GLOBAL sql_mode=DEFAULT;
+DROP DATABASE IF EXISTS auto_test_remote;
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
diff --git a/storage/spider/mysql-test/spider/bugfix/r/self_reference_multi.result b/storage/spider/mysql-test/spider/bugfix/r/self_reference_multi.result
index c4399ddf9d2..8ddf428b4ea 100644
--- a/storage/spider/mysql-test/spider/bugfix/r/self_reference_multi.result
+++ b/storage/spider/mysql-test/spider/bugfix/r/self_reference_multi.result
@@ -12,9 +12,9 @@ alter table t2 ENGINE=Spider COMMENT='WRAPPER "mysql", srv "srv",TABLE "t0"';
select * from t0;
ERROR HY000: An infinite loop is detected when opening table test.t0
select * from t1;
-ERROR HY000: An infinite loop is detected when opening table test.t0
+ERROR HY000: An infinite loop is detected when opening table test.t1
select * from t2;
-ERROR HY000: An infinite loop is detected when opening table test.t0
+ERROR HY000: An infinite loop is detected when opening table test.t2
drop table t0, t1, t2;
for master_1
for child2
diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_29644.cnf b/storage/spider/mysql-test/spider/bugfix/t/mdev_29644.cnf
new file mode 100644
index 00000000000..05dfd8a0bce
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_29644.cnf
@@ -0,0 +1,3 @@
+!include include/default_mysqld.cnf
+!include ../my_1_1.cnf
+!include ../my_2_1.cnf
diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_29644.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_29644.test
new file mode 100644
index 00000000000..3a8fbb251e1
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_29644.test
@@ -0,0 +1,56 @@
+--echo #
+--echo # MDEV-29644 a potential bug of null pointer dereference in spider_db_mbase::print_warnings()
+--echo #
+
+# The test case below does not cause the potential null pointer dereference.
+# It is just for checking spider_db_mbase::fetch_and_print_warnings() works.
+
+--disable_query_log
+--disable_result_log
+--source ../../t/test_init.inc
+--enable_result_log
+--enable_query_log
+
+--connection child2_1
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+eval CREATE TABLE tbl_a (
+ a CHAR(5)
+) $CHILD2_1_ENGINE $CHILD2_1_CHARSET;
+
+SET GLOBAL sql_mode='';
+
+--connection master_1
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+eval CREATE TABLE tbl_a (
+ a CHAR(255)
+) $MASTER_1_ENGINE $MASTER_1_CHARSET COMMENT='table "tbl_a", srv "s_2_1"';
+
+SET sql_mode='';
+
+let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.1.err;
+let SEARCH_PATTERN= \[WARN SPIDER RESULT\].* Warning 1265 Data truncated for column 'a' at row 1.*;
+
+INSERT INTO tbl_a VALUES ("this will be truncated");
+--source include/search_pattern_in_file.inc # should not find
+
+SET GLOBAL spider_log_result_errors=4;
+
+INSERT INTO tbl_a VALUES ("this will be truncated");
+--source include/search_pattern_in_file.inc # should find
+
+--connection master_1
+SET GLOBAL spider_log_result_errors=DEFAULT;
+SET sql_mode=DEFAULT;
+DROP DATABASE IF EXISTS auto_test_local;
+
+--connection child2_1
+SET GLOBAL sql_mode=DEFAULT;
+DROP DATABASE IF EXISTS auto_test_remote;
+
+--disable_query_log
+--disable_result_log
+--source ../t/test_deinit.inc
+--enable_query_log
+--enable_result_log
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc
index d377d2bd807..78734640a52 100644
--- a/storage/spider/spd_db_mysql.cc
+++ b/storage/spider/spd_db_mysql.cc
@@ -2207,7 +2207,7 @@ int spider_db_mbase::exec_query(
db_conn->affected_rows, db_conn->insert_id,
db_conn->server_status, db_conn->warning_count);
if (spider_param_log_result_errors() >= 3)
- print_warnings(l_time);
+ fetch_and_print_warnings(l_time);
} else if (log_result_errors >= 4)
{
time_t cur_time = (time_t) time((time_t*) 0);
@@ -2289,82 +2289,44 @@ bool spider_db_mbase::is_xa_nota_error(
DBUG_RETURN(xa_nota);
}
-int spider_db_mbase::print_warnings(
- struct tm *l_time
-) {
- int error_num = 0;
- DBUG_ENTER("spider_db_mbase::print_warnings");
- DBUG_PRINT("info",("spider this=%p", this));
- if (db_conn->status == MYSQL_STATUS_READY)
+void spider_db_mbase::fetch_and_print_warnings(struct tm *l_time)
+{
+ DBUG_ENTER("spider_db_mbase::fetch_and_print_warnings");
+
+ if (spider_param_dry_access() || db_conn->status != MYSQL_STATUS_READY ||
+ db_conn->server_status & SERVER_MORE_RESULTS_EXISTS)
+ DBUG_VOID_RETURN;
+
+ if (mysql_real_query(db_conn, SPIDER_SQL_SHOW_WARNINGS_STR,
+ SPIDER_SQL_SHOW_WARNINGS_LEN))
+ DBUG_VOID_RETURN;
+
+ MYSQL_RES *res= mysql_store_result(db_conn);
+ if (!res)
+ DBUG_VOID_RETURN;
+
+ uint num_fields= mysql_num_fields(res);
+ if (num_fields != 3)
{
- if (
-#if MYSQL_VERSION_ID < 50500
- !(db_conn->last_used_con->server_status & SERVER_MORE_RESULTS_EXISTS) &&
- db_conn->last_used_con->warning_count
-#else
- !(db_conn->server_status & SERVER_MORE_RESULTS_EXISTS) &&
- db_conn->warning_count
-#endif
- ) {
- if (
- spider_param_dry_access() ||
- !mysql_real_query(db_conn, SPIDER_SQL_SHOW_WARNINGS_STR,
- SPIDER_SQL_SHOW_WARNINGS_LEN)
- ) {
- MYSQL_RES *res = NULL;
- MYSQL_ROW row = NULL;
- uint num_fields;
- if (
- spider_param_dry_access() ||
- !(res = mysql_store_result(db_conn)) ||
- !(row = mysql_fetch_row(res))
- ) {
- if (mysql_errno(db_conn))
- {
- if (res)
- mysql_free_result(res);
- DBUG_RETURN(0);
- }
- /* no record is ok */
- }
- num_fields = mysql_num_fields(res);
- if (num_fields != 3)
- {
- mysql_free_result(res);
- DBUG_RETURN(0);
- }
- if (l_time)
- {
- while (row)
- {
- fprintf(stderr, "%04d%02d%02d %02d:%02d:%02d [WARN SPIDER RESULT] "
- "from [%s] %ld to %ld: %s %s %s\n",
- l_time->tm_year + 1900, l_time->tm_mon + 1, l_time->tm_mday,
- l_time->tm_hour, l_time->tm_min, l_time->tm_sec,
- conn->tgt_host, (ulong) db_conn->thread_id,
- (ulong) current_thd->thread_id, row[0], row[1], row[2]);
- row = mysql_fetch_row(res);
- }
- } else {
- while (row)
- {
- DBUG_PRINT("info",("spider row[0]=%s", row[0]));
- DBUG_PRINT("info",("spider row[1]=%s", row[1]));
- DBUG_PRINT("info",("spider row[2]=%s", row[2]));
- longlong res_num =
- (longlong) my_strtoll10(row[1], (char**) NULL, &error_num);
- DBUG_PRINT("info",("spider res_num=%lld", res_num));
- my_printf_error((int) res_num, row[2], MYF(0));
- error_num = (int) res_num;
- row = mysql_fetch_row(res);
- }
- }
- if (res)
- mysql_free_result(res);
- }
- }
+ mysql_free_result(res);
+ DBUG_VOID_RETURN;
}
- DBUG_RETURN(error_num);
+
+ MYSQL_ROW row= mysql_fetch_row(res);
+ while (row)
+ {
+ fprintf(stderr,
+ "%04d%02d%02d %02d:%02d:%02d [WARN SPIDER RESULT] from [%s] %ld "
+ "to %ld: %s %s %s\n",
+ l_time->tm_year + 1900, l_time->tm_mon + 1, l_time->tm_mday,
+ l_time->tm_hour, l_time->tm_min, l_time->tm_sec, conn->tgt_host,
+ (ulong) db_conn->thread_id, (ulong) current_thd->thread_id, row[0],
+ row[1], row[2]);
+ row= mysql_fetch_row(res);
+ }
+ mysql_free_result(res);
+
+ DBUG_VOID_RETURN;
}
spider_db_result *spider_db_mbase::store_result(
@@ -14668,9 +14630,11 @@ int spider_mbase_handler::show_table_status(
DBUG_RETURN(error_num);
}
}
- if ((error_num = ((spider_db_mbase *) conn->db_conn)->print_warnings(NULL)))
{
- DBUG_RETURN(error_num);
+ time_t cur_time = (time_t) time((time_t*) 0);
+ struct tm lt;
+ struct tm *l_time = localtime_r(&cur_time, &lt);
+ ((spider_db_mbase *) conn->db_conn)->fetch_and_print_warnings(l_time);
}
if (share->static_records_for_status != -1)
{
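
fetch_and_print_warnings() is now void and only logs the SHOW WARNINGS rows; it no longer converts a row into an error number as print_warnings() did. Its caller in show_table_status() therefore builds the timestamp itself, using the thread-safe localtime_r() into a caller-owned struct tm rather than the static buffer of localtime(). The calling pattern as a tiny stand-alone sketch (POSIX localtime_r assumed):

#include <time.h>

static void log_now_example(void)
{
  time_t cur_time = time(NULL);
  struct tm lt;                                     /* caller-owned buffer */
  struct tm *l_time = localtime_r(&cur_time, &lt);  /* no shared static state */
  /* l_time is then passed to fetch_and_print_warnings(l_time), as in the hunk above */
  (void) l_time;
}
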
diff --git a/storage/spider/spd_db_mysql.h b/storage/spider/spd_db_mysql.h
index e90461ea278..757484c3990 100644
--- a/storage/spider/spd_db_mysql.h
+++ b/storage/spider/spd_db_mysql.h
@@ -442,9 +442,7 @@ public:
bool is_xa_nota_error(
int error_num
);
- int print_warnings(
- struct tm *l_time
- );
+ void fetch_and_print_warnings(struct tm *l_time);
spider_db_result *store_result(
spider_db_result_buffer **spider_res_buf,
st_spider_db_request_key *request_key,